1 /*	$NetBSD: if_sip.c,v 1.24.2.5 2002/01/08 00:31:04 nathanw Exp $	*/
2
3 /*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999 Network Computer, Inc.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of Network Computer, Inc. nor the names of its
52 * contributors may be used to endorse or promote products derived
53 * from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
56 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
66 */
67
68 /*
69 * Device driver for the Silicon Integrated Systems SiS 900,
70 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and
71 * National Semiconductor DP83820 10/100/1000 PCI Ethernet
72 * controllers.
73 *
74 * Originally written to support the SiS 900 by Jason R. Thorpe for
75 * Network Computer, Inc.
76 *
77 * TODO:
78 *
79 * - Support the 10-bit interface on the DP83820 (for fiber).
80 *
81 * - Reduce the interrupt load.
82 */
83
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.24.2.5 2002/01/08 00:31:04 nathanw Exp $");
86
87 #include "bpfilter.h"
88
89 #include <sys/param.h>
90 #include <sys/systm.h>
91 #include <sys/callout.h>
92 #include <sys/mbuf.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/socket.h>
96 #include <sys/ioctl.h>
97 #include <sys/errno.h>
98 #include <sys/device.h>
99 #include <sys/queue.h>
100
101 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
102
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111
112 #include <machine/bus.h>
113 #include <machine/intr.h>
114 #include <machine/endian.h>
115
116 #include <dev/mii/mii.h>
117 #include <dev/mii/miivar.h>
118 #ifdef DP83820
119 #include <dev/mii/mii_bitbang.h>
120 #endif /* DP83820 */
121
122 #include <dev/pci/pcireg.h>
123 #include <dev/pci/pcivar.h>
124 #include <dev/pci/pcidevs.h>
125
126 #include <dev/pci/if_sipreg.h>
127
128 #ifdef DP83820 /* DP83820 Gigabit Ethernet */
129 #define SIP_DECL(x) __CONCAT(gsip_,x)
130 #else /* SiS900 and DP83815 */
131 #define SIP_DECL(x) __CONCAT(sip_,x)
132 #endif
133
134 #define SIP_STR(x) __STRING(SIP_DECL(x))
135
136 /*
137 * Transmit descriptor list size. This is arbitrary, but allocate
138  * enough descriptors for 256 pending transmissions, and 8 segments
139 * per packet. This MUST work out to a power of 2.
140 */
141 #define SIP_NTXSEGS 8
142
143 #define SIP_TXQUEUELEN 256
144 #define SIP_NTXDESC (SIP_TXQUEUELEN * SIP_NTXSEGS)
145 #define SIP_NTXDESC_MASK (SIP_NTXDESC - 1)
146 #define SIP_NEXTTX(x) (((x) + 1) & SIP_NTXDESC_MASK)
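/*
 * Worked numbers for the sizing above (illustrative only): with
 * SIP_TXQUEUELEN == 256 and SIP_NTXSEGS == 8, SIP_NTXDESC is
 * 256 * 8 == 2048, a power of 2, so SIP_NTXDESC_MASK == 0x7ff and
 * SIP_NEXTTX() can wrap with a mask instead of a modulo, e.g.
 * SIP_NEXTTX(2047) == (2048 & 0x7ff) == 0.
 */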
147
148 /*
149 * Receive descriptor list size. We have one Rx buffer per incoming
150 * packet, so this logic is a little simpler.
151 *
152 * Actually, on the DP83820, we allow the packet to consume more than
153 * one buffer, in order to support jumbo Ethernet frames. In that
154 * case, a packet may consume up to 5 buffers (assuming a 2048 byte
155 * mbuf cluster). 256 receive buffers is only 51 maximum size packets,
156 * so we'd better be quick about handling receive interrupts.
157 */
158 #if defined(DP83820)
159 #define SIP_NRXDESC 256
160 #else
161 #define SIP_NRXDESC 128
162 #endif /* DP83820 */
163 #define SIP_NRXDESC_MASK (SIP_NRXDESC - 1)
164 #define SIP_NEXTRX(x) (((x) + 1) & SIP_NRXDESC_MASK)
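/*
 * Rough arithmetic behind the figures above (illustrative, assuming a
 * ~9000 byte jumbo frame and 2048 byte mbuf clusters): on the DP83820
 * each Rx buffer holds SIP_RXBUF_LEN == MCLBYTES - 4 == 2044 bytes, so
 * a maximum-size packet spans about 9000 / 2044, rounded up to 5
 * buffers, and 256 descriptors cover only 256 / 5 == 51 such packets.
 */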
165
166 /*
167 * Control structures are DMA'd to the SiS900 chip. We allocate them in
168 * a single clump that maps to a single DMA segment to make several things
169 * easier.
170 */
171 struct sip_control_data {
172 /*
173 * The transmit descriptors.
174 */
175 struct sip_desc scd_txdescs[SIP_NTXDESC];
176
177 /*
178 * The receive descriptors.
179 */
180 struct sip_desc scd_rxdescs[SIP_NRXDESC];
181 };
182
183 #define SIP_CDOFF(x) offsetof(struct sip_control_data, x)
184 #define SIP_CDTXOFF(x) SIP_CDOFF(scd_txdescs[(x)])
185 #define SIP_CDRXOFF(x) SIP_CDOFF(scd_rxdescs[(x)])
186
187 /*
188 * Software state for transmit jobs.
189 */
190 struct sip_txsoft {
191 struct mbuf *txs_mbuf; /* head of our mbuf chain */
192 bus_dmamap_t txs_dmamap; /* our DMA map */
193 int txs_firstdesc; /* first descriptor in packet */
194 int txs_lastdesc; /* last descriptor in packet */
195 SIMPLEQ_ENTRY(sip_txsoft) txs_q;
196 };
197
198 SIMPLEQ_HEAD(sip_txsq, sip_txsoft);
199
200 /*
201 * Software state for receive jobs.
202 */
203 struct sip_rxsoft {
204 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
205 bus_dmamap_t rxs_dmamap; /* our DMA map */
206 };
207
208 /*
209 * Software state per device.
210 */
211 struct sip_softc {
212 struct device sc_dev; /* generic device information */
213 bus_space_tag_t sc_st; /* bus space tag */
214 bus_space_handle_t sc_sh; /* bus space handle */
215 bus_dma_tag_t sc_dmat; /* bus DMA tag */
216 struct ethercom sc_ethercom; /* ethernet common data */
217 void *sc_sdhook; /* shutdown hook */
218
219 const struct sip_product *sc_model; /* which model are we? */
220
221 void *sc_ih; /* interrupt cookie */
222
223 struct mii_data sc_mii; /* MII/media information */
224
225 struct callout sc_tick_ch; /* tick callout */
226
227 bus_dmamap_t sc_cddmamap; /* control data DMA map */
228 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
229
230 /*
231 * Software state for transmit and receive descriptors.
232 */
233 struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
234 struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];
235
236 /*
237 * Control data structures.
238 */
239 struct sip_control_data *sc_control_data;
240 #define sc_txdescs sc_control_data->scd_txdescs
241 #define sc_rxdescs sc_control_data->scd_rxdescs
242
243 #ifdef SIP_EVENT_COUNTERS
244 /*
245 * Event counters.
246 */
247 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
248 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
249 struct evcnt sc_ev_txintr; /* Tx interrupts */
250 struct evcnt sc_ev_rxintr; /* Rx interrupts */
251 #ifdef DP83820
252 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
253 struct evcnt sc_ev_rxtcpsum; /* TCP checksums checked in-bound */
254 	struct evcnt sc_ev_rxudpsum;	/* UDP checksums checked in-bound */
255 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
256 struct evcnt sc_ev_txtcpsum; /* TCP checksums comp. out-bound */
257 struct evcnt sc_ev_txudpsum; /* UDP checksums comp. out-bound */
258 #endif /* DP83820 */
259 #endif /* SIP_EVENT_COUNTERS */
260
261 u_int32_t sc_txcfg; /* prototype TXCFG register */
262 u_int32_t sc_rxcfg; /* prototype RXCFG register */
263 u_int32_t sc_imr; /* prototype IMR register */
264 u_int32_t sc_rfcr; /* prototype RFCR register */
265
266 u_int32_t sc_cfg; /* prototype CFG register */
267
268 #ifdef DP83820
269 u_int32_t sc_gpior; /* prototype GPIOR register */
270 #endif /* DP83820 */
271
272 u_int32_t sc_tx_fill_thresh; /* transmit fill threshold */
273 u_int32_t sc_tx_drain_thresh; /* transmit drain threshold */
274
275 u_int32_t sc_rx_drain_thresh; /* receive drain threshold */
276
277 int sc_flags; /* misc. flags; see below */
278
279 int sc_txfree; /* number of free Tx descriptors */
280 int sc_txnext; /* next ready Tx descriptor */
281
282 struct sip_txsq sc_txfreeq; /* free Tx descsofts */
283 struct sip_txsq sc_txdirtyq; /* dirty Tx descsofts */
284
285 int sc_rxptr; /* next ready Rx descriptor/descsoft */
286 #if defined(DP83820)
287 int sc_rxdiscard;
288 int sc_rxlen;
289 struct mbuf *sc_rxhead;
290 struct mbuf *sc_rxtail;
291 struct mbuf **sc_rxtailp;
292 #endif /* DP83820 */
293 };
294
295 /* sc_flags */
296 #define SIPF_PAUSED 0x00000001 /* paused (802.3x flow control) */
297
298 #ifdef DP83820
299 #define SIP_RXCHAIN_RESET(sc) \
300 do { \
301 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
302 *(sc)->sc_rxtailp = NULL; \
303 (sc)->sc_rxlen = 0; \
304 } while (/*CONSTCOND*/0)
305
306 #define SIP_RXCHAIN_LINK(sc, m) \
307 do { \
308 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
309 (sc)->sc_rxtailp = &(m)->m_next; \
310 } while (/*CONSTCOND*/0)
311 #endif /* DP83820 */
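/*
 * Sketch of how the chain macros above are used (descriptive only):
 * SIP_RXCHAIN_RESET() points sc_rxtailp back at sc_rxhead, so the
 * first SIP_RXCHAIN_LINK(sc, m) stores m in sc_rxhead, and each
 * subsequent link appends to the previous mbuf's m_next, leaving
 * sc_rxtail/sc_rxtailp at the end of the chain.  sip_rxintr() then
 * terminates the chain with *sc_rxtailp = NULL once a descriptor with
 * CMDSTS_MORE clear is seen.
 */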
312
313 #ifdef SIP_EVENT_COUNTERS
314 #define SIP_EVCNT_INCR(ev) (ev)->ev_count++
315 #else
316 #define SIP_EVCNT_INCR(ev) /* nothing */
317 #endif
318
319 #define SIP_CDTXADDR(sc, x) ((sc)->sc_cddma + SIP_CDTXOFF((x)))
320 #define SIP_CDRXADDR(sc, x) ((sc)->sc_cddma + SIP_CDRXOFF((x)))
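/*
 * For reference (descriptive only): SIP_CDTXADDR()/SIP_CDRXADDR() turn
 * a descriptor index into the device-visible DMA address of that
 * descriptor, by adding its offset within sip_control_data to the base
 * address of the loaded control-data DMA map (sc_cddma).  These are
 * the values written into sipd_link and the TXDP/RXDP registers.
 */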
321
322 #define SIP_CDTXSYNC(sc, x, n, ops) \
323 do { \
324 int __x, __n; \
325 \
326 __x = (x); \
327 __n = (n); \
328 \
329 /* If it will wrap around, sync to the end of the ring. */ \
330 if ((__x + __n) > SIP_NTXDESC) { \
331 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
332 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * \
333 (SIP_NTXDESC - __x), (ops)); \
334 __n -= (SIP_NTXDESC - __x); \
335 __x = 0; \
336 } \
337 \
338 /* Now sync whatever is left. */ \
339 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
340 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops)); \
341 } while (0)
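/*
 * Example of the wrap-around handling above (illustrative only): with
 * SIP_NTXDESC == 2048, SIP_CDTXSYNC(sc, 2046, 4, ops) issues two
 * bus_dmamap_sync() calls, one covering descriptors 2046-2047 and one
 * covering descriptors 0-1, since the range crosses the end of the
 * ring.
 */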
342
343 #define SIP_CDRXSYNC(sc, x, ops) \
344 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
345 SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))
346
347 #ifdef DP83820
348 #define SIP_INIT_RXDESC_EXTSTS __sipd->sipd_extsts = 0;
349 #define SIP_RXBUF_LEN (MCLBYTES - 4)
350 #else
351 #define SIP_INIT_RXDESC_EXTSTS /* nothing */
352 #define SIP_RXBUF_LEN (MCLBYTES - 1) /* field width */
353 #endif
354 #define SIP_INIT_RXDESC(sc, x) \
355 do { \
356 struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
357 struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)]; \
358 \
359 __sipd->sipd_link = \
360 htole32(SIP_CDRXADDR((sc), SIP_NEXTRX((x)))); \
361 __sipd->sipd_bufptr = \
362 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr); \
363 __sipd->sipd_cmdsts = htole32(CMDSTS_INTR | \
364 (SIP_RXBUF_LEN & CMDSTS_SIZE_MASK)); \
365 SIP_INIT_RXDESC_EXTSTS \
366 SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
367 } while (0)
368
369 #define SIP_TIMEOUT 1000
370
371 void SIP_DECL(start)(struct ifnet *);
372 void SIP_DECL(watchdog)(struct ifnet *);
373 int SIP_DECL(ioctl)(struct ifnet *, u_long, caddr_t);
374 int SIP_DECL(init)(struct ifnet *);
375 void SIP_DECL(stop)(struct ifnet *, int);
376
377 void SIP_DECL(shutdown)(void *);
378
379 void SIP_DECL(reset)(struct sip_softc *);
380 void SIP_DECL(rxdrain)(struct sip_softc *);
381 int SIP_DECL(add_rxbuf)(struct sip_softc *, int);
382 void SIP_DECL(read_eeprom)(struct sip_softc *, int, int, u_int16_t *);
383 void SIP_DECL(tick)(void *);
384
385 #if !defined(DP83820)
386 void SIP_DECL(sis900_set_filter)(struct sip_softc *);
387 #endif /* ! DP83820 */
388 void SIP_DECL(dp83815_set_filter)(struct sip_softc *);
389
390 #if defined(DP83820)
391 void SIP_DECL(dp83820_read_macaddr)(struct sip_softc *,
392 const struct pci_attach_args *, u_int8_t *);
393 #else
394 void SIP_DECL(sis900_read_macaddr)(struct sip_softc *,
395 const struct pci_attach_args *, u_int8_t *);
396 void SIP_DECL(dp83815_read_macaddr)(struct sip_softc *,
397 const struct pci_attach_args *, u_int8_t *);
398 #endif /* DP83820 */
399
400 int SIP_DECL(intr)(void *);
401 void SIP_DECL(txintr)(struct sip_softc *);
402 void SIP_DECL(rxintr)(struct sip_softc *);
403
404 #if defined(DP83820)
405 int SIP_DECL(dp83820_mii_readreg)(struct device *, int, int);
406 void SIP_DECL(dp83820_mii_writereg)(struct device *, int, int, int);
407 void SIP_DECL(dp83820_mii_statchg)(struct device *);
408 #else
409 int SIP_DECL(sis900_mii_readreg)(struct device *, int, int);
410 void SIP_DECL(sis900_mii_writereg)(struct device *, int, int, int);
411 void SIP_DECL(sis900_mii_statchg)(struct device *);
412
413 int SIP_DECL(dp83815_mii_readreg)(struct device *, int, int);
414 void SIP_DECL(dp83815_mii_writereg)(struct device *, int, int, int);
415 void SIP_DECL(dp83815_mii_statchg)(struct device *);
416 #endif /* DP83820 */
417
418 int SIP_DECL(mediachange)(struct ifnet *);
419 void SIP_DECL(mediastatus)(struct ifnet *, struct ifmediareq *);
420
421 int SIP_DECL(match)(struct device *, struct cfdata *, void *);
422 void SIP_DECL(attach)(struct device *, struct device *, void *);
423
424 int SIP_DECL(copy_small) = 0;
425
426 struct cfattach SIP_DECL(ca) = {
427 sizeof(struct sip_softc), SIP_DECL(match), SIP_DECL(attach),
428 };
429
430 /*
431 * Descriptions of the variants of the SiS900.
432 */
433 struct sip_variant {
434 int (*sipv_mii_readreg)(struct device *, int, int);
435 void (*sipv_mii_writereg)(struct device *, int, int, int);
436 void (*sipv_mii_statchg)(struct device *);
437 void (*sipv_set_filter)(struct sip_softc *);
438 void (*sipv_read_macaddr)(struct sip_softc *,
439 const struct pci_attach_args *, u_int8_t *);
440 };
441
442 #if defined(DP83820)
443 u_int32_t SIP_DECL(dp83820_mii_bitbang_read)(struct device *);
444 void SIP_DECL(dp83820_mii_bitbang_write)(struct device *, u_int32_t);
445
446 const struct mii_bitbang_ops SIP_DECL(dp83820_mii_bitbang_ops) = {
447 SIP_DECL(dp83820_mii_bitbang_read),
448 SIP_DECL(dp83820_mii_bitbang_write),
449 {
450 EROMAR_MDIO, /* MII_BIT_MDO */
451 EROMAR_MDIO, /* MII_BIT_MDI */
452 EROMAR_MDC, /* MII_BIT_MDC */
453 EROMAR_MDDIR, /* MII_BIT_DIR_HOST_PHY */
454 0, /* MII_BIT_DIR_PHY_HOST */
455 }
456 };
457 #endif /* DP83820 */
458
459 #if defined(DP83820)
460 const struct sip_variant SIP_DECL(variant_dp83820) = {
461 SIP_DECL(dp83820_mii_readreg),
462 SIP_DECL(dp83820_mii_writereg),
463 SIP_DECL(dp83820_mii_statchg),
464 SIP_DECL(dp83815_set_filter),
465 SIP_DECL(dp83820_read_macaddr),
466 };
467 #else
468 const struct sip_variant SIP_DECL(variant_sis900) = {
469 SIP_DECL(sis900_mii_readreg),
470 SIP_DECL(sis900_mii_writereg),
471 SIP_DECL(sis900_mii_statchg),
472 SIP_DECL(sis900_set_filter),
473 SIP_DECL(sis900_read_macaddr),
474 };
475
476 const struct sip_variant SIP_DECL(variant_dp83815) = {
477 SIP_DECL(dp83815_mii_readreg),
478 SIP_DECL(dp83815_mii_writereg),
479 SIP_DECL(dp83815_mii_statchg),
480 SIP_DECL(dp83815_set_filter),
481 SIP_DECL(dp83815_read_macaddr),
482 };
483 #endif /* DP83820 */
484
485 /*
486 * Devices supported by this driver.
487 */
488 const struct sip_product {
489 pci_vendor_id_t sip_vendor;
490 pci_product_id_t sip_product;
491 const char *sip_name;
492 const struct sip_variant *sip_variant;
493 } SIP_DECL(products)[] = {
494 #if defined(DP83820)
495 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83820,
496 "NatSemi DP83820 Gigabit Ethernet",
497 &SIP_DECL(variant_dp83820) },
498 #else
499 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900,
500 "SiS 900 10/100 Ethernet",
501 &SIP_DECL(variant_sis900) },
502 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016,
503 "SiS 7016 10/100 Ethernet",
504 &SIP_DECL(variant_sis900) },
505
506 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815,
507 "NatSemi DP83815 10/100 Ethernet",
508 &SIP_DECL(variant_dp83815) },
509 #endif /* DP83820 */
510
511 { 0, 0,
512 NULL,
513 NULL },
514 };
515
516 static const struct sip_product *
517 SIP_DECL(lookup)(const struct pci_attach_args *pa)
518 {
519 const struct sip_product *sip;
520
521 for (sip = SIP_DECL(products); sip->sip_name != NULL; sip++) {
522 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
523 PCI_PRODUCT(pa->pa_id) == sip->sip_product)
524 return (sip);
525 }
526 return (NULL);
527 }
528
529 int
530 SIP_DECL(match)(struct device *parent, struct cfdata *cf, void *aux)
531 {
532 struct pci_attach_args *pa = aux;
533
534 if (SIP_DECL(lookup)(pa) != NULL)
535 return (1);
536
537 return (0);
538 }
539
540 void
541 SIP_DECL(attach)(struct device *parent, struct device *self, void *aux)
542 {
543 struct sip_softc *sc = (struct sip_softc *) self;
544 struct pci_attach_args *pa = aux;
545 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
546 pci_chipset_tag_t pc = pa->pa_pc;
547 pci_intr_handle_t ih;
548 const char *intrstr = NULL;
549 bus_space_tag_t iot, memt;
550 bus_space_handle_t ioh, memh;
551 bus_dma_segment_t seg;
552 int ioh_valid, memh_valid;
553 int i, rseg, error;
554 const struct sip_product *sip;
555 pcireg_t pmode;
556 u_int8_t enaddr[ETHER_ADDR_LEN];
557 int pmreg;
558 #ifdef DP83820
559 pcireg_t memtype;
560 u_int32_t reg;
561 #endif /* DP83820 */
562
563 callout_init(&sc->sc_tick_ch);
564
565 sip = SIP_DECL(lookup)(pa);
566 if (sip == NULL) {
567 printf("\n");
568 panic(SIP_STR(attach) ": impossible");
569 }
570
571 printf(": %s\n", sip->sip_name);
572
573 sc->sc_model = sip;
574
575 /*
576 * Map the device.
577 */
578 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
579 PCI_MAPREG_TYPE_IO, 0,
580 &iot, &ioh, NULL, NULL) == 0);
581 #ifdef DP83820
582 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA);
583 switch (memtype) {
584 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
585 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
586 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
587 memtype, 0, &memt, &memh, NULL, NULL) == 0);
588 break;
589 default:
590 memh_valid = 0;
591 }
592 #else
593 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
594 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
595 &memt, &memh, NULL, NULL) == 0);
596 #endif /* DP83820 */
597
598 if (memh_valid) {
599 sc->sc_st = memt;
600 sc->sc_sh = memh;
601 } else if (ioh_valid) {
602 sc->sc_st = iot;
603 sc->sc_sh = ioh;
604 } else {
605 printf("%s: unable to map device registers\n",
606 sc->sc_dev.dv_xname);
607 return;
608 }
609
610 sc->sc_dmat = pa->pa_dmat;
611
612 /* Enable bus mastering. */
613 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
614 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
615 PCI_COMMAND_MASTER_ENABLE);
616
617 /* Get it out of power save mode if needed. */
618 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
619 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
620 if (pmode == 3) {
621 /*
622 * The card has lost all configuration data in
623 * this state, so punt.
624 */
625 printf("%s: unable to wake up from power state D3\n",
626 sc->sc_dev.dv_xname);
627 return;
628 }
629 if (pmode != 0) {
630 printf("%s: waking up from power state D%d\n",
631 sc->sc_dev.dv_xname, pmode);
632 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
633 }
634 }
635
636 /*
637 * Map and establish our interrupt.
638 */
639 if (pci_intr_map(pa, &ih)) {
640 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
641 return;
642 }
643 intrstr = pci_intr_string(pc, ih);
644 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, SIP_DECL(intr), sc);
645 if (sc->sc_ih == NULL) {
646 printf("%s: unable to establish interrupt",
647 sc->sc_dev.dv_xname);
648 if (intrstr != NULL)
649 printf(" at %s", intrstr);
650 printf("\n");
651 return;
652 }
653 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
654
655 SIMPLEQ_INIT(&sc->sc_txfreeq);
656 SIMPLEQ_INIT(&sc->sc_txdirtyq);
657
658 /*
659 * Allocate the control data structures, and create and load the
660 * DMA map for it.
661 */
662 if ((error = bus_dmamem_alloc(sc->sc_dmat,
663 sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
664 0)) != 0) {
665 printf("%s: unable to allocate control data, error = %d\n",
666 sc->sc_dev.dv_xname, error);
667 goto fail_0;
668 }
669
670 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
671 sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data,
672 BUS_DMA_COHERENT)) != 0) {
673 printf("%s: unable to map control data, error = %d\n",
674 sc->sc_dev.dv_xname, error);
675 goto fail_1;
676 }
677
678 if ((error = bus_dmamap_create(sc->sc_dmat,
679 sizeof(struct sip_control_data), 1,
680 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
681 printf("%s: unable to create control data DMA map, "
682 "error = %d\n", sc->sc_dev.dv_xname, error);
683 goto fail_2;
684 }
685
686 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
687 sc->sc_control_data, sizeof(struct sip_control_data), NULL,
688 0)) != 0) {
689 printf("%s: unable to load control data DMA map, error = %d\n",
690 sc->sc_dev.dv_xname, error);
691 goto fail_3;
692 }
693
694 /*
695 * Create the transmit buffer DMA maps.
696 */
697 for (i = 0; i < SIP_TXQUEUELEN; i++) {
698 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
699 SIP_NTXSEGS, MCLBYTES, 0, 0,
700 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
701 printf("%s: unable to create tx DMA map %d, "
702 "error = %d\n", sc->sc_dev.dv_xname, i, error);
703 goto fail_4;
704 }
705 }
706
707 /*
708 * Create the receive buffer DMA maps.
709 */
710 for (i = 0; i < SIP_NRXDESC; i++) {
711 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
712 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
713 printf("%s: unable to create rx DMA map %d, "
714 "error = %d\n", sc->sc_dev.dv_xname, i, error);
715 goto fail_5;
716 }
717 sc->sc_rxsoft[i].rxs_mbuf = NULL;
718 }
719
720 /*
721 * Reset the chip to a known state.
722 */
723 SIP_DECL(reset)(sc);
724
725 /*
726 * Read the Ethernet address from the EEPROM. This might
727 * also fetch other stuff from the EEPROM and stash it
728 * in the softc.
729 */
730 sc->sc_cfg = 0;
731 (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr);
732
733 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
734 ether_sprintf(enaddr));
735
736 /*
737 * Initialize the configuration register: aggressive PCI
738 * bus request algorithm, default backoff, default OW timer,
739 * default parity error detection.
740 *
741 * NOTE: "Big endian mode" is useless on the SiS900 and
742 * friends -- it affects packet data, not descriptors.
743 */
744 #ifdef DP83820
745 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
746 if (reg & CFG_PCI64_DET) {
747 printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);
748 /*
749 * XXX Need some PCI flags indicating support for
750 * XXX 64-bit addressing (SAC or DAC) and 64-bit
751 * XXX data path.
752 */
753 }
754 if (sc->sc_cfg & (CFG_TBI_EN|CFG_EXT_125)) {
755 const char *sep = "";
756 printf("%s: using ", sc->sc_dev.dv_xname);
757 if (sc->sc_cfg & CFG_EXT_125) {
758 printf("%s125MHz clock", sep);
759 sep = ", ";
760 }
761 if (sc->sc_cfg & CFG_TBI_EN) {
762 printf("%sten-bit interface", sep);
763 sep = ", ";
764 }
765 printf("\n");
766 }
767 if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0)
768 sc->sc_cfg |= CFG_MRM_DIS;
769 if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
770 sc->sc_cfg |= CFG_MWI_DIS;
771
772 /*
773 * Use the extended descriptor format on the DP83820. This
774 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
775 * checksumming.
776 */
777 sc->sc_cfg |= CFG_EXTSTS_EN;
778 #endif /* DP83820 */
779
780 /*
781 * Initialize our media structures and probe the MII.
782 */
783 sc->sc_mii.mii_ifp = ifp;
784 sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg;
785 sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg;
786 sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg;
787 ifmedia_init(&sc->sc_mii.mii_media, 0, SIP_DECL(mediachange),
788 SIP_DECL(mediastatus));
789 #ifdef DP83820
790 if (sc->sc_cfg & CFG_TBI_EN) {
791 /* Using ten-bit interface. */
792 printf("%s: TBI -- FIXME\n", sc->sc_dev.dv_xname);
793 } else {
794 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
795 MII_OFFSET_ANY, 0);
796 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
797 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
798 0, NULL);
799 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
800 } else
801 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
802 }
803 #else
804 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
805 MII_OFFSET_ANY, 0);
806 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
807 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
808 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
809 } else
810 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
811 #endif /* DP83820 */
812
813 ifp = &sc->sc_ethercom.ec_if;
814 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
815 ifp->if_softc = sc;
816 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
817 ifp->if_ioctl = SIP_DECL(ioctl);
818 ifp->if_start = SIP_DECL(start);
819 ifp->if_watchdog = SIP_DECL(watchdog);
820 ifp->if_init = SIP_DECL(init);
821 ifp->if_stop = SIP_DECL(stop);
822 IFQ_SET_READY(&ifp->if_snd);
823
824 /*
825 * We can support 802.1Q VLAN-sized frames.
826 */
827 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
828
829 #ifdef DP83820
830 /*
831 * And the DP83820 can do VLAN tagging in hardware, and
832 * support the jumbo Ethernet MTU.
833 */
834 sc->sc_ethercom.ec_capabilities |=
835 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
836
837 /*
838 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums
839 * in hardware.
840 */
841 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
842 IFCAP_CSUM_UDPv4;
843 #endif /* DP83820 */
844
845 /*
846 * Attach the interface.
847 */
848 if_attach(ifp);
849 ether_ifattach(ifp, enaddr);
850
851 #ifdef SIP_EVENT_COUNTERS
852 /*
853 * Attach event counters.
854 */
855 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
856 NULL, sc->sc_dev.dv_xname, "txsstall");
857 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
858 NULL, sc->sc_dev.dv_xname, "txdstall");
859 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
860 NULL, sc->sc_dev.dv_xname, "txintr");
861 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
862 NULL, sc->sc_dev.dv_xname, "rxintr");
863 #ifdef DP83820
864 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
865 NULL, sc->sc_dev.dv_xname, "rxipsum");
866 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC,
867 NULL, sc->sc_dev.dv_xname, "rxtcpsum");
868 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC,
869 NULL, sc->sc_dev.dv_xname, "rxudpsum");
870 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
871 NULL, sc->sc_dev.dv_xname, "txipsum");
872 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC,
873 NULL, sc->sc_dev.dv_xname, "txtcpsum");
874 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC,
875 NULL, sc->sc_dev.dv_xname, "txudpsum");
876 #endif /* DP83820 */
877 #endif /* SIP_EVENT_COUNTERS */
878
879 /*
880 	 * Make sure the interface is shut down during reboot.
881 */
882 sc->sc_sdhook = shutdownhook_establish(SIP_DECL(shutdown), sc);
883 if (sc->sc_sdhook == NULL)
884 printf("%s: WARNING: unable to establish shutdown hook\n",
885 sc->sc_dev.dv_xname);
886 return;
887
888 /*
889 * Free any resources we've allocated during the failed attach
890 * attempt. Do this in reverse order and fall through.
891 */
892 fail_5:
893 for (i = 0; i < SIP_NRXDESC; i++) {
894 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
895 bus_dmamap_destroy(sc->sc_dmat,
896 sc->sc_rxsoft[i].rxs_dmamap);
897 }
898 fail_4:
899 for (i = 0; i < SIP_TXQUEUELEN; i++) {
900 if (sc->sc_txsoft[i].txs_dmamap != NULL)
901 bus_dmamap_destroy(sc->sc_dmat,
902 sc->sc_txsoft[i].txs_dmamap);
903 }
904 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
905 fail_3:
906 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
907 fail_2:
908 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
909 sizeof(struct sip_control_data));
910 fail_1:
911 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
912 fail_0:
913 return;
914 }
915
916 /*
917 * sip_shutdown:
918 *
919 * Make sure the interface is stopped at reboot time.
920 */
921 void
922 SIP_DECL(shutdown)(void *arg)
923 {
924 struct sip_softc *sc = arg;
925
926 SIP_DECL(stop)(&sc->sc_ethercom.ec_if, 1);
927 }
928
929 /*
930 * sip_start: [ifnet interface function]
931 *
932 * Start packet transmission on the interface.
933 */
934 void
935 SIP_DECL(start)(struct ifnet *ifp)
936 {
937 struct sip_softc *sc = ifp->if_softc;
938 struct mbuf *m0, *m;
939 struct sip_txsoft *txs;
940 bus_dmamap_t dmamap;
941 int error, firsttx, nexttx, lasttx, ofree, seg;
942 #ifdef DP83820
943 u_int32_t extsts;
944 #endif
945
946 /*
947 * If we've been told to pause, don't transmit any more packets.
948 */
949 if (sc->sc_flags & SIPF_PAUSED)
950 ifp->if_flags |= IFF_OACTIVE;
951
952 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
953 return;
954
955 /*
956 * Remember the previous number of free descriptors and
957 * the first descriptor we'll use.
958 */
959 ofree = sc->sc_txfree;
960 firsttx = sc->sc_txnext;
961
962 /*
963 * Loop through the send queue, setting up transmit descriptors
964 * until we drain the queue, or use up all available transmit
965 * descriptors.
966 */
967 for (;;) {
968 /* Get a work queue entry. */
969 if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) {
970 SIP_EVCNT_INCR(&sc->sc_ev_txsstall);
971 break;
972 }
973
974 /*
975 * Grab a packet off the queue.
976 */
977 IFQ_POLL(&ifp->if_snd, m0);
978 if (m0 == NULL)
979 break;
980 #ifndef DP83820
981 m = NULL;
982 #endif
983
984 dmamap = txs->txs_dmamap;
985
986 #ifdef DP83820
987 /*
988 * Load the DMA map. If this fails, the packet either
989 * didn't fit in the allotted number of segments, or we
990 * were short on resources. For the too-many-segments
991 * case, we simply report an error and drop the packet,
992 * since we can't sanely copy a jumbo packet to a single
993 * buffer.
994 */
995 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
996 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
997 if (error) {
998 if (error == EFBIG) {
999 printf("%s: Tx packet consumes too many "
1000 "DMA segments, dropping...\n",
1001 sc->sc_dev.dv_xname);
1002 IFQ_DEQUEUE(&ifp->if_snd, m0);
1003 m_freem(m0);
1004 continue;
1005 }
1006 /*
1007 * Short on resources, just stop for now.
1008 */
1009 break;
1010 }
1011 #else /* DP83820 */
1012 /*
1013 * Load the DMA map. If this fails, the packet either
1014 		 * didn't fit in the allotted number of segments, or we
1015 * were short on resources. In this case, we'll copy
1016 * and try again.
1017 */
1018 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1019 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1020 MGETHDR(m, M_DONTWAIT, MT_DATA);
1021 if (m == NULL) {
1022 printf("%s: unable to allocate Tx mbuf\n",
1023 sc->sc_dev.dv_xname);
1024 break;
1025 }
1026 if (m0->m_pkthdr.len > MHLEN) {
1027 MCLGET(m, M_DONTWAIT);
1028 if ((m->m_flags & M_EXT) == 0) {
1029 printf("%s: unable to allocate Tx "
1030 "cluster\n", sc->sc_dev.dv_xname);
1031 m_freem(m);
1032 break;
1033 }
1034 }
1035 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
1036 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1037 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
1038 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1039 if (error) {
1040 printf("%s: unable to load Tx buffer, "
1041 "error = %d\n", sc->sc_dev.dv_xname, error);
1042 break;
1043 }
1044 }
1045 #endif /* DP83820 */
1046
1047 /*
1048 * Ensure we have enough descriptors free to describe
1049 * the packet. Note, we always reserve one descriptor
1050 * at the end of the ring as a termination point, to
1051 * prevent wrap-around.
1052 */
1053 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
1054 /*
1055 * Not enough free descriptors to transmit this
1056 * packet. We haven't committed anything yet,
1057 * so just unload the DMA map, put the packet
1058 * back on the queue, and punt. Notify the upper
1059 		 * layer that there are no more slots left.
1060 *
1061 * XXX We could allocate an mbuf and copy, but
1062 * XXX is it worth it?
1063 */
1064 ifp->if_flags |= IFF_OACTIVE;
1065 bus_dmamap_unload(sc->sc_dmat, dmamap);
1066 #ifndef DP83820
1067 if (m != NULL)
1068 m_freem(m);
1069 #endif
1070 SIP_EVCNT_INCR(&sc->sc_ev_txdstall);
1071 break;
1072 }
1073
1074 IFQ_DEQUEUE(&ifp->if_snd, m0);
1075 #ifndef DP83820
1076 if (m != NULL) {
1077 m_freem(m0);
1078 m0 = m;
1079 }
1080 #endif
1081
1082 /*
1083 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1084 */
1085
1086 /* Sync the DMA map. */
1087 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1088 BUS_DMASYNC_PREWRITE);
1089
1090 /*
1091 * Initialize the transmit descriptors.
1092 */
1093 for (nexttx = sc->sc_txnext, seg = 0;
1094 seg < dmamap->dm_nsegs;
1095 seg++, nexttx = SIP_NEXTTX(nexttx)) {
1096 /*
1097 * If this is the first descriptor we're
1098 * enqueueing, don't set the OWN bit just
1099 * yet. That could cause a race condition.
1100 * We'll do it below.
1101 */
1102 sc->sc_txdescs[nexttx].sipd_bufptr =
1103 htole32(dmamap->dm_segs[seg].ds_addr);
1104 sc->sc_txdescs[nexttx].sipd_cmdsts =
1105 htole32((nexttx == firsttx ? 0 : CMDSTS_OWN) |
1106 CMDSTS_MORE | dmamap->dm_segs[seg].ds_len);
1107 #ifdef DP83820
1108 sc->sc_txdescs[nexttx].sipd_extsts = 0;
1109 #endif /* DP83820 */
1110 lasttx = nexttx;
1111 }
1112
1113 /* Clear the MORE bit on the last segment. */
1114 sc->sc_txdescs[lasttx].sipd_cmdsts &= htole32(~CMDSTS_MORE);
1115
1116 #ifdef DP83820
1117 /*
1118 * If VLANs are enabled and the packet has a VLAN tag, set
1119 * up the descriptor to encapsulate the packet for us.
1120 *
1121 * This apparently has to be on the last descriptor of
1122 * the packet.
1123 */
1124 if (sc->sc_ethercom.ec_nvlans != 0 &&
1125 (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
1126 sc->sc_txdescs[lasttx].sipd_extsts |=
1127 htole32(EXTSTS_VPKT |
1128 htons(*mtod(m, int *) & EXTSTS_VTCI));
1129 }
1130
1131 /*
1132 * If the upper-layer has requested IPv4/TCPv4/UDPv4
1133 * checksumming, set up the descriptor to do this work
1134 * for us.
1135 *
1136 * This apparently has to be on the first descriptor of
1137 * the packet.
1138 *
1139 * Byte-swap constants so the compiler can optimize.
1140 */
1141 extsts = 0;
1142 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1143 KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4);
1144 SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
1145 extsts |= htole32(EXTSTS_IPPKT);
1146 }
1147 if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
1148 KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4);
1149 SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
1150 extsts |= htole32(EXTSTS_TCPPKT);
1151 } else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
1152 KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4);
1153 SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
1154 extsts |= htole32(EXTSTS_UDPPKT);
1155 }
1156 sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts;
1157 #endif /* DP83820 */
1158
1159 /* Sync the descriptors we're using. */
1160 SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1161 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1162
1163 /*
1164 * Store a pointer to the packet so we can free it later,
1165 * and remember what txdirty will be once the packet is
1166 * done.
1167 */
1168 txs->txs_mbuf = m0;
1169 txs->txs_firstdesc = sc->sc_txnext;
1170 txs->txs_lastdesc = lasttx;
1171
1172 /* Advance the tx pointer. */
1173 sc->sc_txfree -= dmamap->dm_nsegs;
1174 sc->sc_txnext = nexttx;
1175
1176 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs, txs_q);
1177 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1178
1179 #if NBPFILTER > 0
1180 /*
1181 * Pass the packet to any BPF listeners.
1182 */
1183 if (ifp->if_bpf)
1184 bpf_mtap(ifp->if_bpf, m0);
1185 #endif /* NBPFILTER > 0 */
1186 }
1187
1188 if (txs == NULL || sc->sc_txfree == 0) {
1189 /* No more slots left; notify upper layer. */
1190 ifp->if_flags |= IFF_OACTIVE;
1191 }
1192
1193 if (sc->sc_txfree != ofree) {
1194 /*
1195 * Cause a descriptor interrupt to happen on the
1196 * last packet we enqueued.
1197 */
1198 sc->sc_txdescs[lasttx].sipd_cmdsts |= htole32(CMDSTS_INTR);
1199 SIP_CDTXSYNC(sc, lasttx, 1,
1200 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1201
1202 /*
1203 * The entire packet chain is set up. Give the
1204 		 * first descriptor to the chip now.
1205 */
1206 sc->sc_txdescs[firsttx].sipd_cmdsts |= htole32(CMDSTS_OWN);
1207 SIP_CDTXSYNC(sc, firsttx, 1,
1208 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1209
1210 /*
1211 * Start the transmit process. Note, the manual says
1212 * that if there are no pending transmissions in the
1213 * chip's internal queue (indicated by TXE being clear),
1214 * then the driver software must set the TXDP to the
1215 * first descriptor to be transmitted. However, if we
1216 		 * do this, it causes serious performance degradation on
1217 		 * the DP83820 under load; not setting TXDP doesn't seem
1218 * to adversely affect the SiS 900 or DP83815.
1219 *
1220 * Well, I guess it wouldn't be the first time a manual
1221 * has lied -- and they could be speaking of the NULL-
1222 * terminated descriptor list case, rather than OWN-
1223 * terminated rings.
1224 */
1225 #if 0
1226 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
1227 CR_TXE) == 0) {
1228 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
1229 SIP_CDTXADDR(sc, firsttx));
1230 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1231 }
1232 #else
1233 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1234 #endif
1235
1236 /* Set a watchdog timer in case the chip flakes out. */
1237 ifp->if_timer = 5;
1238 }
1239 }
1240
1241 /*
1242 * sip_watchdog: [ifnet interface function]
1243 *
1244 * Watchdog timer handler.
1245 */
1246 void
1247 SIP_DECL(watchdog)(struct ifnet *ifp)
1248 {
1249 struct sip_softc *sc = ifp->if_softc;
1250
1251 /*
1252 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
1253 * If we get a timeout, try and sweep up transmit descriptors.
1254 * If we manage to sweep them all up, ignore the lack of
1255 * interrupt.
1256 */
1257 SIP_DECL(txintr)(sc);
1258
1259 if (sc->sc_txfree != SIP_NTXDESC) {
1260 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1261 ifp->if_oerrors++;
1262
1263 /* Reset the interface. */
1264 (void) SIP_DECL(init)(ifp);
1265 } else if (ifp->if_flags & IFF_DEBUG)
1266 printf("%s: recovered from device timeout\n",
1267 sc->sc_dev.dv_xname);
1268
1269 /* Try to get more packets going. */
1270 SIP_DECL(start)(ifp);
1271 }
1272
1273 /*
1274 * sip_ioctl: [ifnet interface function]
1275 *
1276 * Handle control requests from the operator.
1277 */
1278 int
1279 SIP_DECL(ioctl)(struct ifnet *ifp, u_long cmd, caddr_t data)
1280 {
1281 struct sip_softc *sc = ifp->if_softc;
1282 struct ifreq *ifr = (struct ifreq *)data;
1283 int s, error;
1284
1285 s = splnet();
1286
1287 switch (cmd) {
1288 case SIOCSIFMEDIA:
1289 case SIOCGIFMEDIA:
1290 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1291 break;
1292
1293 default:
1294 error = ether_ioctl(ifp, cmd, data);
1295 if (error == ENETRESET) {
1296 /*
1297 * Multicast list has changed; set the hardware filter
1298 * accordingly.
1299 */
1300 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
1301 error = 0;
1302 }
1303 break;
1304 }
1305
1306 /* Try to get more packets going. */
1307 SIP_DECL(start)(ifp);
1308
1309 splx(s);
1310 return (error);
1311 }
1312
1313 /*
1314 * sip_intr:
1315 *
1316 * Interrupt service routine.
1317 */
1318 int
1319 SIP_DECL(intr)(void *arg)
1320 {
1321 struct sip_softc *sc = arg;
1322 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1323 u_int32_t isr;
1324 int handled = 0;
1325
1326 for (;;) {
1327 /* Reading clears interrupt. */
1328 isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
1329 if ((isr & sc->sc_imr) == 0)
1330 break;
1331
1332 handled = 1;
1333
1334 if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
1335 SIP_EVCNT_INCR(&sc->sc_ev_rxintr);
1336
1337 /* Grab any new packets. */
1338 SIP_DECL(rxintr)(sc);
1339
1340 if (isr & ISR_RXORN) {
1341 printf("%s: receive FIFO overrun\n",
1342 sc->sc_dev.dv_xname);
1343
1344 /* XXX adjust rx_drain_thresh? */
1345 }
1346
1347 if (isr & ISR_RXIDLE) {
1348 printf("%s: receive ring overrun\n",
1349 sc->sc_dev.dv_xname);
1350
1351 /* Get the receive process going again. */
1352 bus_space_write_4(sc->sc_st, sc->sc_sh,
1353 SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
1354 bus_space_write_4(sc->sc_st, sc->sc_sh,
1355 SIP_CR, CR_RXE);
1356 }
1357 }
1358
1359 if (isr & (ISR_TXURN|ISR_TXDESC)) {
1360 SIP_EVCNT_INCR(&sc->sc_ev_txintr);
1361
1362 /* Sweep up transmit descriptors. */
1363 SIP_DECL(txintr)(sc);
1364
1365 if (isr & ISR_TXURN) {
1366 u_int32_t thresh;
1367
1368 printf("%s: transmit FIFO underrun",
1369 sc->sc_dev.dv_xname);
1370
1371 thresh = sc->sc_tx_drain_thresh + 1;
1372 if (thresh <= TXCFG_DRTH &&
1373 (thresh * 32) <= (SIP_TXFIFO_SIZE -
1374 (sc->sc_tx_fill_thresh * 32))) {
1375 printf("; increasing Tx drain "
1376 "threshold to %u bytes\n",
1377 thresh * 32);
1378 sc->sc_tx_drain_thresh = thresh;
1379 (void) SIP_DECL(init)(ifp);
1380 } else {
1381 (void) SIP_DECL(init)(ifp);
1382 printf("\n");
1383 }
1384 }
1385 }
1386
1387 #if !defined(DP83820)
1388 if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
1389 if (isr & ISR_PAUSE_ST) {
1390 sc->sc_flags |= SIPF_PAUSED;
1391 ifp->if_flags |= IFF_OACTIVE;
1392 }
1393 if (isr & ISR_PAUSE_END) {
1394 sc->sc_flags &= ~SIPF_PAUSED;
1395 ifp->if_flags &= ~IFF_OACTIVE;
1396 }
1397 }
1398 #endif /* ! DP83820 */
1399
1400 if (isr & ISR_HIBERR) {
1401 #define PRINTERR(bit, str) \
1402 if (isr & (bit)) \
1403 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1404 PRINTERR(ISR_DPERR, "parity error");
1405 PRINTERR(ISR_SSERR, "system error");
1406 PRINTERR(ISR_RMABT, "master abort");
1407 PRINTERR(ISR_RTABT, "target abort");
1408 PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
1409 (void) SIP_DECL(init)(ifp);
1410 #undef PRINTERR
1411 }
1412 }
1413
1414 /* Try to get more packets going. */
1415 SIP_DECL(start)(ifp);
1416
1417 return (handled);
1418 }
1419
1420 /*
1421 * sip_txintr:
1422 *
1423 * Helper; handle transmit interrupts.
1424 */
1425 void
1426 SIP_DECL(txintr)(struct sip_softc *sc)
1427 {
1428 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1429 struct sip_txsoft *txs;
1430 u_int32_t cmdsts;
1431
1432 if ((sc->sc_flags & SIPF_PAUSED) == 0)
1433 ifp->if_flags &= ~IFF_OACTIVE;
1434
1435 /*
1436 * Go through our Tx list and free mbufs for those
1437 * frames which have been transmitted.
1438 */
1439 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1440 SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1441 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1442
1443 cmdsts = le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
1444 if (cmdsts & CMDSTS_OWN)
1445 break;
1446
1447 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
1448
1449 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1450
1451 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1452 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1453 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1454 m_freem(txs->txs_mbuf);
1455 txs->txs_mbuf = NULL;
1456
1457 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1458
1459 /*
1460 * Check for errors and collisions.
1461 */
1462 if (cmdsts &
1463 (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
1464 ifp->if_oerrors++;
1465 if (cmdsts & CMDSTS_Tx_EC)
1466 ifp->if_collisions += 16;
1467 if (ifp->if_flags & IFF_DEBUG) {
1468 if (cmdsts & CMDSTS_Tx_ED)
1469 printf("%s: excessive deferral\n",
1470 sc->sc_dev.dv_xname);
1471 if (cmdsts & CMDSTS_Tx_EC)
1472 printf("%s: excessive collisions\n",
1473 sc->sc_dev.dv_xname);
1474 }
1475 } else {
1476 /* Packet was transmitted successfully. */
1477 ifp->if_opackets++;
1478 ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
1479 }
1480 }
1481
1482 /*
1483 * If there are no more pending transmissions, cancel the watchdog
1484 * timer.
1485 */
1486 if (txs == NULL)
1487 ifp->if_timer = 0;
1488 }
1489
1490 #if defined(DP83820)
1491 /*
1492 * sip_rxintr:
1493 *
1494 * Helper; handle receive interrupts.
1495 */
1496 void
1497 SIP_DECL(rxintr)(struct sip_softc *sc)
1498 {
1499 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1500 struct sip_rxsoft *rxs;
1501 struct mbuf *m, *tailm;
1502 u_int32_t cmdsts, extsts;
1503 int i, len;
1504
1505 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1506 rxs = &sc->sc_rxsoft[i];
1507
1508 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1509
1510 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1511 extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts);
1512
1513 /*
1514 * NOTE: OWN is set if owned by _consumer_. We're the
1515 * consumer of the receive ring, so if the bit is clear,
1516 * we have processed all of the packets.
1517 */
1518 if ((cmdsts & CMDSTS_OWN) == 0) {
1519 /*
1520 * We have processed all of the receive buffers.
1521 */
1522 break;
1523 }
1524
1525 if (__predict_false(sc->sc_rxdiscard)) {
1526 SIP_INIT_RXDESC(sc, i);
1527 if ((cmdsts & CMDSTS_MORE) == 0) {
1528 /* Reset our state. */
1529 sc->sc_rxdiscard = 0;
1530 }
1531 continue;
1532 }
1533
1534 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1535 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1536
1537 m = rxs->rxs_mbuf;
1538
1539 /*
1540 * Add a new receive buffer to the ring.
1541 */
1542 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1543 /*
1544 * Failed, throw away what we've done so
1545 * far, and discard the rest of the packet.
1546 */
1547 ifp->if_ierrors++;
1548 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1549 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1550 SIP_INIT_RXDESC(sc, i);
1551 if (cmdsts & CMDSTS_MORE)
1552 sc->sc_rxdiscard = 1;
1553 if (sc->sc_rxhead != NULL)
1554 m_freem(sc->sc_rxhead);
1555 SIP_RXCHAIN_RESET(sc);
1556 continue;
1557 }
1558
1559 SIP_RXCHAIN_LINK(sc, m);
1560
1561 /*
1562 * If this is not the end of the packet, keep
1563 * looking.
1564 */
1565 if (cmdsts & CMDSTS_MORE) {
1566 sc->sc_rxlen += m->m_len;
1567 continue;
1568 }
1569
1570 /*
1571 * Okay, we have the entire packet now...
1572 */
1573 *sc->sc_rxtailp = NULL;
1574 m = sc->sc_rxhead;
1575 tailm = sc->sc_rxtail;
1576
1577 SIP_RXCHAIN_RESET(sc);
1578
1579 /*
1580 * If an error occurred, update stats and drop the packet.
1581 */
1582 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1583 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1584 ifp->if_ierrors++;
1585 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1586 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1587 /* Receive overrun handled elsewhere. */
1588 printf("%s: receive descriptor error\n",
1589 sc->sc_dev.dv_xname);
1590 }
1591 #define PRINTERR(bit, str) \
1592 if (cmdsts & (bit)) \
1593 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1594 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1595 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1596 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1597 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1598 #undef PRINTERR
1599 m_freem(m);
1600 continue;
1601 }
1602
1603 /*
1604 * No errors.
1605 *
1606 * Note, the DP83820 includes the CRC with
1607 * every packet.
1608 */
1609 len = CMDSTS_SIZE(cmdsts);
1610 tailm->m_len = len - sc->sc_rxlen;
1611
1612 /*
1613 * If the packet is small enough to fit in a
1614 * single header mbuf, allocate one and copy
1615 * the data into it. This greatly reduces
1616 * memory consumption when we receive lots
1617 * of small packets.
1618 */
1619 if (SIP_DECL(copy_small) != 0 && len <= (MHLEN - 2)) {
1620 struct mbuf *nm;
1621 MGETHDR(nm, M_DONTWAIT, MT_DATA);
1622 if (nm == NULL) {
1623 ifp->if_ierrors++;
1624 m_freem(m);
1625 continue;
1626 }
1627 nm->m_data += 2;
1628 nm->m_pkthdr.len = nm->m_len = len;
1629 m_copydata(m, 0, len, mtod(nm, caddr_t));
1630 m_freem(m);
1631 m = nm;
1632 }
1633 #ifndef __NO_STRICT_ALIGNMENT
1634 else {
1635 /*
1636 * The DP83820's receive buffers must be 4-byte
1637 * aligned. But this means that the data after
1638 * the Ethernet header is misaligned. To compensate,
1639 * we have artificially shortened the buffer size
1640 * in the descriptor, and we do an overlapping copy
1641 * of the data two bytes further in (in the first
1642 * buffer of the chain only).
1643 */
1644 memmove(mtod(m, caddr_t) + 2, mtod(m, caddr_t),
1645 m->m_len);
1646 m->m_data += 2;
1647 }
1648 #endif /* ! __NO_STRICT_ALIGNMENT */
1649
1650 /*
1651 * If VLANs are enabled, VLAN packets have been unwrapped
1652 * for us. Associate the tag with the packet.
1653 */
1654 if (sc->sc_ethercom.ec_nvlans != 0 &&
1655 (extsts & EXTSTS_VPKT) != 0) {
1656 struct mbuf *vtag;
1657
1658 vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1659 if (vtag == NULL) {
1660 ifp->if_ierrors++;
1661 printf("%s: unable to allocate VLAN tag\n",
1662 sc->sc_dev.dv_xname);
1663 m_freem(m);
1664 continue;
1665 }
1666
1667 *mtod(vtag, int *) = ntohs(extsts & EXTSTS_VTCI);
1668 vtag->m_len = sizeof(int);
1669 }
1670
1671 /*
1672 * Set the incoming checksum information for the
1673 * packet.
1674 */
1675 if ((extsts & EXTSTS_IPPKT) != 0) {
1676 SIP_EVCNT_INCR(&sc->sc_ev_rxipsum);
1677 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1678 if (extsts & EXTSTS_Rx_IPERR)
1679 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1680 if (extsts & EXTSTS_TCPPKT) {
1681 SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
1682 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1683 if (extsts & EXTSTS_Rx_TCPERR)
1684 m->m_pkthdr.csum_flags |=
1685 M_CSUM_TCP_UDP_BAD;
1686 } else if (extsts & EXTSTS_UDPPKT) {
1687 SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum);
1688 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1689 if (extsts & EXTSTS_Rx_UDPERR)
1690 m->m_pkthdr.csum_flags |=
1691 M_CSUM_TCP_UDP_BAD;
1692 }
1693 }
1694
1695 ifp->if_ipackets++;
1696 m->m_flags |= M_HASFCS;
1697 m->m_pkthdr.rcvif = ifp;
1698 m->m_pkthdr.len = len;
1699
1700 #if NBPFILTER > 0
1701 /*
1702 * Pass this up to any BPF listeners, but only
1703 		 * pass it up the stack if it's for us.
1704 */
1705 if (ifp->if_bpf)
1706 bpf_mtap(ifp->if_bpf, m);
1707 #endif /* NBPFILTER > 0 */
1708
1709 /* Pass it on. */
1710 (*ifp->if_input)(ifp, m);
1711 }
1712
1713 /* Update the receive pointer. */
1714 sc->sc_rxptr = i;
1715 }
1716 #else /* ! DP83820 */
1717 /*
1718 * sip_rxintr:
1719 *
1720 * Helper; handle receive interrupts.
1721 */
1722 void
1723 SIP_DECL(rxintr)(struct sip_softc *sc)
1724 {
1725 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1726 struct sip_rxsoft *rxs;
1727 struct mbuf *m;
1728 u_int32_t cmdsts;
1729 int i, len;
1730
1731 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1732 rxs = &sc->sc_rxsoft[i];
1733
1734 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1735
1736 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1737
1738 /*
1739 * NOTE: OWN is set if owned by _consumer_. We're the
1740 * consumer of the receive ring, so if the bit is clear,
1741 * we have processed all of the packets.
1742 */
1743 if ((cmdsts & CMDSTS_OWN) == 0) {
1744 /*
1745 * We have processed all of the receive buffers.
1746 */
1747 break;
1748 }
1749
1750 /*
1751 * If any collisions were seen on the wire, count one.
1752 */
1753 if (cmdsts & CMDSTS_Rx_COL)
1754 ifp->if_collisions++;
1755
1756 /*
1757 * If an error occurred, update stats, clear the status
1758 * word, and leave the packet buffer in place. It will
1759 * simply be reused the next time the ring comes around.
1760 */
1761 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1762 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1763 ifp->if_ierrors++;
1764 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1765 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1766 /* Receive overrun handled elsewhere. */
1767 printf("%s: receive descriptor error\n",
1768 sc->sc_dev.dv_xname);
1769 }
1770 #define PRINTERR(bit, str) \
1771 if (cmdsts & (bit)) \
1772 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1773 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1774 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1775 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1776 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1777 #undef PRINTERR
1778 SIP_INIT_RXDESC(sc, i);
1779 continue;
1780 }
1781
1782 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1783 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1784
1785 /*
1786 * No errors; receive the packet. Note, the SiS 900
1787 * includes the CRC with every packet.
1788 */
1789 len = CMDSTS_SIZE(cmdsts);
1790
1791 #ifdef __NO_STRICT_ALIGNMENT
1792 /*
1793 * If the packet is small enough to fit in a
1794 * single header mbuf, allocate one and copy
1795 * the data into it. This greatly reduces
1796 * memory consumption when we receive lots
1797 * of small packets.
1798 *
1799 * Otherwise, we add a new buffer to the receive
1800 * chain. If this fails, we drop the packet and
1801 * recycle the old buffer.
1802 */
1803 if (SIP_DECL(copy_small) != 0 && len <= MHLEN) {
1804 MGETHDR(m, M_DONTWAIT, MT_DATA);
1805 if (m == NULL)
1806 goto dropit;
1807 memcpy(mtod(m, caddr_t),
1808 mtod(rxs->rxs_mbuf, caddr_t), len);
1809 SIP_INIT_RXDESC(sc, i);
1810 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1811 rxs->rxs_dmamap->dm_mapsize,
1812 BUS_DMASYNC_PREREAD);
1813 } else {
1814 m = rxs->rxs_mbuf;
1815 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1816 dropit:
1817 ifp->if_ierrors++;
1818 SIP_INIT_RXDESC(sc, i);
1819 bus_dmamap_sync(sc->sc_dmat,
1820 rxs->rxs_dmamap, 0,
1821 rxs->rxs_dmamap->dm_mapsize,
1822 BUS_DMASYNC_PREREAD);
1823 continue;
1824 }
1825 }
1826 #else
1827 /*
1828 * The SiS 900's receive buffers must be 4-byte aligned.
1829 * But this means that the data after the Ethernet header
1830 * is misaligned. We must allocate a new buffer and
1831 * copy the data, shifted forward 2 bytes.
1832 */
1833 MGETHDR(m, M_DONTWAIT, MT_DATA);
1834 if (m == NULL) {
1835 dropit:
1836 ifp->if_ierrors++;
1837 SIP_INIT_RXDESC(sc, i);
1838 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1839 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1840 continue;
1841 }
1842 if (len > (MHLEN - 2)) {
1843 MCLGET(m, M_DONTWAIT);
1844 if ((m->m_flags & M_EXT) == 0) {
1845 m_freem(m);
1846 goto dropit;
1847 }
1848 }
1849 m->m_data += 2;
1850
1851 /*
1852 * Note that we use clusters for incoming frames, so the
1853 * buffer is virtually contiguous.
1854 */
1855 memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
1856
1857 /* Allow the receive descriptor to continue using its mbuf. */
1858 SIP_INIT_RXDESC(sc, i);
1859 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1860 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1861 #endif /* __NO_STRICT_ALIGNMENT */
1862
1863 ifp->if_ipackets++;
1864 m->m_flags |= M_HASFCS;
1865 m->m_pkthdr.rcvif = ifp;
1866 m->m_pkthdr.len = m->m_len = len;
1867
1868 #if NBPFILTER > 0
1869 /*
1870 * Pass this up to any BPF listeners, but only
1871 		 * pass it up the stack if it's for us.
1872 */
1873 if (ifp->if_bpf)
1874 bpf_mtap(ifp->if_bpf, m);
1875 #endif /* NBPFILTER > 0 */
1876
1877 /* Pass it on. */
1878 (*ifp->if_input)(ifp, m);
1879 }
1880
1881 /* Update the receive pointer. */
1882 sc->sc_rxptr = i;
1883 }
1884 #endif /* DP83820 */
1885
1886 /*
1887 * sip_tick:
1888 *
1889 * One second timer, used to tick the MII.
1890 */
1891 void
1892 SIP_DECL(tick)(void *arg)
1893 {
1894 struct sip_softc *sc = arg;
1895 int s;
1896
1897 s = splnet();
1898 mii_tick(&sc->sc_mii);
1899 splx(s);
1900
1901 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
1902 }
1903
1904 /*
1905 * sip_reset:
1906 *
1907 * Perform a soft reset on the SiS 900.
1908 */
1909 void
1910 SIP_DECL(reset)(struct sip_softc *sc)
1911 {
1912 bus_space_tag_t st = sc->sc_st;
1913 bus_space_handle_t sh = sc->sc_sh;
1914 int i;
1915
1916 bus_space_write_4(st, sh, SIP_CR, CR_RST);
1917
1918 for (i = 0; i < SIP_TIMEOUT; i++) {
1919 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0)
1920 break;
1921 delay(2);
1922 }
1923
1924 if (i == SIP_TIMEOUT)
1925 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
1926
1927 delay(1000);
1928
1929 #ifdef DP83820
1930 /*
1931 * Set the general purpose I/O bits. Do it here in case we
1932 * need to have GPIO set up to talk to the media interface.
1933 */
1934 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior);
1935 delay(1000);
1936 #endif /* DP83820 */
1937 }
1938
1939 /*
1940 * sip_init: [ ifnet interface function ]
1941 *
1942 * Initialize the interface. Must be called at splnet().
1943 */
1944 int
1945 SIP_DECL(init)(struct ifnet *ifp)
1946 {
1947 struct sip_softc *sc = ifp->if_softc;
1948 bus_space_tag_t st = sc->sc_st;
1949 bus_space_handle_t sh = sc->sc_sh;
1950 struct sip_txsoft *txs;
1951 struct sip_rxsoft *rxs;
1952 struct sip_desc *sipd;
1953 u_int32_t reg;
1954 int i, error = 0;
1955
1956 /*
1957 * Cancel any pending I/O.
1958 */
1959 SIP_DECL(stop)(ifp, 0);
1960
1961 /*
1962 * Reset the chip to a known state.
1963 */
1964 SIP_DECL(reset)(sc);
1965
1966 #if !defined(DP83820)
1967 if (sc->sc_model->sip_vendor == PCI_VENDOR_NS &&
1968 sc->sc_model->sip_product == PCI_PRODUCT_NS_DP83815) {
1969 /*
1970 * DP83815 manual, page 78:
1971 * 4.4 Recommended Registers Configuration
1972 * For optimum performance of the DP83815, version noted
1973 * as DP83815CVNG (SRR = 203h), the listed register
1974 * modifications must be followed in sequence...
1975 *
1976 * It's not clear if this should be 302h or 203h because that
1977 * chip name is listed as SRR 302h in the description of the
1978 * SRR register. However, my revision 302h DP83815 on the
1979 * Netgear FA311 purchased in 02/2001 needs these settings
1980 * to avoid tons of errors in AcceptPerfectMatch (non-
1981 * IFF_PROMISC) mode. I do not know if other revisions need
1982 * this set or not. [briggs -- 09 March 2001]
1983 *
1984 * Note that only the low-order 12 bits of 0xe4 are documented
1985 * and that this sets reserved bits in that register.
1986 */
1987 reg = bus_space_read_4(st, sh, SIP_NS_SRR);
1988 if (reg == 0x302) {
1989 bus_space_write_4(st, sh, 0x00cc, 0x0001);
1990 bus_space_write_4(st, sh, 0x00e4, 0x189C);
1991 bus_space_write_4(st, sh, 0x00fc, 0x0000);
1992 bus_space_write_4(st, sh, 0x00f4, 0x5040);
1993 bus_space_write_4(st, sh, 0x00f8, 0x008c);
1994 }
1995 }
1996 #endif /* ! DP83820 */
1997
1998 /*
1999 * Initialize the transmit descriptor ring.
2000 */
2001 for (i = 0; i < SIP_NTXDESC; i++) {
2002 sipd = &sc->sc_txdescs[i];
2003 memset(sipd, 0, sizeof(struct sip_desc));
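		/*
		 * Link each descriptor to the following one; SIP_NEXTTX
		 * wraps the index, so the descriptors form a ring.
		 */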
2004 sipd->sipd_link = htole32(SIP_CDTXADDR(sc, SIP_NEXTTX(i)));
2005 }
2006 SIP_CDTXSYNC(sc, 0, SIP_NTXDESC,
2007 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2008 sc->sc_txfree = SIP_NTXDESC;
2009 sc->sc_txnext = 0;
2010
2011 /*
2012 * Initialize the transmit job descriptors.
2013 */
2014 SIMPLEQ_INIT(&sc->sc_txfreeq);
2015 SIMPLEQ_INIT(&sc->sc_txdirtyq);
2016 for (i = 0; i < SIP_TXQUEUELEN; i++) {
2017 txs = &sc->sc_txsoft[i];
2018 txs->txs_mbuf = NULL;
2019 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2020 }
2021
2022 /*
2023 * Initialize the receive descriptor and receive job
2024 * descriptor rings.
2025 */
2026 for (i = 0; i < SIP_NRXDESC; i++) {
2027 rxs = &sc->sc_rxsoft[i];
2028 if (rxs->rxs_mbuf == NULL) {
2029 if ((error = SIP_DECL(add_rxbuf)(sc, i)) != 0) {
2030 printf("%s: unable to allocate or map rx "
2031 "buffer %d, error = %d\n",
2032 sc->sc_dev.dv_xname, i, error);
2033 /*
2034 * XXX Should attempt to run with fewer receive
2035 * XXX buffers instead of just failing.
2036 */
2037 SIP_DECL(rxdrain)(sc);
2038 goto out;
2039 }
2040 } else
2041 SIP_INIT_RXDESC(sc, i);
2042 }
2043 sc->sc_rxptr = 0;
2044 #ifdef DP83820
2045 sc->sc_rxdiscard = 0;
2046 SIP_RXCHAIN_RESET(sc);
2047 #endif /* DP83820 */
2048
2049 /*
2050 * Set the configuration register; it's already initialized
2051 * in sip_attach().
2052 */
2053 bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg);
2054
2055 /*
2056 * Initialize the transmit fill and drain thresholds if
2057 * we have never done so.
2058 */
2059 if (sc->sc_tx_fill_thresh == 0) {
2060 /*
2061 * XXX This value should be tuned. This is the
2062 * minimum (32 bytes), and we may be able to
2063 * improve performance by increasing it.
2064 */
2065 sc->sc_tx_fill_thresh = 1;
2066 }
2067 if (sc->sc_tx_drain_thresh == 0) {
2068 /*
2069 * Start at a drain threshold of 512 bytes. We will
2070 * increase it if a DMA underrun occurs.
2071 *
2072 * XXX The minimum value of this variable should be
2073 * tuned. We may be able to improve performance
2074 * by starting with a lower value. That, however,
2075 * may trash the first few outgoing packets if the
2076 * PCI bus is saturated.
2077 */
2078 sc->sc_tx_drain_thresh = 512 / 32;
2079 }
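	/*
	 * Note that both thresholds are expressed in 32-byte units
	 * when programmed into TXCFG below (hence the divide by 32).
	 */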
2080
2081 /*
2082 * Initialize the prototype TXCFG register.
2083 */
2084 sc->sc_txcfg = TXCFG_ATP | TXCFG_MXDMA_512 |
2085 (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) |
2086 sc->sc_tx_drain_thresh;
2087 bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg);
2088
2089 /*
2090 * Initialize the receive drain threshold if we have never
2091 * done so.
2092 */
2093 if (sc->sc_rx_drain_thresh == 0) {
2094 /*
2095 * XXX This value should be tuned. This is set to the
2096 * maximum of 248 bytes, and we may be able to improve
2097 * performance by decreasing it (although we should never
2098 * set this value lower than 2; 14 bytes are required to
2099 * filter the packet).
2100 */
2101 sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT;
2102 }
2103
2104 /*
2105 * Initialize the prototype RXCFG register.
2106 */
2107 sc->sc_rxcfg = RXCFG_MXDMA_512 |
2108 (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT);
2109 bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg);
2110
2111 /* Set up the receive filter. */
2112 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
2113
2114 #ifdef DP83820
2115 /*
2116 * Initialize the VLAN/IP receive control register.
2117 * We enable checksum computation on all incoming
2118 * packets, and do not reject packets w/ bad checksums.
2119 */
2120 reg = 0;
2121 if (ifp->if_capenable &
2122 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2123 reg |= VRCR_IPEN;
2124 if (sc->sc_ethercom.ec_nvlans != 0)
2125 reg |= VRCR_VTDEN|VRCR_VTREN;
2126 bus_space_write_4(st, sh, SIP_VRCR, reg);
2127
2128 /*
2129 * Initialize the VLAN/IP transmit control register.
2130 * We enable outgoing checksum computation on a
2131 * per-packet basis.
2132 */
2133 reg = 0;
2134 if (ifp->if_capenable &
2135 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2136 reg |= VTCR_PPCHK;
2137 if (sc->sc_ethercom.ec_nvlans != 0)
2138 reg |= VTCR_VPPTI;
2139 bus_space_write_4(st, sh, SIP_VTCR, reg);
2140
2141 /*
2142 * If we're using VLANs, initialize the VLAN data register.
2143 * To understand why we bswap the VLAN Ethertype, see section
2144 * 4.2.36 of the DP83820 manual.
2145 */
2146 if (sc->sc_ethercom.ec_nvlans != 0)
2147 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN));
2148 #endif /* DP83820 */
2149
2150 /*
2151 * Give the transmit and receive rings to the chip.
2152 */
2153 bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
2154 bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
2155
2156 /*
2157 * Initialize the interrupt mask.
2158 */
2159 sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR|
2160 ISR_TXURN|ISR_TXDESC|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
2161 bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);
2162
2163 /*
2164 * Set the current media. Do this after initializing the prototype
2165 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
2166 * control.
2167 */
2168 mii_mediachg(&sc->sc_mii);
2169
2170 /*
2171 * Enable interrupts.
2172 */
2173 bus_space_write_4(st, sh, SIP_IER, IER_IE);
2174
2175 /*
2176 * Start the transmit and receive processes.
2177 */
2178 bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);
2179
2180 /*
2181 * Start the one second MII clock.
2182 */
2183 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2184
2185 /*
2186 * ...all done!
2187 */
2188 ifp->if_flags |= IFF_RUNNING;
2189 ifp->if_flags &= ~IFF_OACTIVE;
2190
2191 out:
2192 if (error)
2193 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2194 return (error);
2195 }
2196
2197 /*
2198  * sip_rxdrain:
2199 *
2200 * Drain the receive queue.
2201 */
2202 void
2203 SIP_DECL(rxdrain)(struct sip_softc *sc)
2204 {
2205 struct sip_rxsoft *rxs;
2206 int i;
2207
2208 for (i = 0; i < SIP_NRXDESC; i++) {
2209 rxs = &sc->sc_rxsoft[i];
2210 if (rxs->rxs_mbuf != NULL) {
2211 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2212 m_freem(rxs->rxs_mbuf);
2213 rxs->rxs_mbuf = NULL;
2214 }
2215 }
2216 }
2217
2218 /*
2219 * sip_stop: [ ifnet interface function ]
2220 *
2221 * Stop transmission on the interface.
2222 */
2223 void
2224 SIP_DECL(stop)(struct ifnet *ifp, int disable)
2225 {
2226 struct sip_softc *sc = ifp->if_softc;
2227 bus_space_tag_t st = sc->sc_st;
2228 bus_space_handle_t sh = sc->sc_sh;
2229 struct sip_txsoft *txs;
2230 u_int32_t cmdsts = 0; /* DEBUG */
2231
2232 /*
2233 * Stop the one second clock.
2234 */
2235 callout_stop(&sc->sc_tick_ch);
2236
2237 /* Down the MII. */
2238 mii_down(&sc->sc_mii);
2239
2240 /*
2241 * Disable interrupts.
2242 */
2243 bus_space_write_4(st, sh, SIP_IER, 0);
2244
2245 /*
2246 * Stop receiver and transmitter.
2247 */
2248 bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);
2249
2250 /*
2251 * Release any queued transmit buffers.
2252 */
2253 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
2254 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2255 SIMPLEQ_NEXT(txs, txs_q) == NULL &&
2256 (le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts) &
2257 CMDSTS_INTR) == 0)
2258 printf("%s: sip_stop: last descriptor does not "
2259 "have INTR bit set\n", sc->sc_dev.dv_xname);
2260 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
2261 #ifdef DIAGNOSTIC
2262 if (txs->txs_mbuf == NULL) {
2263 printf("%s: dirty txsoft with no mbuf chain\n",
2264 sc->sc_dev.dv_xname);
2265 panic("sip_stop");
2266 }
2267 #endif
2268 cmdsts |= /* DEBUG */
2269 le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
2270 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2271 m_freem(txs->txs_mbuf);
2272 txs->txs_mbuf = NULL;
2273 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2274 }
2275
2276 if (disable)
2277 SIP_DECL(rxdrain)(sc);
2278
2279 /*
2280 * Mark the interface down and cancel the watchdog timer.
2281 */
2282 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2283 ifp->if_timer = 0;
2284
2285 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2286 (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC)
2287 printf("%s: sip_stop: no INTR bits set in dirty tx "
2288 "descriptors\n", sc->sc_dev.dv_xname);
2289 }
2290
2291 /*
2292 * sip_read_eeprom:
2293 *
2294 * Read data from the serial EEPROM.
2295 */
2296 void
2297 SIP_DECL(read_eeprom)(struct sip_softc *sc, int word, int wordcnt,
2298 u_int16_t *data)
2299 {
2300 bus_space_tag_t st = sc->sc_st;
2301 bus_space_handle_t sh = sc->sc_sh;
2302 u_int16_t reg;
2303 int i, x;
2304
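	/*
	 * For each word: assert chip select, clock the 3-bit READ opcode
	 * and the 6-bit word address out on EEDI (MSB first), then clock
	 * 16 data bits in from EEDO, also MSB first.
	 */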
2305 for (i = 0; i < wordcnt; i++) {
2306 /* Send CHIP SELECT. */
2307 reg = EROMAR_EECS;
2308 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2309
2310 /* Shift in the READ opcode. */
2311 for (x = 3; x > 0; x--) {
2312 if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
2313 reg |= EROMAR_EEDI;
2314 else
2315 reg &= ~EROMAR_EEDI;
2316 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2317 bus_space_write_4(st, sh, SIP_EROMAR,
2318 reg | EROMAR_EESK);
2319 delay(4);
2320 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2321 delay(4);
2322 }
2323
2324 /* Shift in address. */
2325 for (x = 6; x > 0; x--) {
2326 if ((word + i) & (1 << (x - 1)))
2327 reg |= EROMAR_EEDI;
2328 else
2329 reg &= ~EROMAR_EEDI;
2330 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2331 bus_space_write_4(st, sh, SIP_EROMAR,
2332 reg | EROMAR_EESK);
2333 delay(4);
2334 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2335 delay(4);
2336 }
2337
2338 /* Shift out data. */
2339 reg = EROMAR_EECS;
2340 data[i] = 0;
2341 for (x = 16; x > 0; x--) {
2342 bus_space_write_4(st, sh, SIP_EROMAR,
2343 reg | EROMAR_EESK);
2344 delay(4);
2345 if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
2346 data[i] |= (1 << (x - 1));
2347 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2348 delay(4);
2349 }
2350
2351 /* Clear CHIP SELECT. */
2352 bus_space_write_4(st, sh, SIP_EROMAR, 0);
2353 delay(4);
2354 }
2355 }
2356
2357 /*
2358 * sip_add_rxbuf:
2359 *
2360 * Add a receive buffer to the indicated descriptor.
2361 */
2362 int
2363 SIP_DECL(add_rxbuf)(struct sip_softc *sc, int idx)
2364 {
2365 struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
2366 struct mbuf *m;
2367 int error;
2368
2369 MGETHDR(m, M_DONTWAIT, MT_DATA);
2370 if (m == NULL)
2371 return (ENOBUFS);
2372
2373 MCLGET(m, M_DONTWAIT);
2374 if ((m->m_flags & M_EXT) == 0) {
2375 m_freem(m);
2376 return (ENOBUFS);
2377 }
2378
2379 #if defined(DP83820)
2380 m->m_len = SIP_RXBUF_LEN;
2381 #endif /* DP83820 */
2382
2383 if (rxs->rxs_mbuf != NULL)
2384 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2385
2386 rxs->rxs_mbuf = m;
2387
2388 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2389 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2390 BUS_DMA_READ|BUS_DMA_NOWAIT);
2391 if (error) {
2392 printf("%s: can't load rx DMA map %d, error = %d\n",
2393 sc->sc_dev.dv_xname, idx, error);
2394 panic("sip_add_rxbuf"); /* XXX */
2395 }
2396
2397 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2398 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2399
2400 SIP_INIT_RXDESC(sc, idx);
2401
2402 return (0);
2403 }
2404
2405 #if !defined(DP83820)
2406 /*
2407 * sip_sis900_set_filter:
2408 *
2409 * Set up the receive filter.
2410 */
2411 void
2412 SIP_DECL(sis900_set_filter)(struct sip_softc *sc)
2413 {
2414 bus_space_tag_t st = sc->sc_st;
2415 bus_space_handle_t sh = sc->sc_sh;
2416 struct ethercom *ec = &sc->sc_ethercom;
2417 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2418 struct ether_multi *enm;
2419 u_int8_t *cp;
2420 struct ether_multistep step;
2421 u_int32_t crc, mchash[8];
2422
2423 /*
2424 * Initialize the prototype RFCR.
2425 */
2426 sc->sc_rfcr = RFCR_RFEN;
2427 if (ifp->if_flags & IFF_BROADCAST)
2428 sc->sc_rfcr |= RFCR_AAB;
2429 if (ifp->if_flags & IFF_PROMISC) {
2430 sc->sc_rfcr |= RFCR_AAP;
2431 goto allmulti;
2432 }
2433
2434 /*
2435 * Set up the multicast address filter by passing all multicast
2436 * addresses through a CRC generator, and then using the high-order
2437 	 * 7 bits as an index into the 128 bit multicast hash table (only
2438 	 * the lower 16 bits of each 32 bit multicast hash register are
2439 	 * valid).  The high-order 3 bits select the register, while the
2440 	 * low-order 4 bits select the bit within the register.
2441 */
2442
2443 memset(mchash, 0, sizeof(mchash));
2444
2445 ETHER_FIRST_MULTI(step, ec, enm);
2446 while (enm != NULL) {
2447 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2448 /*
2449 * We must listen to a range of multicast addresses.
2450 * For now, just accept all multicasts, rather than
2451 * trying to set only those filter bits needed to match
2452 * the range. (At this time, the only use of address
2453 * ranges is for IP multicast routing, for which the
2454 * range is big enough to require all bits set.)
2455 */
2456 goto allmulti;
2457 }
2458
2459 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
2460
2461 /* Just want the 7 most significant bits. */
2462 crc >>= 25;
2463
2464 /* Set the corresponding bit in the hash table. */
2465 mchash[crc >> 4] |= 1 << (crc & 0xf);
2466
2467 ETHER_NEXT_MULTI(step, enm);
2468 }
2469
2470 ifp->if_flags &= ~IFF_ALLMULTI;
2471 goto setit;
2472
2473 allmulti:
2474 ifp->if_flags |= IFF_ALLMULTI;
2475 sc->sc_rfcr |= RFCR_AAM;
2476
2477 setit:
2478 #define FILTER_EMIT(addr, data) \
2479 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2480 delay(1); \
2481 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2482 delay(1)
2483
2484 /*
2485 * Disable receive filter, and program the node address.
2486 */
2487 cp = LLADDR(ifp->if_sadl);
2488 FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
2489 FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
2490 FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);
2491
2492 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2493 /*
2494 * Program the multicast hash table.
2495 */
2496 FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
2497 FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
2498 FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
2499 FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
2500 FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
2501 FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
2502 FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
2503 FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
2504 }
2505 #undef FILTER_EMIT
2506
2507 /*
2508 	 * Re-enable the receive filter.
2509 */
2510 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2511 }
2512 #endif /* ! DP83820 */
2513
2514 /*
2515 * sip_dp83815_set_filter:
2516 *
2517 * Set up the receive filter.
2518 */
2519 void
2520 SIP_DECL(dp83815_set_filter)(struct sip_softc *sc)
2521 {
2522 bus_space_tag_t st = sc->sc_st;
2523 bus_space_handle_t sh = sc->sc_sh;
2524 struct ethercom *ec = &sc->sc_ethercom;
2525 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2526 struct ether_multi *enm;
2527 u_int8_t *cp;
2528 struct ether_multistep step;
2529 u_int32_t crc, hash, slot, bit;
2530 #ifdef DP83820
2531 #define MCHASH_NWORDS 128
2532 #else
2533 #define MCHASH_NWORDS 32
2534 #endif /* DP83820 */
2535 u_int16_t mchash[MCHASH_NWORDS];
2536 int i;
2537
2538 /*
2539 * Initialize the prototype RFCR.
2540 * Enable the receive filter, and accept on
2541 * Perfect (destination address) Match
2542 * If IFF_BROADCAST, also accept all broadcast packets.
2543 * If IFF_PROMISC, accept all unicast packets (and later, set
2544 * IFF_ALLMULTI and accept all multicast, too).
2545 */
2546 sc->sc_rfcr = RFCR_RFEN | RFCR_APM;
2547 if (ifp->if_flags & IFF_BROADCAST)
2548 sc->sc_rfcr |= RFCR_AAB;
2549 if (ifp->if_flags & IFF_PROMISC) {
2550 sc->sc_rfcr |= RFCR_AAP;
2551 goto allmulti;
2552 }
2553
2554 #ifdef DP83820
2555 /*
2556 * Set up the DP83820 multicast address filter by passing all multicast
2557 * addresses through a CRC generator, and then using the high-order
2558 * 11 bits as an index into the 2048 bit multicast hash table. The
2559 * high-order 7 bits select the slot, while the low-order 4 bits
2560 * select the bit within the slot. Note that only the low 16-bits
2561 * of each filter word are used, and there are 128 filter words.
2562 */
2563 #else
2564 /*
2565 * Set up the DP83815 multicast address filter by passing all multicast
2566 * addresses through a CRC generator, and then using the high-order
2567 * 9 bits as an index into the 512 bit multicast hash table. The
2568 * high-order 5 bits select the slot, while the low-order 4 bits
2569 * select the bit within the slot. Note that only the low 16-bits
2570 * of each filter word are used, and there are 32 filter words.
2571 */
2572 #endif /* DP83820 */
2573
2574 memset(mchash, 0, sizeof(mchash));
2575
2576 ifp->if_flags &= ~IFF_ALLMULTI;
2577 ETHER_FIRST_MULTI(step, ec, enm);
2578 if (enm == NULL)
2579 goto setit;
2580 while (enm != NULL) {
2581 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2582 /*
2583 * We must listen to a range of multicast addresses.
2584 * For now, just accept all multicasts, rather than
2585 * trying to set only those filter bits needed to match
2586 * the range. (At this time, the only use of address
2587 * ranges is for IP multicast routing, for which the
2588 * range is big enough to require all bits set.)
2589 */
2590 goto allmulti;
2591 }
2592
2593 #ifdef DP83820
2594 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2595
2596 /* Just want the 11 most significant bits. */
2597 hash = crc >> 21;
2598 #else
2599 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
2600
2601 /* Just want the 9 most significant bits. */
2602 hash = crc >> 23;
2603 #endif /* DP83820 */
2604 slot = hash >> 4;
2605 bit = hash & 0xf;
2606
2607 /* Set the corresponding bit in the hash table. */
2608 mchash[slot] |= 1 << bit;
2609
2610 ETHER_NEXT_MULTI(step, enm);
2611 }
2612 sc->sc_rfcr |= RFCR_MHEN;
2613 goto setit;
2614
2615 allmulti:
2616 ifp->if_flags |= IFF_ALLMULTI;
2617 sc->sc_rfcr |= RFCR_AAM;
2618
2619 setit:
2620 #define FILTER_EMIT(addr, data) \
2621 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2622 delay(1); \
2623 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2624 delay(1)
2625
2626 /*
2627 * Disable receive filter, and program the node address.
2628 */
2629 cp = LLADDR(ifp->if_sadl);
2630 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]);
2631 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]);
2632 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]);
2633
2634 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2635 /*
2636 * Program the multicast hash table.
2637 */
2638 for (i = 0; i < MCHASH_NWORDS; i++) {
2639 FILTER_EMIT(RFCR_NS_RFADDR_FILTMEM + (i * 2),
2640 mchash[i]);
2641 }
2642 }
2643 #undef FILTER_EMIT
2644 #undef MCHASH_NWORDS
2645
2646 /*
2647 	 * Re-enable the receive filter.
2648 */
2649 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2650 }
2651
2652 #if defined(DP83820)
2653 /*
2654 * sip_dp83820_mii_readreg: [mii interface function]
2655 *
2656 * Read a PHY register on the MII of the DP83820.
2657 */
2658 int
2659 SIP_DECL(dp83820_mii_readreg)(struct device *self, int phy, int reg)
2660 {
2661
2662 return (mii_bitbang_readreg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2663 phy, reg));
2664 }
2665
2666 /*
2667 * sip_dp83820_mii_writereg: [mii interface function]
2668 *
2669 * Write a PHY register on the MII of the DP83820.
2670 */
2671 void
2672 SIP_DECL(dp83820_mii_writereg)(struct device *self, int phy, int reg, int val)
2673 {
2674
2675 mii_bitbang_writereg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2676 phy, reg, val);
2677 }
2678
2679 /*
2680  * sip_dp83820_mii_statchg: [mii interface function]
2681 *
2682 * Callback from MII layer when media changes.
2683 */
2684 void
2685 SIP_DECL(dp83820_mii_statchg)(struct device *self)
2686 {
2687 struct sip_softc *sc = (struct sip_softc *) self;
2688 u_int32_t cfg;
2689
2690 /*
2691 * Update TXCFG for full-duplex operation.
2692 */
2693 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2694 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2695 else
2696 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2697
2698 /*
2699 * Update RXCFG for full-duplex or loopback.
2700 */
2701 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2702 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2703 sc->sc_rxcfg |= RXCFG_ATX;
2704 else
2705 sc->sc_rxcfg &= ~RXCFG_ATX;
2706
2707 /*
2708 * Update CFG for MII/GMII.
2709 */
2710 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
2711 cfg = sc->sc_cfg | CFG_MODE_1000;
2712 else
2713 cfg = sc->sc_cfg;
2714
2715 /*
2716 * XXX 802.3x flow control.
2717 */
2718
2719 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
2720 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
2721 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
2722 }
2723
2724 /*
2725 * sip_dp83820_mii_bitbang_read: [mii bit-bang interface function]
2726 *
2727 * Read the MII serial port for the MII bit-bang module.
2728 */
2729 u_int32_t
2730 SIP_DECL(dp83820_mii_bitbang_read)(struct device *self)
2731 {
2732 struct sip_softc *sc = (void *) self;
2733
2734 return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR));
2735 }
2736
2737 /*
2738  * sip_dp83820_mii_bitbang_write: [mii bit-bang interface function]
2739 *
2740 * Write the MII serial port for the MII bit-bang module.
2741 */
2742 void
2743 SIP_DECL(dp83820_mii_bitbang_write)(struct device *self, u_int32_t val)
2744 {
2745 struct sip_softc *sc = (void *) self;
2746
2747 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val);
2748 }
2749 #else /* ! DP83820 */
2750 /*
2751 * sip_sis900_mii_readreg: [mii interface function]
2752 *
2753 * Read a PHY register on the MII.
2754 */
2755 int
2756 SIP_DECL(sis900_mii_readreg)(struct device *self, int phy, int reg)
2757 {
2758 struct sip_softc *sc = (struct sip_softc *) self;
2759 u_int32_t enphy;
2760
2761 /*
2762 * The SiS 900 has only an internal PHY on the MII. Only allow
2763 * MII address 0.
2764 */
2765 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
2766 return (0);
2767
2768 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
2769 (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
2770 ENPHY_RWCMD | ENPHY_ACCESS);
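	/* Wait for the chip to clear ENPHY_ACCESS, indicating completion. */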
2771 do {
2772 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
2773 } while (enphy & ENPHY_ACCESS);
2774 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
2775 }
2776
2777 /*
2778 * sip_sis900_mii_writereg: [mii interface function]
2779 *
2780 * Write a PHY register on the MII.
2781 */
2782 void
2783 SIP_DECL(sis900_mii_writereg)(struct device *self, int phy, int reg, int val)
2784 {
2785 struct sip_softc *sc = (struct sip_softc *) self;
2786 u_int32_t enphy;
2787
2788 /*
2789 * The SiS 900 has only an internal PHY on the MII. Only allow
2790 * MII address 0.
2791 */
2792 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
2793 return;
2794
2795 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
2796 (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
2797 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
2798 do {
2799 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
2800 } while (enphy & ENPHY_ACCESS);
2801 }
2802
2803 /*
2804 * sip_sis900_mii_statchg: [mii interface function]
2805 *
2806 * Callback from MII layer when media changes.
2807 */
2808 void
2809 SIP_DECL(sis900_mii_statchg)(struct device *self)
2810 {
2811 struct sip_softc *sc = (struct sip_softc *) self;
2812 u_int32_t flowctl;
2813
2814 /*
2815 * Update TXCFG for full-duplex operation.
2816 */
2817 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2818 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2819 else
2820 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2821
2822 /*
2823 * Update RXCFG for full-duplex or loopback.
2824 */
2825 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2826 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2827 sc->sc_rxcfg |= RXCFG_ATX;
2828 else
2829 sc->sc_rxcfg &= ~RXCFG_ATX;
2830
2831 /*
2832 * Update IMR for use of 802.3x flow control.
2833 */
2834 if ((sc->sc_mii.mii_media_active & IFM_FLOW) != 0) {
2835 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
2836 flowctl = FLOWCTL_FLOWEN;
2837 } else {
2838 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
2839 flowctl = 0;
2840 }
2841
2842 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
2843 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
2844 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
2845 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
2846 }
2847
2848 /*
2849 * sip_dp83815_mii_readreg: [mii interface function]
2850 *
2851 * Read a PHY register on the MII.
2852 */
2853 int
2854 SIP_DECL(dp83815_mii_readreg)(struct device *self, int phy, int reg)
2855 {
2856 struct sip_softc *sc = (struct sip_softc *) self;
2857 u_int32_t val;
2858
2859 /*
2860 * The DP83815 only has an internal PHY. Only allow
2861 * MII address 0.
2862 */
2863 if (phy != 0)
2864 return (0);
2865
2866 /*
2867 * Apparently, after a reset, the DP83815 can take a while
2868 * to respond. During this recovery period, the BMSR returns
2869 * a value of 0. Catch this -- it's not supposed to happen
2870 * (the BMSR has some hardcoded-to-1 bits), and wait for the
2871 * PHY to come back to life.
2872 *
2873 * This works out because the BMSR is the first register
2874 * read during the PHY probe process.
2875 */
2876 do {
2877 val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg));
2878 } while (reg == MII_BMSR && val == 0);
2879
2880 return (val & 0xffff);
2881 }
2882
2883 /*
2884 * sip_dp83815_mii_writereg: [mii interface function]
2885 *
2886 * Write a PHY register to the MII.
2887 */
2888 void
2889 SIP_DECL(dp83815_mii_writereg)(struct device *self, int phy, int reg, int val)
2890 {
2891 struct sip_softc *sc = (struct sip_softc *) self;
2892
2893 /*
2894 * The DP83815 only has an internal PHY. Only allow
2895 * MII address 0.
2896 */
2897 if (phy != 0)
2898 return;
2899
2900 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val);
2901 }
2902
2903 /*
2904 * sip_dp83815_mii_statchg: [mii interface function]
2905 *
2906 * Callback from MII layer when media changes.
2907 */
2908 void
2909 SIP_DECL(dp83815_mii_statchg)(struct device *self)
2910 {
2911 struct sip_softc *sc = (struct sip_softc *) self;
2912
2913 /*
2914 * Update TXCFG for full-duplex operation.
2915 */
2916 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2917 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2918 else
2919 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2920
2921 /*
2922 * Update RXCFG for full-duplex or loopback.
2923 */
2924 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2925 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2926 sc->sc_rxcfg |= RXCFG_ATX;
2927 else
2928 sc->sc_rxcfg &= ~RXCFG_ATX;
2929
2930 /*
2931 * XXX 802.3x flow control.
2932 */
2933
2934 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
2935 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
2936 }
2937 #endif /* DP83820 */
2938
2939 #if defined(DP83820)
2940 void
2941 SIP_DECL(dp83820_read_macaddr)(struct sip_softc *sc,
2942 const struct pci_attach_args *pa, u_int8_t *enaddr)
2943 {
2944 u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2];
2945 u_int8_t cksum, *e, match;
2946 int i;
2947
2948 /*
2949 * EEPROM data format for the DP83820 can be found in
2950 * the DP83820 manual, section 4.2.4.
2951 */
2952
2953 SIP_DECL(read_eeprom)(sc, 0,
2954 sizeof(eeprom_data) / sizeof(eeprom_data[0]), eeprom_data);
2955
2956 match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8;
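	/* ~(x - 1) is the two's-complement negation, i.e. match = -match. */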
2957 match = ~(match - 1);
2958
2959 cksum = 0x55;
2960 e = (u_int8_t *) eeprom_data;
2961 for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++)
2962 cksum += *e++;
2963
2964 if (cksum != match)
2965 		printf("%s: Checksum (%x) mismatch (%x)\n",
2966 sc->sc_dev.dv_xname, cksum, match);
2967
2968 enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff;
2969 enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8;
2970 enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff;
2971 enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8;
2972 enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff;
2973 enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8;
2974
2975 /* Get the GPIOR bits. */
2976 sc->sc_gpior = eeprom_data[0x04];
2977
2978 /* Get various CFG related bits. */
2979 if ((eeprom_data[0x05] >> 0) & 1)
2980 sc->sc_cfg |= CFG_EXT_125;
2981 if ((eeprom_data[0x05] >> 9) & 1)
2982 sc->sc_cfg |= CFG_TBI_EN;
2983 }
2984 #else /* ! DP83820 */
2985 void
2986 SIP_DECL(sis900_read_macaddr)(struct sip_softc *sc,
2987 const struct pci_attach_args *pa, u_int8_t *enaddr)
2988 {
2989 u_int16_t myea[ETHER_ADDR_LEN / 2];
2990
2991 switch (PCI_REVISION(pa->pa_class)) {
2992 case SIS_REV_630S:
2993 case SIS_REV_630E:
2994 case SIS_REV_630EA1:
2995 /*
2996 * The MAC address for the on-board Ethernet of
2997 * the SiS 630 chipset is in the NVRAM. Kick
2998 * the chip into re-loading it from NVRAM, and
2999 * read the MAC address out of the filter registers.
3000 */
3001 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD);
3002
3003 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3004 RFCR_RFADDR_NODE0);
3005 myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3006 0xffff;
3007
3008 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3009 RFCR_RFADDR_NODE2);
3010 myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3011 0xffff;
3012
3013 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3014 RFCR_RFADDR_NODE4);
3015 myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3016 0xffff;
3017 break;
3018
3019 default:
3020 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
3021 sizeof(myea) / sizeof(myea[0]), myea);
3022 }
3023
3024 enaddr[0] = myea[0] & 0xff;
3025 enaddr[1] = myea[0] >> 8;
3026 enaddr[2] = myea[1] & 0xff;
3027 enaddr[3] = myea[1] >> 8;
3028 enaddr[4] = myea[2] & 0xff;
3029 enaddr[5] = myea[2] >> 8;
3030 }
3031
3032 /* Table and macro to bit-reverse an octet. */
3033 static const u_int8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
3034 #define bbr(v) ((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf])
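/* Example: bbr(0x80) == 0x01 and bbr(0x2c) == 0x34. */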
3035
3036 void
3037 SIP_DECL(dp83815_read_macaddr)(struct sip_softc *sc,
3038 const struct pci_attach_args *pa, u_int8_t *enaddr)
3039 {
3040 u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea;
3041 u_int8_t cksum, *e, match;
3042 int i;
3043
3044 SIP_DECL(read_eeprom)(sc, 0, sizeof(eeprom_data) /
3045 sizeof(eeprom_data[0]), eeprom_data);
3046
3047 match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8;
3048 match = ~(match - 1);
3049
3050 cksum = 0x55;
3051 e = (u_int8_t *) eeprom_data;
3052 	for (i = 0; i < SIP_DP83815_EEPROM_CHECKSUM; i++) {
3053 cksum += *e++;
3054 }
3055 if (cksum != match) {
3056 		printf("%s: Checksum (%x) mismatch (%x)\n",
3057 sc->sc_dev.dv_xname, cksum, match);
3058 }
3059
3060 /*
3061 * Unrolled because it makes slightly more sense this way.
3062 * The DP83815 stores the MAC address in bit 0 of word 6
3063 	 * through bit 15 of word 9.
3064 */
3065 ea = &eeprom_data[6];
3066 enaddr[0] = ((*ea & 0x1) << 7);
3067 ea++;
3068 enaddr[0] |= ((*ea & 0xFE00) >> 9);
3069 enaddr[1] = ((*ea & 0x1FE) >> 1);
3070 enaddr[2] = ((*ea & 0x1) << 7);
3071 ea++;
3072 enaddr[2] |= ((*ea & 0xFE00) >> 9);
3073 enaddr[3] = ((*ea & 0x1FE) >> 1);
3074 enaddr[4] = ((*ea & 0x1) << 7);
3075 ea++;
3076 enaddr[4] |= ((*ea & 0xFE00) >> 9);
3077 enaddr[5] = ((*ea & 0x1FE) >> 1);
3078
3079 /*
3080 * In case that's not weird enough, we also need to reverse
3081 * the bits in each byte. This all actually makes more sense
3082 * if you think about the EEPROM storage as an array of bits
3083 * being shifted into bytes, but that's not how we're looking
3084 * at it here...
3085 */
3086 	for (i = 0; i < 6; i++)
3087 enaddr[i] = bbr(enaddr[i]);
3088 }
3089 #endif /* DP83820 */
3090
3091 /*
3092 * sip_mediastatus: [ifmedia interface function]
3093 *
3094 * Get the current interface media status.
3095 */
3096 void
3097 SIP_DECL(mediastatus)(struct ifnet *ifp, struct ifmediareq *ifmr)
3098 {
3099 struct sip_softc *sc = ifp->if_softc;
3100
3101 mii_pollstat(&sc->sc_mii);
3102 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3103 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3104 }
3105
3106 /*
3107 * sip_mediachange: [ifmedia interface function]
3108 *
3109 * Set hardware to newly-selected media.
3110 */
3111 int
3112 SIP_DECL(mediachange)(struct ifnet *ifp)
3113 {
3114 struct sip_softc *sc = ifp->if_softc;
3115
3116 if (ifp->if_flags & IFF_UP)
3117 mii_mediachg(&sc->sc_mii);
3118 return (0);
3119 }
3120