1 /* $NetBSD: if_sip.c,v 1.24.2.4 2001/11/14 19:15:17 nathanw Exp $ */
2
3 /*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999 Network Computer, Inc.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of Network Computer, Inc. nor the names of its
52 * contributors may be used to endorse or promote products derived
53 * from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
56 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
66 */
67
68 /*
69 * Device driver for the Silicon Integrated Systems SiS 900,
70 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and
71 * National Semiconductor DP83820 10/100/1000 PCI Ethernet
72 * controllers.
73 *
74 * Originally written to support the SiS 900 by Jason R. Thorpe for
75 * Network Computer, Inc.
76 *
77 * TODO:
78 *
79 * - Support the 10-bit interface on the DP83820 (for fiber).
80 *
81 * - Reduce the interrupt load.
82 */
83
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.24.2.4 2001/11/14 19:15:17 nathanw Exp $");
86
87 #include "bpfilter.h"
88
89 #include <sys/param.h>
90 #include <sys/systm.h>
91 #include <sys/callout.h>
92 #include <sys/mbuf.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/socket.h>
96 #include <sys/ioctl.h>
97 #include <sys/errno.h>
98 #include <sys/device.h>
99 #include <sys/queue.h>
100
101 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
102
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111
112 #include <machine/bus.h>
113 #include <machine/intr.h>
114 #include <machine/endian.h>
115
116 #include <dev/mii/mii.h>
117 #include <dev/mii/miivar.h>
118 #ifdef DP83820
119 #include <dev/mii/mii_bitbang.h>
120 #endif /* DP83820 */
121
122 #include <dev/pci/pcireg.h>
123 #include <dev/pci/pcivar.h>
124 #include <dev/pci/pcidevs.h>
125
126 #include <dev/pci/if_sipreg.h>
127
128 #ifdef DP83820 /* DP83820 Gigabit Ethernet */
129 #define SIP_DECL(x) __CONCAT(gsip_,x)
130 #else /* SiS900 and DP83815 */
131 #define SIP_DECL(x) __CONCAT(sip_,x)
132 #endif
133
134 #define SIP_STR(x) __STRING(SIP_DECL(x))
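/*
 * Note: this one source file is built two ways: as the "sip" driver
 * (SiS 900/7016 and DP83815) and, with DP83820 defined, as the "gsip"
 * driver for the DP83820.  SIP_DECL() prefixes every global symbol
 * with sip_ or gsip_ accordingly so the two drivers can coexist.
 */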
135
136 /*
137 * Transmit descriptor list size. This is arbitrary, but allocate
138 * enough descriptors for 256 pending transmissions, and 8 segments
139 * per packet. This MUST work out to a power of 2.
140 */
141 #define SIP_NTXSEGS 8
142
143 #define SIP_TXQUEUELEN 256
144 #define SIP_NTXDESC (SIP_TXQUEUELEN * SIP_NTXSEGS)
145 #define SIP_NTXDESC_MASK (SIP_NTXDESC - 1)
146 #define SIP_NEXTTX(x) (((x) + 1) & SIP_NTXDESC_MASK)
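/*
 * 256 queue entries * 8 segments each = 2048 descriptors, which is
 * indeed a power of 2, so SIP_NEXTTX() can wrap the ring index with
 * a simple AND of SIP_NTXDESC_MASK rather than a modulus.
 */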
147
148 /*
149 * Receive descriptor list size. We have one Rx buffer per incoming
150 * packet, so this logic is a little simpler.
151 *
152 * Actually, on the DP83820, we allow the packet to consume more than
153 * one buffer, in order to support jumbo Ethernet frames. In that
154 * case, a packet may consume up to 5 buffers (assuming a 2048-byte
155 * mbuf cluster), so 256 receive buffers amounts to only 51 maximum-size
156 * packets; we'd better be quick about handling receive interrupts.
157 */
158 #if defined(DP83820)
159 #define SIP_NRXDESC 256
160 #else
161 #define SIP_NRXDESC 128
162 #endif /* DP83820 */
163 #define SIP_NRXDESC_MASK (SIP_NRXDESC - 1)
164 #define SIP_NEXTRX(x) (((x) + 1) & SIP_NRXDESC_MASK)
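/*
 * SIP_NRXDESC is likewise a power of 2 (256 or 128), so SIP_NEXTRX()
 * can wrap the receive index with SIP_NRXDESC_MASK.
 */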
165
166 /*
167 * Control structures are DMA'd to the SiS900 chip. We allocate them in
168 * a single clump that maps to a single DMA segment to make several things
169 * easier.
170 */
171 struct sip_control_data {
172 /*
173 * The transmit descriptors.
174 */
175 struct sip_desc scd_txdescs[SIP_NTXDESC];
176
177 /*
178 * The receive descriptors.
179 */
180 struct sip_desc scd_rxdescs[SIP_NRXDESC];
181 };
182
183 #define SIP_CDOFF(x) offsetof(struct sip_control_data, x)
184 #define SIP_CDTXOFF(x) SIP_CDOFF(scd_txdescs[(x)])
185 #define SIP_CDRXOFF(x) SIP_CDOFF(scd_rxdescs[(x)])
186
187 /*
188 * Software state for transmit jobs.
189 */
190 struct sip_txsoft {
191 struct mbuf *txs_mbuf; /* head of our mbuf chain */
192 bus_dmamap_t txs_dmamap; /* our DMA map */
193 int txs_firstdesc; /* first descriptor in packet */
194 int txs_lastdesc; /* last descriptor in packet */
195 SIMPLEQ_ENTRY(sip_txsoft) txs_q;
196 };
197
198 SIMPLEQ_HEAD(sip_txsq, sip_txsoft);
199
200 /*
201 * Software state for receive jobs.
202 */
203 struct sip_rxsoft {
204 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
205 bus_dmamap_t rxs_dmamap; /* our DMA map */
206 };
207
208 /*
209 * Software state per device.
210 */
211 struct sip_softc {
212 struct device sc_dev; /* generic device information */
213 bus_space_tag_t sc_st; /* bus space tag */
214 bus_space_handle_t sc_sh; /* bus space handle */
215 bus_dma_tag_t sc_dmat; /* bus DMA tag */
216 struct ethercom sc_ethercom; /* ethernet common data */
217 void *sc_sdhook; /* shutdown hook */
218
219 const struct sip_product *sc_model; /* which model are we? */
220
221 void *sc_ih; /* interrupt cookie */
222
223 struct mii_data sc_mii; /* MII/media information */
224
225 struct callout sc_tick_ch; /* tick callout */
226
227 bus_dmamap_t sc_cddmamap; /* control data DMA map */
228 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
229
230 /*
231 * Software state for transmit and receive descriptors.
232 */
233 struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
234 struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];
235
236 /*
237 * Control data structures.
238 */
239 struct sip_control_data *sc_control_data;
240 #define sc_txdescs sc_control_data->scd_txdescs
241 #define sc_rxdescs sc_control_data->scd_rxdescs
242
243 #ifdef SIP_EVENT_COUNTERS
244 /*
245 * Event counters.
246 */
247 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
248 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
249 struct evcnt sc_ev_txintr; /* Tx interrupts */
250 struct evcnt sc_ev_rxintr; /* Rx interrupts */
251 #ifdef DP83820
252 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
253 struct evcnt sc_ev_rxtcpsum; /* TCP checksums checked in-bound */
254 struct evcnt sc_ev_rxudpsum; /* UDP checksums checked in-bound */
255 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
256 struct evcnt sc_ev_txtcpsum; /* TCP checksums comp. out-bound */
257 struct evcnt sc_ev_txudpsum; /* UDP checksums comp. out-bound */
258 #endif /* DP83820 */
259 #endif /* SIP_EVENT_COUNTERS */
260
261 u_int32_t sc_txcfg; /* prototype TXCFG register */
262 u_int32_t sc_rxcfg; /* prototype RXCFG register */
263 u_int32_t sc_imr; /* prototype IMR register */
264 u_int32_t sc_rfcr; /* prototype RFCR register */
265
266 u_int32_t sc_cfg; /* prototype CFG register */
267
268 #ifdef DP83820
269 u_int32_t sc_gpior; /* prototype GPIOR register */
270 #endif /* DP83820 */
271
272 u_int32_t sc_tx_fill_thresh; /* transmit fill threshold */
273 u_int32_t sc_tx_drain_thresh; /* transmit drain threshold */
274
275 u_int32_t sc_rx_drain_thresh; /* receive drain threshold */
276
277 int sc_flags; /* misc. flags; see below */
278
279 int sc_txfree; /* number of free Tx descriptors */
280 int sc_txnext; /* next ready Tx descriptor */
281
282 struct sip_txsq sc_txfreeq; /* free Tx descsofts */
283 struct sip_txsq sc_txdirtyq; /* dirty Tx descsofts */
284
285 int sc_rxptr; /* next ready Rx descriptor/descsoft */
286 #if defined(DP83820)
287 int sc_rxdiscard;
288 int sc_rxlen;
289 struct mbuf *sc_rxhead;
290 struct mbuf *sc_rxtail;
291 struct mbuf **sc_rxtailp;
292 #endif /* DP83820 */
293 };
294
295 /* sc_flags */
296 #define SIPF_PAUSED 0x00000001 /* paused (802.3x flow control) */
297
298 #ifdef DP83820
299 #define SIP_RXCHAIN_RESET(sc) \
300 do { \
301 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
302 *(sc)->sc_rxtailp = NULL; \
303 (sc)->sc_rxlen = 0; \
304 } while (/*CONSTCOND*/0)
305
306 #define SIP_RXCHAIN_LINK(sc, m) \
307 do { \
308 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
309 (sc)->sc_rxtailp = &(m)->m_next; \
310 } while (/*CONSTCOND*/0)
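/*
 * sc_rxtailp always points at the m_next field of the last mbuf in
 * the receive chain (or at sc_rxhead when the chain is empty), so
 * appending a buffer with SIP_RXCHAIN_LINK() is O(1); there is no
 * need to walk the chain.
 */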
311 #endif /* DP83820 */
312
313 #ifdef SIP_EVENT_COUNTERS
314 #define SIP_EVCNT_INCR(ev) (ev)->ev_count++
315 #else
316 #define SIP_EVCNT_INCR(ev) /* nothing */
317 #endif
318
319 #define SIP_CDTXADDR(sc, x) ((sc)->sc_cddma + SIP_CDTXOFF((x)))
320 #define SIP_CDRXADDR(sc, x) ((sc)->sc_cddma + SIP_CDRXOFF((x)))
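/*
 * SIP_CDTXADDR() and SIP_CDRXADDR() compute the DMA (bus) address of
 * descriptor x: the base address of the control-data clump plus the
 * descriptor's offset within it.
 */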
321
322 #define SIP_CDTXSYNC(sc, x, n, ops) \
323 do { \
324 int __x, __n; \
325 \
326 __x = (x); \
327 __n = (n); \
328 \
329 /* If it will wrap around, sync to the end of the ring. */ \
330 if ((__x + __n) > SIP_NTXDESC) { \
331 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
332 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * \
333 (SIP_NTXDESC - __x), (ops)); \
334 __n -= (SIP_NTXDESC - __x); \
335 __x = 0; \
336 } \
337 \
338 /* Now sync whatever is left. */ \
339 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
340 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops)); \
341 } while (0)
342
343 #define SIP_CDRXSYNC(sc, x, ops) \
344 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
345 SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))
346
347 #ifdef DP83820
348 #define SIP_INIT_RXDESC_EXTSTS __sipd->sipd_extsts = 0;
349 #define SIP_RXBUF_LEN (MCLBYTES - 4)
350 #else
351 #define SIP_INIT_RXDESC_EXTSTS /* nothing */
352 #define SIP_RXBUF_LEN (MCLBYTES - 1) /* field width */
353 #endif
354 #define SIP_INIT_RXDESC(sc, x) \
355 do { \
356 struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
357 struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)]; \
358 \
359 __sipd->sipd_link = \
360 htole32(SIP_CDRXADDR((sc), SIP_NEXTRX((x)))); \
361 __sipd->sipd_bufptr = \
362 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr); \
363 __sipd->sipd_cmdsts = htole32(CMDSTS_INTR | \
364 (SIP_RXBUF_LEN & CMDSTS_SIZE_MASK)); \
365 SIP_INIT_RXDESC_EXTSTS \
366 SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
367 } while (0)
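/*
 * SIP_INIT_RXDESC() (re)arms receive descriptor x: link it to the
 * next slot in the ring, point it at its mbuf's DMA segment, set the
 * buffer size and interrupt bit (with OWN clear so the chip may fill
 * it), and sync the descriptor so the chip sees the update.
 */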
368
369 #define SIP_TIMEOUT 1000
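/*
 * Number of times to poll a chip status bit before giving up; e.g.
 * sip_reset() polls CR_RST up to SIP_TIMEOUT times with a 2us delay
 * (roughly 2ms total).
 */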
370
371 void SIP_DECL(start)(struct ifnet *);
372 void SIP_DECL(watchdog)(struct ifnet *);
373 int SIP_DECL(ioctl)(struct ifnet *, u_long, caddr_t);
374 int SIP_DECL(init)(struct ifnet *);
375 void SIP_DECL(stop)(struct ifnet *, int);
376
377 void SIP_DECL(shutdown)(void *);
378
379 void SIP_DECL(reset)(struct sip_softc *);
380 void SIP_DECL(rxdrain)(struct sip_softc *);
381 int SIP_DECL(add_rxbuf)(struct sip_softc *, int);
382 void SIP_DECL(read_eeprom)(struct sip_softc *, int, int, u_int16_t *);
383 void SIP_DECL(tick)(void *);
384
385 #if !defined(DP83820)
386 void SIP_DECL(sis900_set_filter)(struct sip_softc *);
387 #endif /* ! DP83820 */
388 void SIP_DECL(dp83815_set_filter)(struct sip_softc *);
389
390 #if defined(DP83820)
391 void SIP_DECL(dp83820_read_macaddr)(struct sip_softc *, u_int8_t *);
392 #else
393 void SIP_DECL(sis900_read_macaddr)(struct sip_softc *, u_int8_t *);
394 void SIP_DECL(dp83815_read_macaddr)(struct sip_softc *, u_int8_t *);
395 #endif /* DP83820 */
396
397 int SIP_DECL(intr)(void *);
398 void SIP_DECL(txintr)(struct sip_softc *);
399 void SIP_DECL(rxintr)(struct sip_softc *);
400
401 #if defined(DP83820)
402 int SIP_DECL(dp83820_mii_readreg)(struct device *, int, int);
403 void SIP_DECL(dp83820_mii_writereg)(struct device *, int, int, int);
404 void SIP_DECL(dp83820_mii_statchg)(struct device *);
405 #else
406 int SIP_DECL(sis900_mii_readreg)(struct device *, int, int);
407 void SIP_DECL(sis900_mii_writereg)(struct device *, int, int, int);
408 void SIP_DECL(sis900_mii_statchg)(struct device *);
409
410 int SIP_DECL(dp83815_mii_readreg)(struct device *, int, int);
411 void SIP_DECL(dp83815_mii_writereg)(struct device *, int, int, int);
412 void SIP_DECL(dp83815_mii_statchg)(struct device *);
413 #endif /* DP83820 */
414
415 int SIP_DECL(mediachange)(struct ifnet *);
416 void SIP_DECL(mediastatus)(struct ifnet *, struct ifmediareq *);
417
418 int SIP_DECL(match)(struct device *, struct cfdata *, void *);
419 void SIP_DECL(attach)(struct device *, struct device *, void *);
420
421 int SIP_DECL(copy_small) = 0;
422
423 struct cfattach SIP_DECL(ca) = {
424 sizeof(struct sip_softc), SIP_DECL(match), SIP_DECL(attach),
425 };
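/*
 * Autoconfiguration glue: this hands {g,}sip_match() and
 * {g,}sip_attach() to the kernel autoconfiguration machinery.
 */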
426
427 /*
428 * Descriptions of the variants of the SiS900.
429 */
430 struct sip_variant {
431 int (*sipv_mii_readreg)(struct device *, int, int);
432 void (*sipv_mii_writereg)(struct device *, int, int, int);
433 void (*sipv_mii_statchg)(struct device *);
434 void (*sipv_set_filter)(struct sip_softc *);
435 void (*sipv_read_macaddr)(struct sip_softc *, u_int8_t *);
436 };
437
438 #if defined(DP83820)
439 u_int32_t SIP_DECL(dp83820_mii_bitbang_read)(struct device *);
440 void SIP_DECL(dp83820_mii_bitbang_write)(struct device *, u_int32_t);
441
442 const struct mii_bitbang_ops SIP_DECL(dp83820_mii_bitbang_ops) = {
443 SIP_DECL(dp83820_mii_bitbang_read),
444 SIP_DECL(dp83820_mii_bitbang_write),
445 {
446 EROMAR_MDIO, /* MII_BIT_MDO */
447 EROMAR_MDIO, /* MII_BIT_MDI */
448 EROMAR_MDC, /* MII_BIT_MDC */
449 EROMAR_MDDIR, /* MII_BIT_DIR_HOST_PHY */
450 0, /* MII_BIT_DIR_PHY_HOST */
451 }
452 };
453 #endif /* DP83820 */
454
455 #if defined(DP83820)
456 const struct sip_variant SIP_DECL(variant_dp83820) = {
457 SIP_DECL(dp83820_mii_readreg),
458 SIP_DECL(dp83820_mii_writereg),
459 SIP_DECL(dp83820_mii_statchg),
460 SIP_DECL(dp83815_set_filter),
461 SIP_DECL(dp83820_read_macaddr),
462 };
463 #else
464 const struct sip_variant SIP_DECL(variant_sis900) = {
465 SIP_DECL(sis900_mii_readreg),
466 SIP_DECL(sis900_mii_writereg),
467 SIP_DECL(sis900_mii_statchg),
468 SIP_DECL(sis900_set_filter),
469 SIP_DECL(sis900_read_macaddr),
470 };
471
472 const struct sip_variant SIP_DECL(variant_dp83815) = {
473 SIP_DECL(dp83815_mii_readreg),
474 SIP_DECL(dp83815_mii_writereg),
475 SIP_DECL(dp83815_mii_statchg),
476 SIP_DECL(dp83815_set_filter),
477 SIP_DECL(dp83815_read_macaddr),
478 };
479 #endif /* DP83820 */
480
481 /*
482 * Devices supported by this driver.
483 */
484 const struct sip_product {
485 pci_vendor_id_t sip_vendor;
486 pci_product_id_t sip_product;
487 const char *sip_name;
488 const struct sip_variant *sip_variant;
489 } SIP_DECL(products)[] = {
490 #if defined(DP83820)
491 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83820,
492 "NatSemi DP83820 Gigabit Ethernet",
493 &SIP_DECL(variant_dp83820) },
494 #else
495 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900,
496 "SiS 900 10/100 Ethernet",
497 &SIP_DECL(variant_sis900) },
498 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016,
499 "SiS 7016 10/100 Ethernet",
500 &SIP_DECL(variant_sis900) },
501
502 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815,
503 "NatSemi DP83815 10/100 Ethernet",
504 &SIP_DECL(variant_dp83815) },
505 #endif /* DP83820 */
506
507 { 0, 0,
508 NULL,
509 NULL },
510 };
511
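/*
 * sip_lookup:
 *
 *	Return the sip_products[] entry matching the PCI vendor and
 *	product ID in the attach arguments, or NULL if we don't
 *	support this device.
 */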
512 static const struct sip_product *
513 SIP_DECL(lookup)(const struct pci_attach_args *pa)
514 {
515 const struct sip_product *sip;
516
517 for (sip = SIP_DECL(products); sip->sip_name != NULL; sip++) {
518 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
519 PCI_PRODUCT(pa->pa_id) == sip->sip_product)
520 return (sip);
521 }
522 return (NULL);
523 }
524
525 int
526 SIP_DECL(match)(struct device *parent, struct cfdata *cf, void *aux)
527 {
528 struct pci_attach_args *pa = aux;
529
530 if (SIP_DECL(lookup)(pa) != NULL)
531 return (1);
532
533 return (0);
534 }
535
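/*
 * sip_attach:
 *
 *	Attach an instance of the driver: map the registers, wake the
 *	chip from power-save, establish the interrupt, allocate and
 *	load the DMA control data, read the MAC address, probe the
 *	MII/media, and attach the network interface.
 */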
536 void
537 SIP_DECL(attach)(struct device *parent, struct device *self, void *aux)
538 {
539 struct sip_softc *sc = (struct sip_softc *) self;
540 struct pci_attach_args *pa = aux;
541 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
542 pci_chipset_tag_t pc = pa->pa_pc;
543 pci_intr_handle_t ih;
544 const char *intrstr = NULL;
545 bus_space_tag_t iot, memt;
546 bus_space_handle_t ioh, memh;
547 bus_dma_segment_t seg;
548 int ioh_valid, memh_valid;
549 int i, rseg, error;
550 const struct sip_product *sip;
551 pcireg_t pmode;
552 u_int8_t enaddr[ETHER_ADDR_LEN];
553 int pmreg;
554 #ifdef DP83820
555 pcireg_t memtype;
556 u_int32_t reg;
557 #endif /* DP83820 */
558
559 callout_init(&sc->sc_tick_ch);
560
561 sip = SIP_DECL(lookup)(pa);
562 if (sip == NULL) {
563 printf("\n");
564 panic(SIP_STR(attach) ": impossible");
565 }
566
567 printf(": %s\n", sip->sip_name);
568
569 sc->sc_model = sip;
570
571 /*
572 * Map the device.
573 */
574 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
575 PCI_MAPREG_TYPE_IO, 0,
576 &iot, &ioh, NULL, NULL) == 0);
577 #ifdef DP83820
578 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA);
579 switch (memtype) {
580 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
581 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
582 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
583 memtype, 0, &memt, &memh, NULL, NULL) == 0);
584 break;
585 default:
586 memh_valid = 0;
587 }
588 #else
589 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
590 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
591 &memt, &memh, NULL, NULL) == 0);
592 #endif /* DP83820 */
593
594 if (memh_valid) {
595 sc->sc_st = memt;
596 sc->sc_sh = memh;
597 } else if (ioh_valid) {
598 sc->sc_st = iot;
599 sc->sc_sh = ioh;
600 } else {
601 printf("%s: unable to map device registers\n",
602 sc->sc_dev.dv_xname);
603 return;
604 }
605
606 sc->sc_dmat = pa->pa_dmat;
607
608 /* Enable bus mastering. */
609 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
610 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
611 PCI_COMMAND_MASTER_ENABLE);
612
613 /* Get it out of power save mode if needed. */
614 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
615 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
616 if (pmode == 3) {
617 /*
618 * The card has lost all configuration data in
619 * this state, so punt.
620 */
621 printf("%s: unable to wake up from power state D3\n",
622 sc->sc_dev.dv_xname);
623 return;
624 }
625 if (pmode != 0) {
626 printf("%s: waking up from power state D%d\n",
627 sc->sc_dev.dv_xname, pmode);
628 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
629 }
630 }
631
632 /*
633 * Map and establish our interrupt.
634 */
635 if (pci_intr_map(pa, &ih)) {
636 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
637 return;
638 }
639 intrstr = pci_intr_string(pc, ih);
640 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, SIP_DECL(intr), sc);
641 if (sc->sc_ih == NULL) {
642 printf("%s: unable to establish interrupt",
643 sc->sc_dev.dv_xname);
644 if (intrstr != NULL)
645 printf(" at %s", intrstr);
646 printf("\n");
647 return;
648 }
649 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
650
651 SIMPLEQ_INIT(&sc->sc_txfreeq);
652 SIMPLEQ_INIT(&sc->sc_txdirtyq);
653
654 /*
655 * Allocate the control data structures, and create and load the
656 * DMA map for it.
657 */
658 if ((error = bus_dmamem_alloc(sc->sc_dmat,
659 sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
660 0)) != 0) {
661 printf("%s: unable to allocate control data, error = %d\n",
662 sc->sc_dev.dv_xname, error);
663 goto fail_0;
664 }
665
666 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
667 sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data,
668 BUS_DMA_COHERENT)) != 0) {
669 printf("%s: unable to map control data, error = %d\n",
670 sc->sc_dev.dv_xname, error);
671 goto fail_1;
672 }
673
674 if ((error = bus_dmamap_create(sc->sc_dmat,
675 sizeof(struct sip_control_data), 1,
676 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
677 printf("%s: unable to create control data DMA map, "
678 "error = %d\n", sc->sc_dev.dv_xname, error);
679 goto fail_2;
680 }
681
682 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
683 sc->sc_control_data, sizeof(struct sip_control_data), NULL,
684 0)) != 0) {
685 printf("%s: unable to load control data DMA map, error = %d\n",
686 sc->sc_dev.dv_xname, error);
687 goto fail_3;
688 }
689
690 /*
691 * Create the transmit buffer DMA maps.
692 */
693 for (i = 0; i < SIP_TXQUEUELEN; i++) {
694 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
695 SIP_NTXSEGS, MCLBYTES, 0, 0,
696 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
697 printf("%s: unable to create tx DMA map %d, "
698 "error = %d\n", sc->sc_dev.dv_xname, i, error);
699 goto fail_4;
700 }
701 }
702
703 /*
704 * Create the receive buffer DMA maps.
705 */
706 for (i = 0; i < SIP_NRXDESC; i++) {
707 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
708 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
709 printf("%s: unable to create rx DMA map %d, "
710 "error = %d\n", sc->sc_dev.dv_xname, i, error);
711 goto fail_5;
712 }
713 sc->sc_rxsoft[i].rxs_mbuf = NULL;
714 }
715
716 /*
717 * Reset the chip to a known state.
718 */
719 SIP_DECL(reset)(sc);
720
721 /*
722 * Read the Ethernet address from the EEPROM. This might
723 * also fetch other stuff from the EEPROM and stash it
724 * in the softc.
725 */
726 sc->sc_cfg = 0;
727 (*sip->sip_variant->sipv_read_macaddr)(sc, enaddr);
728
729 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
730 ether_sprintf(enaddr));
731
732 /*
733 * Initialize the configuration register: aggressive PCI
734 * bus request algorithm, default backoff, default OW timer,
735 * default parity error detection.
736 *
737 * NOTE: "Big endian mode" is useless on the SiS900 and
738 * friends -- it affects packet data, not descriptors.
739 */
740 #ifdef DP83820
741 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
742 if (reg & CFG_PCI64_DET) {
743 printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);
744 /*
745 * XXX Need some PCI flags indicating support for
746 * XXX 64-bit addressing (SAC or DAC) and 64-bit
747 * XXX data path.
748 */
749 }
750 if (sc->sc_cfg & (CFG_TBI_EN|CFG_EXT_125)) {
751 const char *sep = "";
752 printf("%s: using ", sc->sc_dev.dv_xname);
753 if (sc->sc_cfg & CFG_EXT_125) {
754 printf("%s125MHz clock", sep);
755 sep = ", ";
756 }
757 if (sc->sc_cfg & CFG_TBI_EN) {
758 printf("%sten-bit interface", sep);
759 sep = ", ";
760 }
761 printf("\n");
762 }
763 if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0)
764 sc->sc_cfg |= CFG_MRM_DIS;
765 if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
766 sc->sc_cfg |= CFG_MWI_DIS;
767
768 /*
769 * Use the extended descriptor format on the DP83820. This
770 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
771 * checksumming.
772 */
773 sc->sc_cfg |= CFG_EXTSTS_EN;
774 #endif /* DP83820 */
775
776 /*
777 * Initialize our media structures and probe the MII.
778 */
779 sc->sc_mii.mii_ifp = ifp;
780 sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg;
781 sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg;
782 sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg;
783 ifmedia_init(&sc->sc_mii.mii_media, 0, SIP_DECL(mediachange),
784 SIP_DECL(mediastatus));
785 #ifdef DP83820
786 if (sc->sc_cfg & CFG_TBI_EN) {
787 /* Using ten-bit interface. */
788 printf("%s: TBI -- FIXME\n", sc->sc_dev.dv_xname);
789 } else {
790 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
791 MII_OFFSET_ANY, 0);
792 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
793 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
794 0, NULL);
795 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
796 } else
797 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
798 }
799 #else
800 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
801 MII_OFFSET_ANY, 0);
802 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
803 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
804 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
805 } else
806 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
807 #endif /* DP83820 */
808
809 ifp = &sc->sc_ethercom.ec_if;
810 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
811 ifp->if_softc = sc;
812 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
813 ifp->if_ioctl = SIP_DECL(ioctl);
814 ifp->if_start = SIP_DECL(start);
815 ifp->if_watchdog = SIP_DECL(watchdog);
816 ifp->if_init = SIP_DECL(init);
817 ifp->if_stop = SIP_DECL(stop);
818 IFQ_SET_READY(&ifp->if_snd);
819
820 /*
821 * We can support 802.1Q VLAN-sized frames.
822 */
823 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
824
825 #ifdef DP83820
826 /*
827 * And the DP83820 can do VLAN tagging in hardware, and
828 * support the jumbo Ethernet MTU.
829 */
830 sc->sc_ethercom.ec_capabilities |=
831 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
832
833 /*
834 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums
835 * in hardware.
836 */
837 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
838 IFCAP_CSUM_UDPv4;
839 #endif /* DP83820 */
840
841 /*
842 * Attach the interface.
843 */
844 if_attach(ifp);
845 ether_ifattach(ifp, enaddr);
846
847 #ifdef SIP_EVENT_COUNTERS
848 /*
849 * Attach event counters.
850 */
851 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
852 NULL, sc->sc_dev.dv_xname, "txsstall");
853 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
854 NULL, sc->sc_dev.dv_xname, "txdstall");
855 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
856 NULL, sc->sc_dev.dv_xname, "txintr");
857 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
858 NULL, sc->sc_dev.dv_xname, "rxintr");
859 #ifdef DP83820
860 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
861 NULL, sc->sc_dev.dv_xname, "rxipsum");
862 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC,
863 NULL, sc->sc_dev.dv_xname, "rxtcpsum");
864 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC,
865 NULL, sc->sc_dev.dv_xname, "rxudpsum");
866 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
867 NULL, sc->sc_dev.dv_xname, "txipsum");
868 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC,
869 NULL, sc->sc_dev.dv_xname, "txtcpsum");
870 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC,
871 NULL, sc->sc_dev.dv_xname, "txudpsum");
872 #endif /* DP83820 */
873 #endif /* SIP_EVENT_COUNTERS */
874
875 /*
876 * Make sure the interface is shutdown during reboot.
877 */
878 sc->sc_sdhook = shutdownhook_establish(SIP_DECL(shutdown), sc);
879 if (sc->sc_sdhook == NULL)
880 printf("%s: WARNING: unable to establish shutdown hook\n",
881 sc->sc_dev.dv_xname);
882 return;
883
884 /*
885 * Free any resources we've allocated during the failed attach
886 * attempt. Do this in reverse order and fall through.
887 */
888 fail_5:
889 for (i = 0; i < SIP_NRXDESC; i++) {
890 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
891 bus_dmamap_destroy(sc->sc_dmat,
892 sc->sc_rxsoft[i].rxs_dmamap);
893 }
894 fail_4:
895 for (i = 0; i < SIP_TXQUEUELEN; i++) {
896 if (sc->sc_txsoft[i].txs_dmamap != NULL)
897 bus_dmamap_destroy(sc->sc_dmat,
898 sc->sc_txsoft[i].txs_dmamap);
899 }
900 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
901 fail_3:
902 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
903 fail_2:
904 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
905 sizeof(struct sip_control_data));
906 fail_1:
907 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
908 fail_0:
909 return;
910 }
911
912 /*
913 * sip_shutdown:
914 *
915 * Make sure the interface is stopped at reboot time.
916 */
917 void
918 SIP_DECL(shutdown)(void *arg)
919 {
920 struct sip_softc *sc = arg;
921
922 SIP_DECL(stop)(&sc->sc_ethercom.ec_if, 1);
923 }
924
925 /*
926 * sip_start: [ifnet interface function]
927 *
928 * Start packet transmission on the interface.
929 */
930 void
931 SIP_DECL(start)(struct ifnet *ifp)
932 {
933 struct sip_softc *sc = ifp->if_softc;
934 struct mbuf *m0, *m;
935 struct sip_txsoft *txs;
936 bus_dmamap_t dmamap;
937 int error, firsttx, nexttx, lasttx, ofree, seg;
938 #ifdef DP83820
939 u_int32_t extsts;
940 #endif
941
942 /*
943 * If we've been told to pause, don't transmit any more packets.
944 */
945 if (sc->sc_flags & SIPF_PAUSED)
946 ifp->if_flags |= IFF_OACTIVE;
947
948 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
949 return;
950
951 /*
952 * Remember the previous number of free descriptors and
953 * the first descriptor we'll use.
954 */
955 ofree = sc->sc_txfree;
956 firsttx = sc->sc_txnext;
957
958 /*
959 * Loop through the send queue, setting up transmit descriptors
960 * until we drain the queue, or use up all available transmit
961 * descriptors.
962 */
963 for (;;) {
964 /* Get a work queue entry. */
965 if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) {
966 SIP_EVCNT_INCR(&sc->sc_ev_txsstall);
967 break;
968 }
969
970 /*
971 * Grab a packet off the queue.
972 */
973 IFQ_POLL(&ifp->if_snd, m0);
974 if (m0 == NULL)
975 break;
976 #ifndef DP83820
977 m = NULL;
978 #endif
979
980 dmamap = txs->txs_dmamap;
981
982 #ifdef DP83820
983 /*
984 * Load the DMA map. If this fails, the packet either
985 * didn't fit in the allotted number of segments, or we
986 * were short on resources. For the too-many-segments
987 * case, we simply report an error and drop the packet,
988 * since we can't sanely copy a jumbo packet to a single
989 * buffer.
990 */
991 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
992 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
993 if (error) {
994 if (error == EFBIG) {
995 printf("%s: Tx packet consumes too many "
996 "DMA segments, dropping...\n",
997 sc->sc_dev.dv_xname);
998 IFQ_DEQUEUE(&ifp->if_snd, m0);
999 m_freem(m0);
1000 continue;
1001 }
1002 /*
1003 * Short on resources, just stop for now.
1004 */
1005 break;
1006 }
1007 #else /* DP83820 */
1008 /*
1009 * Load the DMA map. If this fails, the packet either
1010 * didn't fit in the allotted number of segments, or we
1011 * were short on resources. In this case, we'll copy
1012 * and try again.
1013 */
1014 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1015 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1016 MGETHDR(m, M_DONTWAIT, MT_DATA);
1017 if (m == NULL) {
1018 printf("%s: unable to allocate Tx mbuf\n",
1019 sc->sc_dev.dv_xname);
1020 break;
1021 }
1022 if (m0->m_pkthdr.len > MHLEN) {
1023 MCLGET(m, M_DONTWAIT);
1024 if ((m->m_flags & M_EXT) == 0) {
1025 printf("%s: unable to allocate Tx "
1026 "cluster\n", sc->sc_dev.dv_xname);
1027 m_freem(m);
1028 break;
1029 }
1030 }
1031 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
1032 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1033 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
1034 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1035 if (error) {
1036 printf("%s: unable to load Tx buffer, "
1037 "error = %d\n", sc->sc_dev.dv_xname, error);
1038 break;
1039 }
1040 }
1041 #endif /* DP83820 */
1042
1043 /*
1044 * Ensure we have enough descriptors free to describe
1045 * the packet. Note, we always reserve one descriptor
1046 * at the end of the ring as a termination point, to
1047 * prevent wrap-around.
1048 */
1049 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
1050 /*
1051 * Not enough free descriptors to transmit this
1052 * packet. We haven't committed anything yet,
1053 * so just unload the DMA map, put the packet
1054 * back on the queue, and punt. Notify the upper
1055 * layer that there are no more slots left.
1056 *
1057 * XXX We could allocate an mbuf and copy, but
1058 * XXX is it worth it?
1059 */
1060 ifp->if_flags |= IFF_OACTIVE;
1061 bus_dmamap_unload(sc->sc_dmat, dmamap);
1062 #ifndef DP83820
1063 if (m != NULL)
1064 m_freem(m);
1065 #endif
1066 SIP_EVCNT_INCR(&sc->sc_ev_txdstall);
1067 break;
1068 }
1069
1070 IFQ_DEQUEUE(&ifp->if_snd, m0);
1071 #ifndef DP83820
1072 if (m != NULL) {
1073 m_freem(m0);
1074 m0 = m;
1075 }
1076 #endif
1077
1078 /*
1079 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1080 */
1081
1082 /* Sync the DMA map. */
1083 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1084 BUS_DMASYNC_PREWRITE);
1085
1086 /*
1087 * Initialize the transmit descriptors.
1088 */
1089 for (nexttx = sc->sc_txnext, seg = 0;
1090 seg < dmamap->dm_nsegs;
1091 seg++, nexttx = SIP_NEXTTX(nexttx)) {
1092 /*
1093 * If this is the first descriptor we're
1094 * enqueueing, don't set the OWN bit just
1095 * yet. That could cause a race condition.
1096 * We'll do it below.
1097 */
1098 sc->sc_txdescs[nexttx].sipd_bufptr =
1099 htole32(dmamap->dm_segs[seg].ds_addr);
1100 sc->sc_txdescs[nexttx].sipd_cmdsts =
1101 htole32((nexttx == firsttx ? 0 : CMDSTS_OWN) |
1102 CMDSTS_MORE | dmamap->dm_segs[seg].ds_len);
1103 #ifdef DP83820
1104 sc->sc_txdescs[nexttx].sipd_extsts = 0;
1105 #endif /* DP83820 */
1106 lasttx = nexttx;
1107 }
1108
1109 /* Clear the MORE bit on the last segment. */
1110 sc->sc_txdescs[lasttx].sipd_cmdsts &= htole32(~CMDSTS_MORE);
1111
1112 #ifdef DP83820
1113 /*
1114 * If VLANs are enabled and the packet has a VLAN tag, set
1115 * up the descriptor to encapsulate the packet for us.
1116 *
1117 * This apparently has to be on the last descriptor of
1118 * the packet.
1119 */
1120 if (sc->sc_ethercom.ec_nvlans != 0 &&
1121 (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
1122 sc->sc_txdescs[lasttx].sipd_extsts |=
1123 htole32(EXTSTS_VPKT |
1124 htons(*mtod(m, int *) & EXTSTS_VTCI));
1125 }
1126
1127 /*
1128 * If the upper layer has requested IPv4/TCPv4/UDPv4
1129 * checksumming, set up the descriptor to do this work
1130 * for us.
1131 *
1132 * This apparently has to be on the first descriptor of
1133 * the packet.
1134 *
1135 * Byte-swap constants so the compiler can optimize.
1136 */
1137 extsts = 0;
1138 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1139 KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4);
1140 SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
1141 extsts |= htole32(EXTSTS_IPPKT);
1142 }
1143 if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
1144 KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4);
1145 SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
1146 extsts |= htole32(EXTSTS_TCPPKT);
1147 } else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
1148 KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4);
1149 SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
1150 extsts |= htole32(EXTSTS_UDPPKT);
1151 }
1152 sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts;
1153 #endif /* DP83820 */
1154
1155 /* Sync the descriptors we're using. */
1156 SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1157 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1158
1159 /*
1160 * Store a pointer to the packet so we can free it later,
1161 * and remember what txdirty will be once the packet is
1162 * done.
1163 */
1164 txs->txs_mbuf = m0;
1165 txs->txs_firstdesc = sc->sc_txnext;
1166 txs->txs_lastdesc = lasttx;
1167
1168 /* Advance the tx pointer. */
1169 sc->sc_txfree -= dmamap->dm_nsegs;
1170 sc->sc_txnext = nexttx;
1171
1172 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs, txs_q);
1173 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1174
1175 #if NBPFILTER > 0
1176 /*
1177 * Pass the packet to any BPF listeners.
1178 */
1179 if (ifp->if_bpf)
1180 bpf_mtap(ifp->if_bpf, m0);
1181 #endif /* NBPFILTER > 0 */
1182 }
1183
1184 if (txs == NULL || sc->sc_txfree == 0) {
1185 /* No more slots left; notify upper layer. */
1186 ifp->if_flags |= IFF_OACTIVE;
1187 }
1188
1189 if (sc->sc_txfree != ofree) {
1190 /*
1191 * Cause a descriptor interrupt to happen on the
1192 * last packet we enqueued.
1193 */
1194 sc->sc_txdescs[lasttx].sipd_cmdsts |= htole32(CMDSTS_INTR);
1195 SIP_CDTXSYNC(sc, lasttx, 1,
1196 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1197
1198 /*
1199 * The entire packet chain is set up. Give the
1200 * first descriptor to the chip now.
1201 */
1202 sc->sc_txdescs[firsttx].sipd_cmdsts |= htole32(CMDSTS_OWN);
1203 SIP_CDTXSYNC(sc, firsttx, 1,
1204 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1205
1206 /*
1207 * Start the transmit process. Note, the manual says
1208 * that if there are no pending transmissions in the
1209 * chip's internal queue (indicated by TXE being clear),
1210 * then the driver software must set the TXDP to the
1211 * first descriptor to be transmitted. However, if we
1212 * do this, it causes serious performance degradation on
1213 * the DP83820 under load; not setting TXDP doesn't seem
1214 * to adversely affect the SiS 900 or DP83815.
1215 *
1216 * Well, I guess it wouldn't be the first time a manual
1217 * has lied -- and they could be speaking of the NULL-
1218 * terminated descriptor list case, rather than OWN-
1219 * terminated rings.
1220 */
1221 #if 0
1222 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
1223 CR_TXE) == 0) {
1224 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
1225 SIP_CDTXADDR(sc, firsttx));
1226 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1227 }
1228 #else
1229 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1230 #endif
1231
1232 /* Set a watchdog timer in case the chip flakes out. */
1233 ifp->if_timer = 5;
1234 }
1235 }
1236
1237 /*
1238 * sip_watchdog: [ifnet interface function]
1239 *
1240 * Watchdog timer handler.
1241 */
1242 void
1243 SIP_DECL(watchdog)(struct ifnet *ifp)
1244 {
1245 struct sip_softc *sc = ifp->if_softc;
1246
1247 /*
1248 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
1249 * If we get a timeout, try to sweep up transmit descriptors.
1250 * If we manage to sweep them all up, ignore the lack of
1251 * interrupt.
1252 */
1253 SIP_DECL(txintr)(sc);
1254
1255 if (sc->sc_txfree != SIP_NTXDESC) {
1256 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1257 ifp->if_oerrors++;
1258
1259 /* Reset the interface. */
1260 (void) SIP_DECL(init)(ifp);
1261 } else if (ifp->if_flags & IFF_DEBUG)
1262 printf("%s: recovered from device timeout\n",
1263 sc->sc_dev.dv_xname);
1264
1265 /* Try to get more packets going. */
1266 SIP_DECL(start)(ifp);
1267 }
1268
1269 /*
1270 * sip_ioctl: [ifnet interface function]
1271 *
1272 * Handle control requests from the operator.
1273 */
1274 int
1275 SIP_DECL(ioctl)(struct ifnet *ifp, u_long cmd, caddr_t data)
1276 {
1277 struct sip_softc *sc = ifp->if_softc;
1278 struct ifreq *ifr = (struct ifreq *)data;
1279 int s, error;
1280
1281 s = splnet();
1282
1283 switch (cmd) {
1284 case SIOCSIFMEDIA:
1285 case SIOCGIFMEDIA:
1286 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1287 break;
1288
1289 default:
1290 error = ether_ioctl(ifp, cmd, data);
1291 if (error == ENETRESET) {
1292 /*
1293 * Multicast list has changed; set the hardware filter
1294 * accordingly.
1295 */
1296 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
1297 error = 0;
1298 }
1299 break;
1300 }
1301
1302 /* Try to get more packets going. */
1303 SIP_DECL(start)(ifp);
1304
1305 splx(s);
1306 return (error);
1307 }
1308
1309 /*
1310 * sip_intr:
1311 *
1312 * Interrupt service routine.
1313 */
1314 int
1315 SIP_DECL(intr)(void *arg)
1316 {
1317 struct sip_softc *sc = arg;
1318 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1319 u_int32_t isr;
1320 int handled = 0;
1321
1322 for (;;) {
1323 /* Reading clears interrupt. */
1324 isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
1325 if ((isr & sc->sc_imr) == 0)
1326 break;
1327
1328 handled = 1;
1329
1330 if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
1331 SIP_EVCNT_INCR(&sc->sc_ev_rxintr);
1332
1333 /* Grab any new packets. */
1334 SIP_DECL(rxintr)(sc);
1335
1336 if (isr & ISR_RXORN) {
1337 printf("%s: receive FIFO overrun\n",
1338 sc->sc_dev.dv_xname);
1339
1340 /* XXX adjust rx_drain_thresh? */
1341 }
1342
1343 if (isr & ISR_RXIDLE) {
1344 printf("%s: receive ring overrun\n",
1345 sc->sc_dev.dv_xname);
1346
1347 /* Get the receive process going again. */
1348 bus_space_write_4(sc->sc_st, sc->sc_sh,
1349 SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
1350 bus_space_write_4(sc->sc_st, sc->sc_sh,
1351 SIP_CR, CR_RXE);
1352 }
1353 }
1354
1355 if (isr & (ISR_TXURN|ISR_TXDESC)) {
1356 SIP_EVCNT_INCR(&sc->sc_ev_txintr);
1357
1358 /* Sweep up transmit descriptors. */
1359 SIP_DECL(txintr)(sc);
1360
1361 if (isr & ISR_TXURN) {
1362 u_int32_t thresh;
1363
1364 printf("%s: transmit FIFO underrun",
1365 sc->sc_dev.dv_xname);
1366
1367 thresh = sc->sc_tx_drain_thresh + 1;
1368 if (thresh <= TXCFG_DRTH &&
1369 (thresh * 32) <= (SIP_TXFIFO_SIZE -
1370 (sc->sc_tx_fill_thresh * 32))) {
1371 printf("; increasing Tx drain "
1372 "threshold to %u bytes\n",
1373 thresh * 32);
1374 sc->sc_tx_drain_thresh = thresh;
1375 (void) SIP_DECL(init)(ifp);
1376 } else {
1377 (void) SIP_DECL(init)(ifp);
1378 printf("\n");
1379 }
1380 }
1381 }
1382
1383 #if !defined(DP83820)
1384 if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
1385 if (isr & ISR_PAUSE_ST) {
1386 sc->sc_flags |= SIPF_PAUSED;
1387 ifp->if_flags |= IFF_OACTIVE;
1388 }
1389 if (isr & ISR_PAUSE_END) {
1390 sc->sc_flags &= ~SIPF_PAUSED;
1391 ifp->if_flags &= ~IFF_OACTIVE;
1392 }
1393 }
1394 #endif /* ! DP83820 */
1395
1396 if (isr & ISR_HIBERR) {
1397 #define PRINTERR(bit, str) \
1398 if (isr & (bit)) \
1399 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1400 PRINTERR(ISR_DPERR, "parity error");
1401 PRINTERR(ISR_SSERR, "system error");
1402 PRINTERR(ISR_RMABT, "master abort");
1403 PRINTERR(ISR_RTABT, "target abort");
1404 PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
1405 (void) SIP_DECL(init)(ifp);
1406 #undef PRINTERR
1407 }
1408 }
1409
1410 /* Try to get more packets going. */
1411 SIP_DECL(start)(ifp);
1412
1413 return (handled);
1414 }
1415
1416 /*
1417 * sip_txintr:
1418 *
1419 * Helper; handle transmit interrupts.
1420 */
1421 void
1422 SIP_DECL(txintr)(struct sip_softc *sc)
1423 {
1424 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1425 struct sip_txsoft *txs;
1426 u_int32_t cmdsts;
1427
1428 if ((sc->sc_flags & SIPF_PAUSED) == 0)
1429 ifp->if_flags &= ~IFF_OACTIVE;
1430
1431 /*
1432 * Go through our Tx list and free mbufs for those
1433 * frames which have been transmitted.
1434 */
1435 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1436 SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1437 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1438
1439 cmdsts = le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
1440 if (cmdsts & CMDSTS_OWN)
1441 break;
1442
1443 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
1444
1445 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1446
1447 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1448 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1449 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1450 m_freem(txs->txs_mbuf);
1451 txs->txs_mbuf = NULL;
1452
1453 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1454
1455 /*
1456 * Check for errors and collisions.
1457 */
1458 if (cmdsts &
1459 (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
1460 ifp->if_oerrors++;
1461 if (cmdsts & CMDSTS_Tx_EC)
1462 ifp->if_collisions += 16;
1463 if (ifp->if_flags & IFF_DEBUG) {
1464 if (cmdsts & CMDSTS_Tx_ED)
1465 printf("%s: excessive deferral\n",
1466 sc->sc_dev.dv_xname);
1467 if (cmdsts & CMDSTS_Tx_EC)
1468 printf("%s: excessive collisions\n",
1469 sc->sc_dev.dv_xname);
1470 }
1471 } else {
1472 /* Packet was transmitted successfully. */
1473 ifp->if_opackets++;
1474 ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
1475 }
1476 }
1477
1478 /*
1479 * If there are no more pending transmissions, cancel the watchdog
1480 * timer.
1481 */
1482 if (txs == NULL)
1483 ifp->if_timer = 0;
1484 }
1485
1486 #if defined(DP83820)
1487 /*
1488 * sip_rxintr:
1489 *
1490 * Helper; handle receive interrupts.
1491 */
1492 void
1493 SIP_DECL(rxintr)(struct sip_softc *sc)
1494 {
1495 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1496 struct sip_rxsoft *rxs;
1497 struct mbuf *m, *tailm;
1498 u_int32_t cmdsts, extsts;
1499 int i, len;
1500
1501 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1502 rxs = &sc->sc_rxsoft[i];
1503
1504 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1505
1506 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1507 extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts);
1508
1509 /*
1510 * NOTE: OWN is set if owned by _consumer_. We're the
1511 * consumer of the receive ring, so if the bit is clear,
1512 * we have processed all of the packets.
1513 */
1514 if ((cmdsts & CMDSTS_OWN) == 0) {
1515 /*
1516 * We have processed all of the receive buffers.
1517 */
1518 break;
1519 }
1520
1521 if (__predict_false(sc->sc_rxdiscard)) {
1522 SIP_INIT_RXDESC(sc, i);
1523 if ((cmdsts & CMDSTS_MORE) == 0) {
1524 /* Reset our state. */
1525 sc->sc_rxdiscard = 0;
1526 }
1527 continue;
1528 }
1529
1530 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1531 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1532
1533 m = rxs->rxs_mbuf;
1534
1535 /*
1536 * Add a new receive buffer to the ring.
1537 */
1538 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1539 /*
1540 * Failed, throw away what we've done so
1541 * far, and discard the rest of the packet.
1542 */
1543 ifp->if_ierrors++;
1544 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1545 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1546 SIP_INIT_RXDESC(sc, i);
1547 if (cmdsts & CMDSTS_MORE)
1548 sc->sc_rxdiscard = 1;
1549 if (sc->sc_rxhead != NULL)
1550 m_freem(sc->sc_rxhead);
1551 SIP_RXCHAIN_RESET(sc);
1552 continue;
1553 }
1554
1555 SIP_RXCHAIN_LINK(sc, m);
1556
1557 /*
1558 * If this is not the end of the packet, keep
1559 * looking.
1560 */
1561 if (cmdsts & CMDSTS_MORE) {
1562 sc->sc_rxlen += m->m_len;
1563 continue;
1564 }
1565
1566 /*
1567 * Okay, we have the entire packet now...
1568 */
1569 *sc->sc_rxtailp = NULL;
1570 m = sc->sc_rxhead;
1571 tailm = sc->sc_rxtail;
1572
1573 SIP_RXCHAIN_RESET(sc);
1574
1575 /*
1576 * If an error occurred, update stats and drop the packet.
1577 */
1578 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1579 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1580 ifp->if_ierrors++;
1581 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1582 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1583 /* Receive overrun handled elsewhere. */
1584 printf("%s: receive descriptor error\n",
1585 sc->sc_dev.dv_xname);
1586 }
1587 #define PRINTERR(bit, str) \
1588 if (cmdsts & (bit)) \
1589 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1590 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1591 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1592 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1593 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1594 #undef PRINTERR
1595 m_freem(m);
1596 continue;
1597 }
1598
1599 /*
1600 * No errors.
1601 *
1602 * Note, the DP83820 includes the CRC with
1603 * every packet.
1604 */
1605 len = CMDSTS_SIZE(cmdsts);
1606 tailm->m_len = len - sc->sc_rxlen;
1607
1608 /*
1609 * If the packet is small enough to fit in a
1610 * single header mbuf, allocate one and copy
1611 * the data into it. This greatly reduces
1612 * memory consumption when we receive lots
1613 * of small packets.
1614 */
1615 if (SIP_DECL(copy_small) != 0 && len <= (MHLEN - 2)) {
1616 struct mbuf *nm;
1617 MGETHDR(nm, M_DONTWAIT, MT_DATA);
1618 if (nm == NULL) {
1619 ifp->if_ierrors++;
1620 m_freem(m);
1621 continue;
1622 }
1623 nm->m_data += 2;
1624 nm->m_pkthdr.len = nm->m_len = len;
1625 m_copydata(m, 0, len, mtod(nm, caddr_t));
1626 m_freem(m);
1627 m = nm;
1628 }
1629 #ifndef __NO_STRICT_ALIGNMENT
1630 else {
1631 /*
1632 * The DP83820's receive buffers must be 4-byte
1633 * aligned. But this means that the data after
1634 * the Ethernet header is misaligned. To compensate,
1635 * we have artificially shortened the buffer size
1636 * in the descriptor, and we do an overlapping copy
1637 * of the data two bytes further in (in the first
1638 * buffer of the chain only).
1639 */
1640 memmove(mtod(m, caddr_t) + 2, mtod(m, caddr_t),
1641 m->m_len);
1642 m->m_data += 2;
1643 }
1644 #endif /* ! __NO_STRICT_ALIGNMENT */
1645
1646 /*
1647 * If VLANs are enabled, VLAN packets have been unwrapped
1648 * for us. Associate the tag with the packet.
1649 */
1650 if (sc->sc_ethercom.ec_nvlans != 0 &&
1651 (extsts & EXTSTS_VPKT) != 0) {
1652 struct mbuf *vtag;
1653
1654 vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1655 if (vtag == NULL) {
1656 ifp->if_ierrors++;
1657 printf("%s: unable to allocate VLAN tag\n",
1658 sc->sc_dev.dv_xname);
1659 m_freem(m);
1660 continue;
1661 }
1662
1663 *mtod(vtag, int *) = ntohs(extsts & EXTSTS_VTCI);
1664 vtag->m_len = sizeof(int);
1665 }
1666
1667 /*
1668 * Set the incoming checksum information for the
1669 * packet.
1670 */
1671 if ((extsts & EXTSTS_IPPKT) != 0) {
1672 SIP_EVCNT_INCR(&sc->sc_ev_rxipsum);
1673 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1674 if (extsts & EXTSTS_Rx_IPERR)
1675 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1676 if (extsts & EXTSTS_TCPPKT) {
1677 SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
1678 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1679 if (extsts & EXTSTS_Rx_TCPERR)
1680 m->m_pkthdr.csum_flags |=
1681 M_CSUM_TCP_UDP_BAD;
1682 } else if (extsts & EXTSTS_UDPPKT) {
1683 SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum);
1684 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1685 if (extsts & EXTSTS_Rx_UDPERR)
1686 m->m_pkthdr.csum_flags |=
1687 M_CSUM_TCP_UDP_BAD;
1688 }
1689 }
1690
1691 ifp->if_ipackets++;
1692 m->m_flags |= M_HASFCS;
1693 m->m_pkthdr.rcvif = ifp;
1694 m->m_pkthdr.len = len;
1695
1696 #if NBPFILTER > 0
1697 /*
1698 * Pass this up to any BPF listeners, but only
1699 * pass it up the stack if it's for us.
1700 */
1701 if (ifp->if_bpf)
1702 bpf_mtap(ifp->if_bpf, m);
1703 #endif /* NBPFILTER > 0 */
1704
1705 /* Pass it on. */
1706 (*ifp->if_input)(ifp, m);
1707 }
1708
1709 /* Update the receive pointer. */
1710 sc->sc_rxptr = i;
1711 }
1712 #else /* ! DP83820 */
1713 /*
1714 * sip_rxintr:
1715 *
1716 * Helper; handle receive interrupts.
1717 */
1718 void
1719 SIP_DECL(rxintr)(struct sip_softc *sc)
1720 {
1721 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1722 struct sip_rxsoft *rxs;
1723 struct mbuf *m;
1724 u_int32_t cmdsts;
1725 int i, len;
1726
1727 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1728 rxs = &sc->sc_rxsoft[i];
1729
1730 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1731
1732 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1733
1734 /*
1735 * NOTE: OWN is set if owned by _consumer_. We're the
1736 * consumer of the receive ring, so if the bit is clear,
1737 * we have processed all of the packets.
1738 */
1739 if ((cmdsts & CMDSTS_OWN) == 0) {
1740 /*
1741 * We have processed all of the receive buffers.
1742 */
1743 break;
1744 }
1745
1746 /*
1747 * If any collisions were seen on the wire, count one.
1748 */
1749 if (cmdsts & CMDSTS_Rx_COL)
1750 ifp->if_collisions++;
1751
1752 /*
1753 * If an error occurred, update stats, clear the status
1754 * word, and leave the packet buffer in place. It will
1755 * simply be reused the next time the ring comes around.
1756 */
1757 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1758 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1759 ifp->if_ierrors++;
1760 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1761 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1762 /* Receive overrun handled elsewhere. */
1763 printf("%s: receive descriptor error\n",
1764 sc->sc_dev.dv_xname);
1765 }
1766 #define PRINTERR(bit, str) \
1767 if (cmdsts & (bit)) \
1768 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1769 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1770 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1771 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1772 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1773 #undef PRINTERR
1774 SIP_INIT_RXDESC(sc, i);
1775 continue;
1776 }
1777
1778 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1779 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1780
1781 /*
1782 * No errors; receive the packet. Note, the SiS 900
1783 * includes the CRC with every packet.
1784 */
1785 len = CMDSTS_SIZE(cmdsts);
1786
1787 #ifdef __NO_STRICT_ALIGNMENT
1788 /*
1789 * If the packet is small enough to fit in a
1790 * single header mbuf, allocate one and copy
1791 * the data into it. This greatly reduces
1792 * memory consumption when we receive lots
1793 * of small packets.
1794 *
1795 * Otherwise, we add a new buffer to the receive
1796 * chain. If this fails, we drop the packet and
1797 * recycle the old buffer.
1798 */
1799 if (SIP_DECL(copy_small) != 0 && len <= MHLEN) {
1800 MGETHDR(m, M_DONTWAIT, MT_DATA);
1801 if (m == NULL)
1802 goto dropit;
1803 memcpy(mtod(m, caddr_t),
1804 mtod(rxs->rxs_mbuf, caddr_t), len);
1805 SIP_INIT_RXDESC(sc, i);
1806 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1807 rxs->rxs_dmamap->dm_mapsize,
1808 BUS_DMASYNC_PREREAD);
1809 } else {
1810 m = rxs->rxs_mbuf;
1811 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1812 dropit:
1813 ifp->if_ierrors++;
1814 SIP_INIT_RXDESC(sc, i);
1815 bus_dmamap_sync(sc->sc_dmat,
1816 rxs->rxs_dmamap, 0,
1817 rxs->rxs_dmamap->dm_mapsize,
1818 BUS_DMASYNC_PREREAD);
1819 continue;
1820 }
1821 }
1822 #else
1823 /*
1824 * The SiS 900's receive buffers must be 4-byte aligned.
1825 * But this means that the data after the Ethernet header
1826 * is misaligned. We must allocate a new buffer and
1827 * copy the data, shifted forward 2 bytes.
1828 */
1829 MGETHDR(m, M_DONTWAIT, MT_DATA);
1830 if (m == NULL) {
1831 dropit:
1832 ifp->if_ierrors++;
1833 SIP_INIT_RXDESC(sc, i);
1834 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1835 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1836 continue;
1837 }
1838 if (len > (MHLEN - 2)) {
1839 MCLGET(m, M_DONTWAIT);
1840 if ((m->m_flags & M_EXT) == 0) {
1841 m_freem(m);
1842 goto dropit;
1843 }
1844 }
1845 m->m_data += 2;
1846
1847 /*
1848 * Note that we use clusters for incoming frames, so the
1849 * buffer is virtually contiguous.
1850 */
1851 memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
1852
1853 /* Allow the receive descriptor to continue using its mbuf. */
1854 SIP_INIT_RXDESC(sc, i);
1855 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1856 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1857 #endif /* __NO_STRICT_ALIGNMENT */
1858
1859 ifp->if_ipackets++;
1860 m->m_flags |= M_HASFCS;
1861 m->m_pkthdr.rcvif = ifp;
1862 m->m_pkthdr.len = m->m_len = len;
1863
1864 #if NBPFILTER > 0
1865 /*
1866 * Pass this up to any BPF listeners, but only
1867 * pass it up the stack if it's for us.
1868 */
1869 if (ifp->if_bpf)
1870 bpf_mtap(ifp->if_bpf, m);
1871 #endif /* NBPFILTER > 0 */
1872
1873 /* Pass it on. */
1874 (*ifp->if_input)(ifp, m);
1875 }
1876
1877 /* Update the receive pointer. */
1878 sc->sc_rxptr = i;
1879 }
1880 #endif /* DP83820 */
1881
1882 /*
1883 * sip_tick:
1884 *
1885 * One second timer, used to tick the MII.
1886 */
1887 void
1888 SIP_DECL(tick)(void *arg)
1889 {
1890 struct sip_softc *sc = arg;
1891 int s;
1892
1893 s = splnet();
1894 mii_tick(&sc->sc_mii);
1895 splx(s);
1896
1897 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
1898 }
1899
1900 /*
1901 * sip_reset:
1902 *
1903 * Perform a soft reset on the SiS 900.
1904 */
1905 void
1906 SIP_DECL(reset)(struct sip_softc *sc)
1907 {
1908 bus_space_tag_t st = sc->sc_st;
1909 bus_space_handle_t sh = sc->sc_sh;
1910 int i;
1911
1912 bus_space_write_4(st, sh, SIP_CR, CR_RST);
1913
1914 for (i = 0; i < SIP_TIMEOUT; i++) {
1915 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0)
1916 break;
1917 delay(2);
1918 }
1919
1920 if (i == SIP_TIMEOUT)
1921 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
1922
1923 delay(1000);
1924
1925 #ifdef DP83820
1926 /*
1927 * Set the general purpose I/O bits. Do it here in case we
1928 * need to have GPIO set up to talk to the media interface.
1929 */
1930 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior);
1931 delay(1000);
1932 #endif /* DP83820 */
1933 }
1934
1935 /*
1936 * sip_init: [ ifnet interface function ]
1937 *
1938 * Initialize the interface. Must be called at splnet().
1939 */
1940 int
1941 SIP_DECL(init)(struct ifnet *ifp)
1942 {
1943 struct sip_softc *sc = ifp->if_softc;
1944 bus_space_tag_t st = sc->sc_st;
1945 bus_space_handle_t sh = sc->sc_sh;
1946 struct sip_txsoft *txs;
1947 struct sip_rxsoft *rxs;
1948 struct sip_desc *sipd;
1949 u_int32_t reg;
1950 int i, error = 0;
1951
1952 /*
1953 * Cancel any pending I/O.
1954 */
1955 SIP_DECL(stop)(ifp, 0);
1956
1957 /*
1958 * Reset the chip to a known state.
1959 */
1960 SIP_DECL(reset)(sc);
1961
1962 #if !defined(DP83820)
1963 if (sc->sc_model->sip_vendor == PCI_VENDOR_NS &&
1964 sc->sc_model->sip_product == PCI_PRODUCT_NS_DP83815) {
1965 /*
1966 * DP83815 manual, page 78:
1967 * 4.4 Recommended Registers Configuration
1968 * For optimum performance of the DP83815, version noted
1969 * as DP83815CVNG (SRR = 203h), the listed register
1970 * modifications must be followed in sequence...
1971 *
1972 * It's not clear whether this should be 302h or 203h, because that
1973 * chip revision is listed as SRR 302h in the description of the
1974 * SRR register. However, my revision 302h DP83815 on the
1975 * Netgear FA311 purchased in 02/2001 needs these settings
1976 * to avoid tons of errors in AcceptPerfectMatch (non-
1977 * IFF_PROMISC) mode. I do not know if other revisions need
1978 * this set or not. [briggs -- 09 March 2001]
1979 *
1980 * Note that only the low-order 12 bits of 0xe4 are documented
1981 * and that this sets reserved bits in that register.
1982 */
1983 reg = bus_space_read_4(st, sh, SIP_NS_SRR);
1984 if (reg == 0x302) {
1985 bus_space_write_4(st, sh, 0x00cc, 0x0001);
1986 bus_space_write_4(st, sh, 0x00e4, 0x189C);
1987 bus_space_write_4(st, sh, 0x00fc, 0x0000);
1988 bus_space_write_4(st, sh, 0x00f4, 0x5040);
1989 bus_space_write_4(st, sh, 0x00f8, 0x008c);
1990 }
1991 }
1992 #endif /* ! DP83820 */
1993
1994 /*
1995 * Initialize the transmit descriptor ring.
1996 */
1997 for (i = 0; i < SIP_NTXDESC; i++) {
1998 sipd = &sc->sc_txdescs[i];
1999 memset(sipd, 0, sizeof(struct sip_desc));
2000 sipd->sipd_link = htole32(SIP_CDTXADDR(sc, SIP_NEXTTX(i)));
2001 }
2002 SIP_CDTXSYNC(sc, 0, SIP_NTXDESC,
2003 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2004 sc->sc_txfree = SIP_NTXDESC;
2005 sc->sc_txnext = 0;
2006
2007 /*
2008 * Initialize the transmit job descriptors.
2009 */
2010 SIMPLEQ_INIT(&sc->sc_txfreeq);
2011 SIMPLEQ_INIT(&sc->sc_txdirtyq);
2012 for (i = 0; i < SIP_TXQUEUELEN; i++) {
2013 txs = &sc->sc_txsoft[i];
2014 txs->txs_mbuf = NULL;
2015 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2016 }
2017
2018 /*
2019 * Initialize the receive descriptor and receive job
2020 * descriptor rings.
2021 */
2022 for (i = 0; i < SIP_NRXDESC; i++) {
2023 rxs = &sc->sc_rxsoft[i];
2024 if (rxs->rxs_mbuf == NULL) {
2025 if ((error = SIP_DECL(add_rxbuf)(sc, i)) != 0) {
2026 printf("%s: unable to allocate or map rx "
2027 "buffer %d, error = %d\n",
2028 sc->sc_dev.dv_xname, i, error);
2029 /*
2030 * XXX Should attempt to run with fewer receive
2031 * XXX buffers instead of just failing.
2032 */
2033 SIP_DECL(rxdrain)(sc);
2034 goto out;
2035 }
2036 } else
2037 SIP_INIT_RXDESC(sc, i);
2038 }
2039 sc->sc_rxptr = 0;
2040 #ifdef DP83820
2041 sc->sc_rxdiscard = 0;
2042 SIP_RXCHAIN_RESET(sc);
2043 #endif /* DP83820 */
2044
2045 /*
2046 * Set the configuration register; it's already initialized
2047 * in sip_attach().
2048 */
2049 bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg);
2050
2051 /*
2052 * Initialize the transmit fill and drain thresholds if
2053 * we have never done so.
2054 */
2055 if (sc->sc_tx_fill_thresh == 0) {
2056 /*
2057 * XXX This value should be tuned. This is the
2058 * minimum (32 bytes), and we may be able to
2059 * improve performance by increasing it.
2060 */
2061 sc->sc_tx_fill_thresh = 1;
2062 }
2063 if (sc->sc_tx_drain_thresh == 0) {
2064 /*
2065 * Start at a drain threshold of 512 bytes. We will
2066 * increase it if a DMA underrun occurs.
2067 *
2068 * XXX The minimum value of this variable should be
2069 * tuned. We may be able to improve performance
2070 * by starting with a lower value. That, however,
2071 * may trash the first few outgoing packets if the
2072 * PCI bus is saturated.
2073 */
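/* The TXCFG drain threshold field counts in 32-byte units, hence / 32. */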
2074 sc->sc_tx_drain_thresh = 512 / 32;
2075 }
2076
2077 /*
2078 * Initialize the prototype TXCFG register.
2079 */
2080 sc->sc_txcfg = TXCFG_ATP | TXCFG_MXDMA_512 |
2081 (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) |
2082 sc->sc_tx_drain_thresh;
2083 bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg);
2084
2085 /*
2086 * Initialize the receive drain threshold if we have never
2087 * done so.
2088 */
2089 if (sc->sc_rx_drain_thresh == 0) {
2090 /*
2091 * XXX This value should be tuned. This is set to the
2092 * maximum of 248 bytes, and we may be able to improve
2093 * performance by decreasing it (although we should never
2094 * set this value lower than 2; 14 bytes are required to
2095 * filter the packet).
2096 */
2097 sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT;
2098 }
2099
2100 /*
2101 * Initialize the prototype RXCFG register.
2102 */
2103 sc->sc_rxcfg = RXCFG_MXDMA_512 |
2104 (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT);
2105 bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg);
2106
2107 /* Set up the receive filter. */
2108 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
2109
2110 #ifdef DP83820
2111 /*
2112 * Initialize the VLAN/IP receive control register.
2113 * We enable checksum computation on all incoming
2114 * packets, and do not reject packets w/ bad checksums.
2115 */
2116 reg = 0;
2117 if (ifp->if_capenable &
2118 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2119 reg |= VRCR_IPEN;
2120 if (sc->sc_ethercom.ec_nvlans != 0)
2121 reg |= VRCR_VTDEN|VRCR_VTREN;
2122 bus_space_write_4(st, sh, SIP_VRCR, reg);
2123
2124 /*
2125 * Initialize the VLAN/IP transmit control register.
2126 * We enable outgoing checksum computation on a
2127 * per-packet basis.
2128 */
2129 reg = 0;
2130 if (ifp->if_capenable &
2131 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2132 reg |= VTCR_PPCHK;
2133 if (sc->sc_ethercom.ec_nvlans != 0)
2134 reg |= VTCR_VPPTI;
2135 bus_space_write_4(st, sh, SIP_VTCR, reg);
2136
2137 /*
2138 * If we're using VLANs, initialize the VLAN data register.
2139 * To understand why we bswap the VLAN Ethertype, see section
2140 * 4.2.36 of the DP83820 manual.
2141 */
2142 if (sc->sc_ethercom.ec_nvlans != 0)
2143 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN));
2144 #endif /* DP83820 */
2145
2146 /*
2147 * Give the transmit and receive rings to the chip.
2148 */
2149 bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
2150 bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
2151
2152 /*
2153 * Initialize the interrupt mask.
2154 */
2155 sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR|
2156 ISR_TXURN|ISR_TXDESC|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
2157 bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);
2158
2159 /*
2160 * Set the current media. Do this after initializing the prototype
2161 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
2162 * control.
2163 */
2164 mii_mediachg(&sc->sc_mii);
2165
2166 /*
2167 * Enable interrupts.
2168 */
2169 bus_space_write_4(st, sh, SIP_IER, IER_IE);
2170
2171 /*
2172 * Start the transmit and receive processes.
2173 */
2174 bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);
2175
2176 /*
2177 * Start the one second MII clock.
2178 */
2179 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2180
2181 /*
2182 * ...all done!
2183 */
2184 ifp->if_flags |= IFF_RUNNING;
2185 ifp->if_flags &= ~IFF_OACTIVE;
2186
2187 out:
2188 if (error)
2189 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2190 return (error);
2191 }
2192
2193 /*
2194 * sip_rxdrain:
2195 *
2196 * Drain the receive queue.
2197 */
2198 void
2199 SIP_DECL(rxdrain)(struct sip_softc *sc)
2200 {
2201 struct sip_rxsoft *rxs;
2202 int i;
2203
2204 for (i = 0; i < SIP_NRXDESC; i++) {
2205 rxs = &sc->sc_rxsoft[i];
2206 if (rxs->rxs_mbuf != NULL) {
2207 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2208 m_freem(rxs->rxs_mbuf);
2209 rxs->rxs_mbuf = NULL;
2210 }
2211 }
2212 }
2213
2214 /*
2215 * sip_stop: [ ifnet interface function ]
2216 *
2217 * Stop transmission on the interface.
2218 */
2219 void
2220 SIP_DECL(stop)(struct ifnet *ifp, int disable)
2221 {
2222 struct sip_softc *sc = ifp->if_softc;
2223 bus_space_tag_t st = sc->sc_st;
2224 bus_space_handle_t sh = sc->sc_sh;
2225 struct sip_txsoft *txs;
2226 u_int32_t cmdsts = 0; /* DEBUG */
2227
2228 /*
2229 * Stop the one second clock.
2230 */
2231 callout_stop(&sc->sc_tick_ch);
2232
2233 /* Down the MII. */
2234 mii_down(&sc->sc_mii);
2235
2236 /*
2237 * Disable interrupts.
2238 */
2239 bus_space_write_4(st, sh, SIP_IER, 0);
2240
2241 /*
2242 * Stop receiver and transmitter.
2243 */
2244 bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);
2245
2246 /*
2247 * Release any queued transmit buffers.
2248 */
2249 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
2250 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2251 SIMPLEQ_NEXT(txs, txs_q) == NULL &&
2252 (le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts) &
2253 CMDSTS_INTR) == 0)
2254 printf("%s: sip_stop: last descriptor does not "
2255 "have INTR bit set\n", sc->sc_dev.dv_xname);
2256 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
2257 #ifdef DIAGNOSTIC
2258 if (txs->txs_mbuf == NULL) {
2259 printf("%s: dirty txsoft with no mbuf chain\n",
2260 sc->sc_dev.dv_xname);
2261 panic("sip_stop");
2262 }
2263 #endif
2264 cmdsts |= /* DEBUG */
2265 le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
2266 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2267 m_freem(txs->txs_mbuf);
2268 txs->txs_mbuf = NULL;
2269 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2270 }
2271
2272 if (disable)
2273 SIP_DECL(rxdrain)(sc);
2274
2275 /*
2276 * Mark the interface down and cancel the watchdog timer.
2277 */
2278 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2279 ifp->if_timer = 0;
2280
2281 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2282 (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC)
2283 printf("%s: sip_stop: no INTR bits set in dirty tx "
2284 "descriptors\n", sc->sc_dev.dv_xname);
2285 }
2286
2287 /*
2288 * sip_read_eeprom:
2289 *
2290 * Read data from the serial EEPROM.
2291 */
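/*
 * The EEPROM is read by bit-banging the EROMAR register: assert
 * chip select (EECS), shift out the 3-bit READ opcode and the
 * 6-bit word address MSB-first on EEDI (pulsing EESK for each
 * bit), then clock in 16 data bits MSB-first from EEDO, and
 * finally drop chip select between words.
 */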
2292 void
2293 SIP_DECL(read_eeprom)(struct sip_softc *sc, int word, int wordcnt,
2294 u_int16_t *data)
2295 {
2296 bus_space_tag_t st = sc->sc_st;
2297 bus_space_handle_t sh = sc->sc_sh;
2298 u_int16_t reg;
2299 int i, x;
2300
2301 for (i = 0; i < wordcnt; i++) {
2302 /* Send CHIP SELECT. */
2303 reg = EROMAR_EECS;
2304 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2305
2306 /* Shift in the READ opcode. */
2307 for (x = 3; x > 0; x--) {
2308 if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
2309 reg |= EROMAR_EEDI;
2310 else
2311 reg &= ~EROMAR_EEDI;
2312 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2313 bus_space_write_4(st, sh, SIP_EROMAR,
2314 reg | EROMAR_EESK);
2315 delay(4);
2316 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2317 delay(4);
2318 }
2319
2320 /* Shift in address. */
2321 for (x = 6; x > 0; x--) {
2322 if ((word + i) & (1 << (x - 1)))
2323 reg |= EROMAR_EEDI;
2324 else
2325 reg &= ~EROMAR_EEDI;
2326 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2327 bus_space_write_4(st, sh, SIP_EROMAR,
2328 reg | EROMAR_EESK);
2329 delay(4);
2330 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2331 delay(4);
2332 }
2333
2334 /* Shift out data. */
2335 reg = EROMAR_EECS;
2336 data[i] = 0;
2337 for (x = 16; x > 0; x--) {
2338 bus_space_write_4(st, sh, SIP_EROMAR,
2339 reg | EROMAR_EESK);
2340 delay(4);
2341 if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
2342 data[i] |= (1 << (x - 1));
2343 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2344 delay(4);
2345 }
2346
2347 /* Clear CHIP SELECT. */
2348 bus_space_write_4(st, sh, SIP_EROMAR, 0);
2349 delay(4);
2350 }
2351 }
2352
2353 /*
2354 * sip_add_rxbuf:
2355 *
2356 * Add a receive buffer to the indicated descriptor.
2357 */
2358 int
2359 SIP_DECL(add_rxbuf)(struct sip_softc *sc, int idx)
2360 {
2361 struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
2362 struct mbuf *m;
2363 int error;
2364
2365 MGETHDR(m, M_DONTWAIT, MT_DATA);
2366 if (m == NULL)
2367 return (ENOBUFS);
2368
2369 MCLGET(m, M_DONTWAIT);
2370 if ((m->m_flags & M_EXT) == 0) {
2371 m_freem(m);
2372 return (ENOBUFS);
2373 }
2374
2375 #if defined(DP83820)
2376 m->m_len = SIP_RXBUF_LEN;
2377 #endif /* DP83820 */
2378
2379 if (rxs->rxs_mbuf != NULL)
2380 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2381
2382 rxs->rxs_mbuf = m;
2383
2384 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2385 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2386 BUS_DMA_READ|BUS_DMA_NOWAIT);
2387 if (error) {
2388 printf("%s: can't load rx DMA map %d, error = %d\n",
2389 sc->sc_dev.dv_xname, idx, error);
2390 panic("sip_add_rxbuf"); /* XXX */
2391 }
2392
2393 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2394 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2395
2396 SIP_INIT_RXDESC(sc, idx);
2397
2398 return (0);
2399 }
2400
2401 #if !defined(DP83820)
2402 /*
2403 * sip_sis900_set_filter:
2404 *
2405 * Set up the receive filter.
2406 */
2407 void
2408 SIP_DECL(sis900_set_filter)(struct sip_softc *sc)
2409 {
2410 bus_space_tag_t st = sc->sc_st;
2411 bus_space_handle_t sh = sc->sc_sh;
2412 struct ethercom *ec = &sc->sc_ethercom;
2413 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2414 struct ether_multi *enm;
2415 u_int8_t *cp;
2416 struct ether_multistep step;
2417 u_int32_t crc, mchash[8];
2418
2419 /*
2420 * Initialize the prototype RFCR.
2421 */
2422 sc->sc_rfcr = RFCR_RFEN;
2423 if (ifp->if_flags & IFF_BROADCAST)
2424 sc->sc_rfcr |= RFCR_AAB;
2425 if (ifp->if_flags & IFF_PROMISC) {
2426 sc->sc_rfcr |= RFCR_AAP;
2427 goto allmulti;
2428 }
2429
2430 /*
2431 * Set up the multicast address filter by passing all multicast
2432 * addresses through a CRC generator, and then using the high-order
2433 * 7 bits as an index into the 128 bit multicast hash table (only
2434 * the lower 16 bits of each 32 bit multicast hash register are
2435 * valid). The high-order 3 bits select the register, while the
2436 * low-order 4 bits select the bit within the register.
2437 */
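/*
 * For example, a CRC whose 7 most significant bits are 0x5a
 * (binary 1011010) sets bit 0xa of mchash[5].
 */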
2438
2439 memset(mchash, 0, sizeof(mchash));
2440
2441 ETHER_FIRST_MULTI(step, ec, enm);
2442 while (enm != NULL) {
2443 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2444 /*
2445 * We must listen to a range of multicast addresses.
2446 * For now, just accept all multicasts, rather than
2447 * trying to set only those filter bits needed to match
2448 * the range. (At this time, the only use of address
2449 * ranges is for IP multicast routing, for which the
2450 * range is big enough to require all bits set.)
2451 */
2452 goto allmulti;
2453 }
2454
2455 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
2456
2457 /* Just want the 7 most significant bits. */
2458 crc >>= 25;
2459
2460 /* Set the corresponding bit in the hash table. */
2461 mchash[crc >> 4] |= 1 << (crc & 0xf);
2462
2463 ETHER_NEXT_MULTI(step, enm);
2464 }
2465
2466 ifp->if_flags &= ~IFF_ALLMULTI;
2467 goto setit;
2468
2469 allmulti:
2470 ifp->if_flags |= IFF_ALLMULTI;
2471 sc->sc_rfcr |= RFCR_AAM;
2472
2473 setit:
2474 #define FILTER_EMIT(addr, data) \
2475 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2476 delay(1); \
2477 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2478 delay(1)
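/*
 * Each FILTER_EMIT selects a receive filter RAM offset by writing
 * RFCR (with RFEN clear, so the filter is disabled while it is
 * being programmed) and then writes 16 bits of data through RFDR.
 */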
2479
2480 /*
2481 * Disable receive filter, and program the node address.
2482 */
2483 cp = LLADDR(ifp->if_sadl);
2484 FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
2485 FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
2486 FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);
2487
2488 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2489 /*
2490 * Program the multicast hash table.
2491 */
2492 FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
2493 FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
2494 FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
2495 FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
2496 FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
2497 FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
2498 FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
2499 FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
2500 }
2501 #undef FILTER_EMIT
2502
2503 /*
2504 * Re-enable the receive filter.
2505 */
2506 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2507 }
2508 #endif /* ! DP83820 */
2509
2510 /*
2511 * sip_dp83815_set_filter:
2512 *
2513 * Set up the receive filter.
2514 */
2515 void
2516 SIP_DECL(dp83815_set_filter)(struct sip_softc *sc)
2517 {
2518 bus_space_tag_t st = sc->sc_st;
2519 bus_space_handle_t sh = sc->sc_sh;
2520 struct ethercom *ec = &sc->sc_ethercom;
2521 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2522 struct ether_multi *enm;
2523 u_int8_t *cp;
2524 struct ether_multistep step;
2525 u_int32_t crc, hash, slot, bit;
2526 #ifdef DP83820
2527 #define MCHASH_NWORDS 128
2528 #else
2529 #define MCHASH_NWORDS 32
2530 #endif /* DP83820 */
2531 u_int16_t mchash[MCHASH_NWORDS];
2532 int i;
2533
2534 /*
2535 * Initialize the prototype RFCR.
2536 * Enable the receive filter, and accept packets on a
2537 * Perfect Match of the destination address.
2538 * If IFF_BROADCAST, also accept all broadcast packets.
2539 * If IFF_PROMISC, accept all unicast packets (and later, set
2540 * IFF_ALLMULTI and accept all multicast, too).
2541 */
2542 sc->sc_rfcr = RFCR_RFEN | RFCR_APM;
2543 if (ifp->if_flags & IFF_BROADCAST)
2544 sc->sc_rfcr |= RFCR_AAB;
2545 if (ifp->if_flags & IFF_PROMISC) {
2546 sc->sc_rfcr |= RFCR_AAP;
2547 goto allmulti;
2548 }
2549
2550 #ifdef DP83820
2551 /*
2552 * Set up the DP83820 multicast address filter by passing all multicast
2553 * addresses through a CRC generator, and then using the high-order
2554 * 11 bits as an index into the 2048 bit multicast hash table. The
2555 * high-order 7 bits select the slot, while the low-order 4 bits
2556 * select the bit within the slot. Note that only the low 16-bits
2557 * of each filter word are used, and there are 128 filter words.
2558 */
2559 #else
2560 /*
2561 * Set up the DP83815 multicast address filter by passing all multicast
2562 * addresses through a CRC generator, and then using the high-order
2563 * 9 bits as an index into the 512 bit multicast hash table. The
2564 * high-order 5 bits select the slot, while the low-order 4 bits
2565 * select the bit within the slot. Note that only the low 16-bits
2566 * of each filter word are used, and there are 32 filter words.
2567 */
2568 #endif /* DP83820 */
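/*
 * In both cases the hash is split as slot = hash >> 4 and
 * bit = hash & 0xf; e.g. on the DP83815 a 9-bit hash of 0x1a5
 * sets bit 5 of filter word 0x1a.
 */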
2569
2570 memset(mchash, 0, sizeof(mchash));
2571
2572 ifp->if_flags &= ~IFF_ALLMULTI;
2573 ETHER_FIRST_MULTI(step, ec, enm);
2574 if (enm == NULL)
2575 goto setit;
2576 while (enm != NULL) {
2577 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2578 /*
2579 * We must listen to a range of multicast addresses.
2580 * For now, just accept all multicasts, rather than
2581 * trying to set only those filter bits needed to match
2582 * the range. (At this time, the only use of address
2583 * ranges is for IP multicast routing, for which the
2584 * range is big enough to require all bits set.)
2585 */
2586 goto allmulti;
2587 }
2588
2589 #ifdef DP83820
2590 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2591
2592 /* Just want the 11 most significant bits. */
2593 hash = crc >> 21;
2594 #else
2595 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
2596
2597 /* Just want the 9 most significant bits. */
2598 hash = crc >> 23;
2599 #endif /* DP83820 */
2600 slot = hash >> 4;
2601 bit = hash & 0xf;
2602
2603 /* Set the corresponding bit in the hash table. */
2604 mchash[slot] |= 1 << bit;
2605
2606 ETHER_NEXT_MULTI(step, enm);
2607 }
2608 sc->sc_rfcr |= RFCR_MHEN;
2609 goto setit;
2610
2611 allmulti:
2612 ifp->if_flags |= IFF_ALLMULTI;
2613 sc->sc_rfcr |= RFCR_AAM;
2614
2615 setit:
2616 #define FILTER_EMIT(addr, data) \
2617 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2618 delay(1); \
2619 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2620 delay(1)
2621
2622 /*
2623 * Disable receive filter, and program the node address.
2624 */
2625 cp = LLADDR(ifp->if_sadl);
2626 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]);
2627 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]);
2628 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]);
2629
2630 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2631 /*
2632 * Program the multicast hash table.
2633 */
2634 for (i = 0; i < MCHASH_NWORDS; i++) {
2635 FILTER_EMIT(RFCR_NS_RFADDR_FILTMEM + (i * 2),
2636 mchash[i]);
2637 }
2638 }
2639 #undef FILTER_EMIT
2640 #undef MCHASH_NWORDS
2641
2642 /*
2643 * Re-enable the receive filter.
2644 */
2645 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2646 }
2647
2648 #if defined(DP83820)
2649 /*
2650 * sip_dp83820_mii_readreg: [mii interface function]
2651 *
2652 * Read a PHY register on the MII of the DP83820.
2653 */
2654 int
2655 SIP_DECL(dp83820_mii_readreg)(struct device *self, int phy, int reg)
2656 {
2657
2658 return (mii_bitbang_readreg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2659 phy, reg));
2660 }
2661
2662 /*
2663 * sip_dp83820_mii_writereg: [mii interface function]
2664 *
2665 * Write a PHY register on the MII of the DP83820.
2666 */
2667 void
2668 SIP_DECL(dp83820_mii_writereg)(struct device *self, int phy, int reg, int val)
2669 {
2670
2671 mii_bitbang_writereg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2672 phy, reg, val);
2673 }
2674
2675 /*
2676 * sip_dp83820_mii_statchg: [mii interface function]
2677 *
2678 * Callback from MII layer when media changes.
2679 */
2680 void
2681 SIP_DECL(dp83820_mii_statchg)(struct device *self)
2682 {
2683 struct sip_softc *sc = (struct sip_softc *) self;
2684 u_int32_t cfg;
2685
2686 /*
2687 * Update TXCFG for full-duplex operation.
2688 */
2689 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2690 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2691 else
2692 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2693
2694 /*
2695 * Update RXCFG for full-duplex or loopback.
2696 */
2697 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2698 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2699 sc->sc_rxcfg |= RXCFG_ATX;
2700 else
2701 sc->sc_rxcfg &= ~RXCFG_ATX;
2702
2703 /*
2704 * Update CFG for MII/GMII.
2705 */
2706 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
2707 cfg = sc->sc_cfg | CFG_MODE_1000;
2708 else
2709 cfg = sc->sc_cfg;
2710
2711 /*
2712 * XXX 802.3x flow control.
2713 */
2714
2715 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
2716 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
2717 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
2718 }
2719
2720 /*
2721 * sip_dp83820_mii_bitbang_read: [mii bit-bang interface function]
2722 *
2723 * Read the MII serial port for the MII bit-bang module.
2724 */
2725 u_int32_t
2726 SIP_DECL(dp83820_mii_bitbang_read)(struct device *self)
2727 {
2728 struct sip_softc *sc = (void *) self;
2729
2730 return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR));
2731 }
2732
2733 /*
2734 * sip_dp83820_mii_bitbang_write: [mii bit-bang interface function]
2735 *
2736 * Write the MII serial port for the MII bit-bang module.
2737 */
2738 void
2739 SIP_DECL(dp83820_mii_bitbang_write)(struct device *self, u_int32_t val)
2740 {
2741 struct sip_softc *sc = (void *) self;
2742
2743 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val);
2744 }
2745 #else /* ! DP83820 */
2746 /*
2747 * sip_sis900_mii_readreg: [mii interface function]
2748 *
2749 * Read a PHY register on the MII.
2750 */
2751 int
2752 SIP_DECL(sis900_mii_readreg)(struct device *self, int phy, int reg)
2753 {
2754 struct sip_softc *sc = (struct sip_softc *) self;
2755 u_int32_t enphy;
2756
2757 /*
2758 * The SiS 900 has only an internal PHY on the MII. Only allow
2759 * MII address 0.
2760 */
2761 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
2762 return (0);
2763
2764 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
2765 (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
2766 ENPHY_RWCMD | ENPHY_ACCESS);
2767 do {
2768 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
2769 } while (enphy & ENPHY_ACCESS);
2770 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
2771 }
2772
2773 /*
2774 * sip_sis900_mii_writereg: [mii interface function]
2775 *
2776 * Write a PHY register on the MII.
2777 */
2778 void
2779 SIP_DECL(sis900_mii_writereg)(struct device *self, int phy, int reg, int val)
2780 {
2781 struct sip_softc *sc = (struct sip_softc *) self;
2782 u_int32_t enphy;
2783
2784 /*
2785 * The SiS 900 has only an internal PHY on the MII. Only allow
2786 * MII address 0.
2787 */
2788 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
2789 return;
2790
2791 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
2792 (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
2793 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
2794 do {
2795 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
2796 } while (enphy & ENPHY_ACCESS);
2797 }
2798
2799 /*
2800 * sip_sis900_mii_statchg: [mii interface function]
2801 *
2802 * Callback from MII layer when media changes.
2803 */
2804 void
2805 SIP_DECL(sis900_mii_statchg)(struct device *self)
2806 {
2807 struct sip_softc *sc = (struct sip_softc *) self;
2808 u_int32_t flowctl;
2809
2810 /*
2811 * Update TXCFG for full-duplex operation.
2812 */
2813 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2814 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2815 else
2816 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2817
2818 /*
2819 * Update RXCFG for full-duplex or loopback.
2820 */
2821 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2822 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2823 sc->sc_rxcfg |= RXCFG_ATX;
2824 else
2825 sc->sc_rxcfg &= ~RXCFG_ATX;
2826
2827 /*
2828 * Update IMR for use of 802.3x flow control.
2829 */
2830 if ((sc->sc_mii.mii_media_active & IFM_FLOW) != 0) {
2831 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
2832 flowctl = FLOWCTL_FLOWEN;
2833 } else {
2834 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
2835 flowctl = 0;
2836 }
2837
2838 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
2839 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
2840 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
2841 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
2842 }
2843
2844 /*
2845 * sip_dp83815_mii_readreg: [mii interface function]
2846 *
2847 * Read a PHY register on the MII.
2848 */
2849 int
2850 SIP_DECL(dp83815_mii_readreg)(struct device *self, int phy, int reg)
2851 {
2852 struct sip_softc *sc = (struct sip_softc *) self;
2853 u_int32_t val;
2854
2855 /*
2856 * The DP83815 only has an internal PHY. Only allow
2857 * MII address 0.
2858 */
2859 if (phy != 0)
2860 return (0);
2861
2862 /*
2863 * Apparently, after a reset, the DP83815 can take a while
2864 * to respond. During this recovery period, the BMSR returns
2865 * a value of 0. Catch this -- it's not supposed to happen
2866 * (the BMSR has some hardcoded-to-1 bits), and wait for the
2867 * PHY to come back to life.
2868 *
2869 * This works out because the BMSR is the first register
2870 * read during the PHY probe process.
2871 */
2872 do {
2873 val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg));
2874 } while (reg == MII_BMSR && val == 0);
2875
2876 return (val & 0xffff);
2877 }
2878
2879 /*
2880 * sip_dp83815_mii_writereg: [mii interface function]
2881 *
2882 * Write a PHY register to the MII.
2883 */
2884 void
2885 SIP_DECL(dp83815_mii_writereg)(struct device *self, int phy, int reg, int val)
2886 {
2887 struct sip_softc *sc = (struct sip_softc *) self;
2888
2889 /*
2890 * The DP83815 only has an internal PHY. Only allow
2891 * MII address 0.
2892 */
2893 if (phy != 0)
2894 return;
2895
2896 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val);
2897 }
2898
2899 /*
2900 * sip_dp83815_mii_statchg: [mii interface function]
2901 *
2902 * Callback from MII layer when media changes.
2903 */
2904 void
2905 SIP_DECL(dp83815_mii_statchg)(struct device *self)
2906 {
2907 struct sip_softc *sc = (struct sip_softc *) self;
2908
2909 /*
2910 * Update TXCFG for full-duplex operation.
2911 */
2912 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2913 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2914 else
2915 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2916
2917 /*
2918 * Update RXCFG for full-duplex or loopback.
2919 */
2920 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2921 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2922 sc->sc_rxcfg |= RXCFG_ATX;
2923 else
2924 sc->sc_rxcfg &= ~RXCFG_ATX;
2925
2926 /*
2927 * XXX 802.3x flow control.
2928 */
2929
2930 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
2931 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
2932 }
2933 #endif /* DP83820 */
2934
2935 #if defined(DP83820)
2936 void
2937 SIP_DECL(dp83820_read_macaddr)(struct sip_softc *sc, u_int8_t *enaddr)
2938 {
2939 u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2];
2940 u_int8_t cksum, *e, match;
2941 int i;
2942
2943 /*
2944 * EEPROM data format for the DP83820 can be found in
2945 * the DP83820 manual, section 4.2.4.
2946 */
2947
2948 SIP_DECL(read_eeprom)(sc, 0,
2949 sizeof(eeprom_data) / sizeof(eeprom_data[0]), eeprom_data);
2950
2951 match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8;
2952 match = ~(match - 1);
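/*
 * ~(match - 1) is the two's-complement negation of the stored
 * checksum byte, so the comparison below amounts to requiring
 * that (0x55 + the sum of the preceding EEPROM bytes + the
 * checksum byte) be zero modulo 256.
 */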
2953
2954 cksum = 0x55;
2955 e = (u_int8_t *) eeprom_data;
2956 for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++)
2957 cksum += *e++;
2958
2959 if (cksum != match)
2960 printf("%s: Checksum (%x) mismatch (%x)\n",
2961 sc->sc_dev.dv_xname, cksum, match);
2962
2963 enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff;
2964 enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8;
2965 enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff;
2966 enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8;
2967 enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff;
2968 enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8;
2969
2970 /* Get the GPIOR bits. */
2971 sc->sc_gpior = eeprom_data[0x04];
2972
2973 /* Get various CFG related bits. */
2974 if ((eeprom_data[0x05] >> 0) & 1)
2975 sc->sc_cfg |= CFG_EXT_125;
2976 if ((eeprom_data[0x05] >> 9) & 1)
2977 sc->sc_cfg |= CFG_TBI_EN;
2978 }
2979 #else /* ! DP83820 */
2980 void
2981 SIP_DECL(sis900_read_macaddr)(struct sip_softc *sc, u_int8_t *enaddr)
2982 {
2983 u_int16_t myea[ETHER_ADDR_LEN / 2];
2984
2985 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
2986 sizeof(myea) / sizeof(myea[0]), myea);
2987
2988 enaddr[0] = myea[0] & 0xff;
2989 enaddr[1] = myea[0] >> 8;
2990 enaddr[2] = myea[1] & 0xff;
2991 enaddr[3] = myea[1] >> 8;
2992 enaddr[4] = myea[2] & 0xff;
2993 enaddr[5] = myea[2] >> 8;
2994 }
2995
2996 /* Table and macro to bit-reverse an octet. */
2997 static const u_int8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
2998 #define bbr(v) ((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf])
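/* e.g. bbr(0x01) == 0x80; bbr(0x3c) == 0x3c. */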
2999
3000 void
3001 SIP_DECL(dp83815_read_macaddr)(struct sip_softc *sc, u_int8_t *enaddr)
3002 {
3003 u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea;
3004 u_int8_t cksum, *e, match;
3005 int i;
3006
3007 SIP_DECL(read_eeprom)(sc, 0, sizeof(eeprom_data) /
3008 sizeof(eeprom_data[0]), eeprom_data);
3009
3010 match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8;
3011 match = ~(match - 1);
3012
3013 cksum = 0x55;
3014 e = (u_int8_t *) eeprom_data;
3015 for (i = 0; i < SIP_DP83815_EEPROM_CHECKSUM; i++) {
3016 cksum += *e++;
3017 }
3018 if (cksum != match) {
3019 printf("%s: Checksum (%x) mismatch (%x)\n",
3020 sc->sc_dev.dv_xname, cksum, match);
3021 }
3022
3023 /*
3024 * Unrolled because it makes slightly more sense this way.
3025 * The DP83815 stores the MAC address across EEPROM words 6
3026 * through 9 (bit 0 of word 6 through bit 1 of word 9).
3027 */
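/*
 * enaddr[0], [2] and [4] each combine the low bit of one word with
 * the top seven bits of the next word; enaddr[1], [3] and [5] take
 * bits 8..1 of that next word. The bytes are still bit-reversed at
 * this point; see the bbr() pass below.
 */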
3028 ea = &eeprom_data[6];
3029 enaddr[0] = ((*ea & 0x1) << 7);
3030 ea++;
3031 enaddr[0] |= ((*ea & 0xFE00) >> 9);
3032 enaddr[1] = ((*ea & 0x1FE) >> 1);
3033 enaddr[2] = ((*ea & 0x1) << 7);
3034 ea++;
3035 enaddr[2] |= ((*ea & 0xFE00) >> 9);
3036 enaddr[3] = ((*ea & 0x1FE) >> 1);
3037 enaddr[4] = ((*ea & 0x1) << 7);
3038 ea++;
3039 enaddr[4] |= ((*ea & 0xFE00) >> 9);
3040 enaddr[5] = ((*ea & 0x1FE) >> 1);
3041
3042 /*
3043 * In case that's not weird enough, we also need to reverse
3044 * the bits in each byte. This all actually makes more sense
3045 * if you think about the EEPROM storage as an array of bits
3046 * being shifted into bytes, but that's not how we're looking
3047 * at it here...
3048 */
3049 for (i = 0; i < 6; i++)
3050 enaddr[i] = bbr(enaddr[i]);
3051 }
3052 #endif /* DP83820 */
3053
3054 /*
3055 * sip_mediastatus: [ifmedia interface function]
3056 *
3057 * Get the current interface media status.
3058 */
3059 void
3060 SIP_DECL(mediastatus)(struct ifnet *ifp, struct ifmediareq *ifmr)
3061 {
3062 struct sip_softc *sc = ifp->if_softc;
3063
3064 mii_pollstat(&sc->sc_mii);
3065 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3066 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3067 }
3068
3069 /*
3070 * sip_mediachange: [ifmedia interface function]
3071 *
3072 * Set hardware to newly-selected media.
3073 */
3074 int
3075 SIP_DECL(mediachange)(struct ifnet *ifp)
3076 {
3077 struct sip_softc *sc = ifp->if_softc;
3078
3079 if (ifp->if_flags & IFF_UP)
3080 mii_mediachg(&sc->sc_mii);
3081 return (0);
3082 }
3083