1 /* $NetBSD: if_sip.c,v 1.52.4.3 2002/11/01 18:23:53 tron Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999 Network Computer, Inc.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of Network Computer, Inc. nor the names of its
52 * contributors may be used to endorse or promote products derived
53 * from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
56 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
66 */
67
68 /*
69 * Device driver for the Silicon Integrated Systems SiS 900,
70 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and
71 * National Semiconductor DP83820 10/100/1000 PCI Ethernet
72 * controllers.
73 *
74 * Originally written to support the SiS 900 by Jason R. Thorpe for
75 * Network Computer, Inc.
76 *
77 * TODO:
78 *
79 * - Support the 10-bit interface on the DP83820 (for fiber).
80 *
81 * - Reduce the interrupt load.
82 */
83
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.52.4.3 2002/11/01 18:23:53 tron Exp $");
86
87 #include "bpfilter.h"
88
89 #include <sys/param.h>
90 #include <sys/systm.h>
91 #include <sys/callout.h>
92 #include <sys/mbuf.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/socket.h>
96 #include <sys/ioctl.h>
97 #include <sys/errno.h>
98 #include <sys/device.h>
99 #include <sys/queue.h>
100
101 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
102
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111
112 #include <machine/bus.h>
113 #include <machine/intr.h>
114 #include <machine/endian.h>
115
116 #include <dev/mii/mii.h>
117 #include <dev/mii/miivar.h>
118 #ifdef DP83820
119 #include <dev/mii/mii_bitbang.h>
120 #endif /* DP83820 */
121
122 #include <dev/pci/pcireg.h>
123 #include <dev/pci/pcivar.h>
124 #include <dev/pci/pcidevs.h>
125
126 #include <dev/pci/if_sipreg.h>
127
128 #ifdef DP83820 /* DP83820 Gigabit Ethernet */
129 #define SIP_DECL(x) __CONCAT(gsip_,x)
130 #else /* SiS900 and DP83815 */
131 #define SIP_DECL(x) __CONCAT(sip_,x)
132 #endif
133
134 #define SIP_STR(x) __STRING(SIP_DECL(x))
135
136 /*
137 * Transmit descriptor list size. This is arbitrary, but allocate
138 * enough descriptors for 256 pending transmissions, and 8 segments
139 * per packet. This MUST work out to a power of 2.
140 */
141 #define SIP_NTXSEGS 16
142 #define SIP_NTXSEGS_ALLOC 8
143
144 #define SIP_TXQUEUELEN 256
145 #define SIP_NTXDESC (SIP_TXQUEUELEN * SIP_NTXSEGS_ALLOC)
146 #define SIP_NTXDESC_MASK (SIP_NTXDESC - 1)
147 #define SIP_NEXTTX(x) (((x) + 1) & SIP_NTXDESC_MASK)
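/*
 * For illustration: because SIP_NTXDESC works out to a power of 2,
 * SIP_NEXTTX() can wrap the ring index with a simple mask instead of
 * a modulo.  With SIP_NTXDESC == 2048, for example,
 * SIP_NEXTTX(2047) == (2048 & 2047) == 0, back to the start of the ring.
 */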
148
149 #if defined(DP83820)
150 #define TX_DMAMAP_SIZE ETHER_MAX_LEN_JUMBO
151 #else
152 #define TX_DMAMAP_SIZE MCLBYTES
153 #endif
154
155 /*
156 * Receive descriptor list size. We have one Rx buffer per incoming
157 * packet, so this logic is a little simpler.
158 *
159 * Actually, on the DP83820, we allow the packet to consume more than
160 * one buffer, in order to support jumbo Ethernet frames. In that
161 * case, a packet may consume up to 5 buffers (assuming a 2048 byte
162 * mbuf cluster). 256 receive buffers hold only 51 maximum-size packets,
163 * so we'd better be quick about handling receive interrupts.
164 */
165 #if defined(DP83820)
166 #define SIP_NRXDESC 256
167 #else
168 #define SIP_NRXDESC 128
169 #endif /* DP83820 */
170 #define SIP_NRXDESC_MASK (SIP_NRXDESC - 1)
171 #define SIP_NEXTRX(x) (((x) + 1) & SIP_NRXDESC_MASK)
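/*
 * For illustration: on the DP83820, an ~9000-byte jumbo frame spread
 * over ~2KB receive buffers occupies 5 descriptors, which is where the
 * "up to 5 buffers" and "only 51 maximum-size packets" figures above
 * come from (256 / 5 == 51).
 */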
172
173 /*
174 * Control structures are DMA'd to the SiS900 chip. We allocate them in
175 * a single clump that maps to a single DMA segment to make several things
176 * easier.
177 */
178 struct sip_control_data {
179 /*
180 * The transmit descriptors.
181 */
182 struct sip_desc scd_txdescs[SIP_NTXDESC];
183
184 /*
185 * The receive descriptors.
186 */
187 struct sip_desc scd_rxdescs[SIP_NRXDESC];
188 };
189
190 #define SIP_CDOFF(x) offsetof(struct sip_control_data, x)
191 #define SIP_CDTXOFF(x) SIP_CDOFF(scd_txdescs[(x)])
192 #define SIP_CDRXOFF(x) SIP_CDOFF(scd_rxdescs[(x)])
193
194 /*
195 * Software state for transmit jobs.
196 */
197 struct sip_txsoft {
198 struct mbuf *txs_mbuf; /* head of our mbuf chain */
199 bus_dmamap_t txs_dmamap; /* our DMA map */
200 int txs_firstdesc; /* first descriptor in packet */
201 int txs_lastdesc; /* last descriptor in packet */
202 SIMPLEQ_ENTRY(sip_txsoft) txs_q;
203 };
204
205 SIMPLEQ_HEAD(sip_txsq, sip_txsoft);
206
207 /*
208 * Software state for receive jobs.
209 */
210 struct sip_rxsoft {
211 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
212 bus_dmamap_t rxs_dmamap; /* our DMA map */
213 };
214
215 /*
216 * Software state per device.
217 */
218 struct sip_softc {
219 struct device sc_dev; /* generic device information */
220 bus_space_tag_t sc_st; /* bus space tag */
221 bus_space_handle_t sc_sh; /* bus space handle */
222 bus_dma_tag_t sc_dmat; /* bus DMA tag */
223 struct ethercom sc_ethercom; /* ethernet common data */
224 void *sc_sdhook; /* shutdown hook */
225
226 const struct sip_product *sc_model; /* which model are we? */
227 int sc_rev; /* chip revision */
228
229 void *sc_ih; /* interrupt cookie */
230
231 struct mii_data sc_mii; /* MII/media information */
232
233 struct callout sc_tick_ch; /* tick callout */
234
235 bus_dmamap_t sc_cddmamap; /* control data DMA map */
236 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
237
238 /*
239 * Software state for transmit and receive descriptors.
240 */
241 struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
242 struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];
243
244 /*
245 * Control data structures.
246 */
247 struct sip_control_data *sc_control_data;
248 #define sc_txdescs sc_control_data->scd_txdescs
249 #define sc_rxdescs sc_control_data->scd_rxdescs
250
251 #ifdef SIP_EVENT_COUNTERS
252 /*
253 * Event counters.
254 */
255 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
256 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
257 struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
258 struct evcnt sc_ev_txdintr; /* Tx descriptor interrupts */
259 struct evcnt sc_ev_txiintr; /* Tx idle interrupts */
260 struct evcnt sc_ev_rxintr; /* Rx interrupts */
261 #ifdef DP83820
262 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
263 struct evcnt sc_ev_rxtcpsum; /* TCP checksums checked in-bound */
264 struct evcnt sc_ev_rxudpsum; /* UDP checksums checked in-bound */
265 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
266 struct evcnt sc_ev_txtcpsum; /* TCP checksums comp. out-bound */
267 struct evcnt sc_ev_txudpsum; /* UDP checksums comp. out-bound */
268 #endif /* DP83820 */
269 #endif /* SIP_EVENT_COUNTERS */
270
271 u_int32_t sc_txcfg; /* prototype TXCFG register */
272 u_int32_t sc_rxcfg; /* prototype RXCFG register */
273 u_int32_t sc_imr; /* prototype IMR register */
274 u_int32_t sc_rfcr; /* prototype RFCR register */
275
276 u_int32_t sc_cfg; /* prototype CFG register */
277
278 #ifdef DP83820
279 u_int32_t sc_gpior; /* prototype GPIOR register */
280 #endif /* DP83820 */
281
282 u_int32_t sc_tx_fill_thresh; /* transmit fill threshold */
283 u_int32_t sc_tx_drain_thresh; /* transmit drain threshold */
284
285 u_int32_t sc_rx_drain_thresh; /* receive drain threshold */
286
287 int sc_flags; /* misc. flags; see below */
288
289 int sc_txfree; /* number of free Tx descriptors */
290 int sc_txnext; /* next ready Tx descriptor */
291 int sc_txwin; /* Tx descriptors since last intr */
292
293 struct sip_txsq sc_txfreeq; /* free Tx descsofts */
294 struct sip_txsq sc_txdirtyq; /* dirty Tx descsofts */
295
296 int sc_rxptr; /* next ready Rx descriptor/descsoft */
297 #if defined(DP83820)
298 int sc_rxdiscard;
299 int sc_rxlen;
300 struct mbuf *sc_rxhead;
301 struct mbuf *sc_rxtail;
302 struct mbuf **sc_rxtailp;
303 #endif /* DP83820 */
304 };
305
306 /* sc_flags */
307 #define SIPF_PAUSED 0x00000001 /* paused (802.3x flow control) */
308
309 #ifdef DP83820
310 #define SIP_RXCHAIN_RESET(sc) \
311 do { \
312 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
313 *(sc)->sc_rxtailp = NULL; \
314 (sc)->sc_rxlen = 0; \
315 } while (/*CONSTCOND*/0)
316
317 #define SIP_RXCHAIN_LINK(sc, m) \
318 do { \
319 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
320 (sc)->sc_rxtailp = &(m)->m_next; \
321 } while (/*CONSTCOND*/0)
322 #endif /* DP83820 */
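/*
 * For illustration: sc_rxtailp always points at the m_next field that
 * will take the next fragment, so SIP_RXCHAIN_LINK() appends an mbuf
 * in constant time without walking the chain, and SIP_RXCHAIN_RESET()
 * simply points it back at sc_rxhead for the next packet.
 */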
323
324 #ifdef SIP_EVENT_COUNTERS
325 #define SIP_EVCNT_INCR(ev) (ev)->ev_count++
326 #else
327 #define SIP_EVCNT_INCR(ev) /* nothing */
328 #endif
329
330 #define SIP_CDTXADDR(sc, x) ((sc)->sc_cddma + SIP_CDTXOFF((x)))
331 #define SIP_CDRXADDR(sc, x) ((sc)->sc_cddma + SIP_CDRXOFF((x)))
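/*
 * For illustration: these macros turn a ring index into the bus
 * address the chip should use, e.g. SIP_CDTXADDR(sc, 5) is sc_cddma
 * plus offsetof(struct sip_control_data, scd_txdescs[5]), where
 * sc_cddma is the bus address of the single control-data DMA segment.
 */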
332
333 #define SIP_CDTXSYNC(sc, x, n, ops) \
334 do { \
335 int __x, __n; \
336 \
337 __x = (x); \
338 __n = (n); \
339 \
340 /* If it will wrap around, sync to the end of the ring. */ \
341 if ((__x + __n) > SIP_NTXDESC) { \
342 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
343 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * \
344 (SIP_NTXDESC - __x), (ops)); \
345 __n -= (SIP_NTXDESC - __x); \
346 __x = 0; \
347 } \
348 \
349 /* Now sync whatever is left. */ \
350 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
351 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops)); \
352 } while (0)
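/*
 * For illustration: SIP_CDTXSYNC() handles ring wrap by splitting the
 * sync in two.  Syncing 4 descriptors starting at index 2046 (with
 * SIP_NTXDESC == 2048) becomes one bus_dmamap_sync() covering
 * descriptors 2046-2047 and a second covering descriptors 0-1.
 */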
353
354 #define SIP_CDRXSYNC(sc, x, ops) \
355 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
356 SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))
357
358 #ifdef DP83820
359 #define SIP_INIT_RXDESC_EXTSTS __sipd->sipd_extsts = 0;
360 #define SIP_RXBUF_LEN (MCLBYTES - 4)
361 #else
362 #define SIP_INIT_RXDESC_EXTSTS /* nothing */
363 #define SIP_RXBUF_LEN (MCLBYTES - 1) /* field width */
364 #endif
365 #define SIP_INIT_RXDESC(sc, x) \
366 do { \
367 struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
368 struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)]; \
369 \
370 __sipd->sipd_link = \
371 htole32(SIP_CDRXADDR((sc), SIP_NEXTRX((x)))); \
372 __sipd->sipd_bufptr = \
373 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr); \
374 __sipd->sipd_cmdsts = htole32(CMDSTS_INTR | \
375 (SIP_RXBUF_LEN & CMDSTS_SIZE_MASK)); \
376 SIP_INIT_RXDESC_EXTSTS \
377 SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
378 } while (0)
379
380 #define SIP_CHIP_VERS(sc, v, p, r) \
381 ((sc)->sc_model->sip_vendor == (v) && \
382 (sc)->sc_model->sip_product == (p) && \
383 (sc)->sc_rev == (r))
384
385 #define SIP_CHIP_MODEL(sc, v, p) \
386 ((sc)->sc_model->sip_vendor == (v) && \
387 (sc)->sc_model->sip_product == (p))
388
389 #if !defined(DP83820)
390 #define SIP_SIS900_REV(sc, rev) \
391 SIP_CHIP_VERS((sc), PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, (rev))
392 #endif
393
394 #define SIP_TIMEOUT 1000
395
396 void SIP_DECL(start)(struct ifnet *);
397 void SIP_DECL(watchdog)(struct ifnet *);
398 int SIP_DECL(ioctl)(struct ifnet *, u_long, caddr_t);
399 int SIP_DECL(init)(struct ifnet *);
400 void SIP_DECL(stop)(struct ifnet *, int);
401
402 void SIP_DECL(shutdown)(void *);
403
404 void SIP_DECL(reset)(struct sip_softc *);
405 void SIP_DECL(rxdrain)(struct sip_softc *);
406 int SIP_DECL(add_rxbuf)(struct sip_softc *, int);
407 void SIP_DECL(read_eeprom)(struct sip_softc *, int, int, u_int16_t *);
408 void SIP_DECL(tick)(void *);
409
410 #if !defined(DP83820)
411 void SIP_DECL(sis900_set_filter)(struct sip_softc *);
412 #endif /* ! DP83820 */
413 void SIP_DECL(dp83815_set_filter)(struct sip_softc *);
414
415 #if defined(DP83820)
416 void SIP_DECL(dp83820_read_macaddr)(struct sip_softc *,
417 const struct pci_attach_args *, u_int8_t *);
418 #else
419 void SIP_DECL(sis900_read_macaddr)(struct sip_softc *,
420 const struct pci_attach_args *, u_int8_t *);
421 void SIP_DECL(dp83815_read_macaddr)(struct sip_softc *,
422 const struct pci_attach_args *, u_int8_t *);
423 #endif /* DP83820 */
424
425 int SIP_DECL(intr)(void *);
426 void SIP_DECL(txintr)(struct sip_softc *);
427 void SIP_DECL(rxintr)(struct sip_softc *);
428
429 #if defined(DP83820)
430 int SIP_DECL(dp83820_mii_readreg)(struct device *, int, int);
431 void SIP_DECL(dp83820_mii_writereg)(struct device *, int, int, int);
432 void SIP_DECL(dp83820_mii_statchg)(struct device *);
433 #else
434 int SIP_DECL(sis900_mii_readreg)(struct device *, int, int);
435 void SIP_DECL(sis900_mii_writereg)(struct device *, int, int, int);
436 void SIP_DECL(sis900_mii_statchg)(struct device *);
437
438 int SIP_DECL(dp83815_mii_readreg)(struct device *, int, int);
439 void SIP_DECL(dp83815_mii_writereg)(struct device *, int, int, int);
440 void SIP_DECL(dp83815_mii_statchg)(struct device *);
441 #endif /* DP83820 */
442
443 int SIP_DECL(mediachange)(struct ifnet *);
444 void SIP_DECL(mediastatus)(struct ifnet *, struct ifmediareq *);
445
446 int SIP_DECL(match)(struct device *, struct cfdata *, void *);
447 void SIP_DECL(attach)(struct device *, struct device *, void *);
448
449 int SIP_DECL(copy_small) = 0;
450
451 struct cfattach SIP_DECL(ca) = {
452 sizeof(struct sip_softc), SIP_DECL(match), SIP_DECL(attach),
453 };
454
455 /*
456 * Descriptions of the variants of the SiS900.
457 */
458 struct sip_variant {
459 int (*sipv_mii_readreg)(struct device *, int, int);
460 void (*sipv_mii_writereg)(struct device *, int, int, int);
461 void (*sipv_mii_statchg)(struct device *);
462 void (*sipv_set_filter)(struct sip_softc *);
463 void (*sipv_read_macaddr)(struct sip_softc *,
464 const struct pci_attach_args *, u_int8_t *);
465 };
466
467 #if defined(DP83820)
468 u_int32_t SIP_DECL(dp83820_mii_bitbang_read)(struct device *);
469 void SIP_DECL(dp83820_mii_bitbang_write)(struct device *, u_int32_t);
470
471 const struct mii_bitbang_ops SIP_DECL(dp83820_mii_bitbang_ops) = {
472 SIP_DECL(dp83820_mii_bitbang_read),
473 SIP_DECL(dp83820_mii_bitbang_write),
474 {
475 EROMAR_MDIO, /* MII_BIT_MDO */
476 EROMAR_MDIO, /* MII_BIT_MDI */
477 EROMAR_MDC, /* MII_BIT_MDC */
478 EROMAR_MDDIR, /* MII_BIT_DIR_HOST_PHY */
479 0, /* MII_BIT_DIR_PHY_HOST */
480 }
481 };
482 #endif /* DP83820 */
483
484 #if defined(DP83820)
485 const struct sip_variant SIP_DECL(variant_dp83820) = {
486 SIP_DECL(dp83820_mii_readreg),
487 SIP_DECL(dp83820_mii_writereg),
488 SIP_DECL(dp83820_mii_statchg),
489 SIP_DECL(dp83815_set_filter),
490 SIP_DECL(dp83820_read_macaddr),
491 };
492 #else
493 const struct sip_variant SIP_DECL(variant_sis900) = {
494 SIP_DECL(sis900_mii_readreg),
495 SIP_DECL(sis900_mii_writereg),
496 SIP_DECL(sis900_mii_statchg),
497 SIP_DECL(sis900_set_filter),
498 SIP_DECL(sis900_read_macaddr),
499 };
500
501 const struct sip_variant SIP_DECL(variant_dp83815) = {
502 SIP_DECL(dp83815_mii_readreg),
503 SIP_DECL(dp83815_mii_writereg),
504 SIP_DECL(dp83815_mii_statchg),
505 SIP_DECL(dp83815_set_filter),
506 SIP_DECL(dp83815_read_macaddr),
507 };
508 #endif /* DP83820 */
509
510 /*
511 * Devices supported by this driver.
512 */
513 const struct sip_product {
514 pci_vendor_id_t sip_vendor;
515 pci_product_id_t sip_product;
516 const char *sip_name;
517 const struct sip_variant *sip_variant;
518 } SIP_DECL(products)[] = {
519 #if defined(DP83820)
520 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83820,
521 "NatSemi DP83820 Gigabit Ethernet",
522 &SIP_DECL(variant_dp83820) },
523 #else
524 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900,
525 "SiS 900 10/100 Ethernet",
526 &SIP_DECL(variant_sis900) },
527 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016,
528 "SiS 7016 10/100 Ethernet",
529 &SIP_DECL(variant_sis900) },
530
531 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815,
532 "NatSemi DP83815 10/100 Ethernet",
533 &SIP_DECL(variant_dp83815) },
534 #endif /* DP83820 */
535
536 { 0, 0,
537 NULL,
538 NULL },
539 };
540
541 static const struct sip_product *
542 SIP_DECL(lookup)(const struct pci_attach_args *pa)
543 {
544 const struct sip_product *sip;
545
546 for (sip = SIP_DECL(products); sip->sip_name != NULL; sip++) {
547 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
548 PCI_PRODUCT(pa->pa_id) == sip->sip_product)
549 return (sip);
550 }
551 return (NULL);
552 }
553
554 int
555 SIP_DECL(match)(struct device *parent, struct cfdata *cf, void *aux)
556 {
557 struct pci_attach_args *pa = aux;
558
559 if (SIP_DECL(lookup)(pa) != NULL)
560 return (1);
561
562 return (0);
563 }
564
565 void
566 SIP_DECL(attach)(struct device *parent, struct device *self, void *aux)
567 {
568 struct sip_softc *sc = (struct sip_softc *) self;
569 struct pci_attach_args *pa = aux;
570 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
571 pci_chipset_tag_t pc = pa->pa_pc;
572 pci_intr_handle_t ih;
573 const char *intrstr = NULL;
574 bus_space_tag_t iot, memt;
575 bus_space_handle_t ioh, memh;
576 bus_dma_segment_t seg;
577 int ioh_valid, memh_valid;
578 int i, rseg, error;
579 const struct sip_product *sip;
580 pcireg_t pmode;
581 u_int8_t enaddr[ETHER_ADDR_LEN];
582 int pmreg;
583 #ifdef DP83820
584 pcireg_t memtype;
585 u_int32_t reg;
586 #endif /* DP83820 */
587
588 callout_init(&sc->sc_tick_ch);
589
590 sip = SIP_DECL(lookup)(pa);
591 if (sip == NULL) {
592 printf("\n");
593 panic(SIP_STR(attach) ": impossible");
594 }
595 sc->sc_rev = PCI_REVISION(pa->pa_class);
596
597 printf(": %s, rev %#02x\n", sip->sip_name, sc->sc_rev);
598
599 sc->sc_model = sip;
600
601 /*
602 * XXX Work-around broken PXE firmware on some boards.
603 *
604 * The DP83815 shares an address decoder with the MEM BAR
605 * and the ROM BAR. Make sure the ROM BAR is disabled,
606 * so that memory mapped access works.
607 */
608 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM,
609 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM) &
610 ~PCI_MAPREG_ROM_ENABLE);
611
612 /*
613 * Map the device.
614 */
615 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
616 PCI_MAPREG_TYPE_IO, 0,
617 &iot, &ioh, NULL, NULL) == 0);
618 #ifdef DP83820
619 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA);
620 switch (memtype) {
621 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
622 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
623 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
624 memtype, 0, &memt, &memh, NULL, NULL) == 0);
625 break;
626 default:
627 memh_valid = 0;
628 }
629 #else
630 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
631 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
632 &memt, &memh, NULL, NULL) == 0);
633 #endif /* DP83820 */
634
635 if (memh_valid) {
636 sc->sc_st = memt;
637 sc->sc_sh = memh;
638 } else if (ioh_valid) {
639 sc->sc_st = iot;
640 sc->sc_sh = ioh;
641 } else {
642 printf("%s: unable to map device registers\n",
643 sc->sc_dev.dv_xname);
644 return;
645 }
646
647 sc->sc_dmat = pa->pa_dmat;
648
649 /*
650 * Make sure bus mastering is enabled. Also make sure
651 * Write/Invalidate is enabled if we're allowed to use it.
652 */
653 pmreg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
654 if (pa->pa_flags & PCI_FLAGS_MWI_OKAY)
655 pmreg |= PCI_COMMAND_INVALIDATE_ENABLE;
656 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
657 pmreg | PCI_COMMAND_MASTER_ENABLE);
658
659 /* Get it out of power save mode if needed. */
660 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
661 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
662 if (pmode == 3) {
663 /*
664 * The card has lost all configuration data in
665 * this state, so punt.
666 */
667 printf("%s: unable to wake up from power state D3\n",
668 sc->sc_dev.dv_xname);
669 return;
670 }
671 if (pmode != 0) {
672 printf("%s: waking up from power state D%d\n",
673 sc->sc_dev.dv_xname, pmode);
674 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
675 }
676 }
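/*
 * Note: the two-bit field read above is the PCI power-management
 * state (0 through 3, corresponding to D0 through D3).  A device in
 * D3 has lost its configuration, hence the bail-out; for D1 or D2 it
 * suffices to write 0 (D0) back to the control register.
 */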
677
678 /*
679 * Map and establish our interrupt.
680 */
681 if (pci_intr_map(pa, &ih)) {
682 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
683 return;
684 }
685 intrstr = pci_intr_string(pc, ih);
686 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, SIP_DECL(intr), sc);
687 if (sc->sc_ih == NULL) {
688 printf("%s: unable to establish interrupt",
689 sc->sc_dev.dv_xname);
690 if (intrstr != NULL)
691 printf(" at %s", intrstr);
692 printf("\n");
693 return;
694 }
695 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
696
697 SIMPLEQ_INIT(&sc->sc_txfreeq);
698 SIMPLEQ_INIT(&sc->sc_txdirtyq);
699
700 /*
701 * Allocate the control data structures, and create and load the
702 * DMA map for it.
703 */
704 if ((error = bus_dmamem_alloc(sc->sc_dmat,
705 sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
706 0)) != 0) {
707 printf("%s: unable to allocate control data, error = %d\n",
708 sc->sc_dev.dv_xname, error);
709 goto fail_0;
710 }
711
712 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
713 sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data,
714 BUS_DMA_COHERENT)) != 0) {
715 printf("%s: unable to map control data, error = %d\n",
716 sc->sc_dev.dv_xname, error);
717 goto fail_1;
718 }
719
720 if ((error = bus_dmamap_create(sc->sc_dmat,
721 sizeof(struct sip_control_data), 1,
722 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
723 printf("%s: unable to create control data DMA map, "
724 "error = %d\n", sc->sc_dev.dv_xname, error);
725 goto fail_2;
726 }
727
728 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
729 sc->sc_control_data, sizeof(struct sip_control_data), NULL,
730 0)) != 0) {
731 printf("%s: unable to load control data DMA map, error = %d\n",
732 sc->sc_dev.dv_xname, error);
733 goto fail_3;
734 }
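/*
 * Note: the four calls above are the usual bus_dma(9) sequence for a
 * shared control structure -- bus_dmamem_alloc() for DMA-safe memory,
 * bus_dmamem_map() for a kernel virtual mapping, bus_dmamap_create()
 * for the map object, and bus_dmamap_load() to obtain the bus address.
 * The fail_* labels at the end of attach undo these steps in reverse
 * order.
 */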
735
736 /*
737 * Create the transmit buffer DMA maps.
738 */
739 for (i = 0; i < SIP_TXQUEUELEN; i++) {
740 if ((error = bus_dmamap_create(sc->sc_dmat, TX_DMAMAP_SIZE,
741 SIP_NTXSEGS, MCLBYTES, 0, 0,
742 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
743 printf("%s: unable to create tx DMA map %d, "
744 "error = %d\n", sc->sc_dev.dv_xname, i, error);
745 goto fail_4;
746 }
747 }
748
749 /*
750 * Create the receive buffer DMA maps.
751 */
752 for (i = 0; i < SIP_NRXDESC; i++) {
753 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
754 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
755 printf("%s: unable to create rx DMA map %d, "
756 "error = %d\n", sc->sc_dev.dv_xname, i, error);
757 goto fail_5;
758 }
759 sc->sc_rxsoft[i].rxs_mbuf = NULL;
760 }
761
762 /*
763 * Reset the chip to a known state.
764 */
765 SIP_DECL(reset)(sc);
766
767 /*
768 * Read the Ethernet address from the EEPROM. This might
769 * also fetch other stuff from the EEPROM and stash it
770 * in the softc.
771 */
772 sc->sc_cfg = 0;
773 #if !defined(DP83820)
774 if (SIP_SIS900_REV(sc,SIS_REV_635) ||
775 SIP_SIS900_REV(sc,SIS_REV_900B))
776 sc->sc_cfg |= (CFG_PESEL | CFG_RNDCNT);
777 #endif
778
779 (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr);
780
781 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
782 ether_sprintf(enaddr));
783
784 /*
785 * Initialize the configuration register: aggressive PCI
786 * bus request algorithm, default backoff, default OW timer,
787 * default parity error detection.
788 *
789 * NOTE: "Big endian mode" is useless on the SiS900 and
790 * friends -- it affects packet data, not descriptors.
791 */
792 #ifdef DP83820
793 /*
794 * XXX Need some PCI flags indicating support for
795 * XXX 64-bit addressing.
796 */
797 sc->sc_cfg &= ~(CFG_M64ADDR | CFG_T64ADDR);
798
799 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
800 if (reg & CFG_PCI64_DET) {
801 printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);
802 if ((sc->sc_cfg & CFG_DATA64_EN) == 0)
803 printf("%s: 64-bit data transfers disabled in EEPROM\n",
804 sc->sc_dev.dv_xname);
805 } else
806 sc->sc_cfg &= ~CFG_DATA64_EN;
807
808 if (sc->sc_cfg & (CFG_TBI_EN|CFG_EXT_125)) {
809 const char *sep = "";
810 printf("%s: using ", sc->sc_dev.dv_xname);
811 if (sc->sc_cfg & CFG_EXT_125) {
812 printf("%s125MHz clock", sep);
813 sep = ", ";
814 }
815 if (sc->sc_cfg & CFG_TBI_EN) {
816 printf("%sten-bit interface", sep);
817 sep = ", ";
818 }
819 printf("\n");
820 }
821 if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0)
822 sc->sc_cfg |= CFG_MRM_DIS;
823 if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
824 sc->sc_cfg |= CFG_MWI_DIS;
825
826 /*
827 * Use the extended descriptor format on the DP83820. This
828 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
829 * checksumming.
830 */
831 sc->sc_cfg |= CFG_EXTSTS_EN;
832 #endif /* DP83820 */
833
834 /*
835 * Initialize our media structures and probe the MII.
836 */
837 sc->sc_mii.mii_ifp = ifp;
838 sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg;
839 sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg;
840 sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg;
841 ifmedia_init(&sc->sc_mii.mii_media, 0, SIP_DECL(mediachange),
842 SIP_DECL(mediastatus));
843 #ifdef DP83820
844 if (sc->sc_cfg & CFG_TBI_EN) {
845 /* Using ten-bit interface. */
846 printf("%s: TBI -- FIXME\n", sc->sc_dev.dv_xname);
847 } else {
848 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
849 MII_OFFSET_ANY, 0);
850 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
851 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
852 0, NULL);
853 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
854 } else
855 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
856 }
857 #else
858 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
859 MII_OFFSET_ANY, 0);
860 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
861 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
862 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
863 } else
864 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
865 #endif /* DP83820 */
866
867 ifp = &sc->sc_ethercom.ec_if;
868 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
869 ifp->if_softc = sc;
870 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
871 ifp->if_ioctl = SIP_DECL(ioctl);
872 ifp->if_start = SIP_DECL(start);
873 ifp->if_watchdog = SIP_DECL(watchdog);
874 ifp->if_init = SIP_DECL(init);
875 ifp->if_stop = SIP_DECL(stop);
876 IFQ_SET_READY(&ifp->if_snd);
877
878 /*
879 * We can support 802.1Q VLAN-sized frames.
880 */
881 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
882
883 #ifdef DP83820
884 /*
885 * And the DP83820 can do VLAN tagging in hardware, and
886 * support the jumbo Ethernet MTU.
887 */
888 sc->sc_ethercom.ec_capabilities |=
889 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
890
891 /*
892 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums
893 * in hardware.
894 */
895 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
896 IFCAP_CSUM_UDPv4;
897 #endif /* DP83820 */
898
899 /*
900 * Attach the interface.
901 */
902 if_attach(ifp);
903 ether_ifattach(ifp, enaddr);
904
905 /*
906 * The number of bytes that must be available in
907 * the Tx FIFO before the bus master can DMA more
908 * data into the FIFO.
909 */
910 sc->sc_tx_fill_thresh = 64 / 32;
911
912 /*
913 * Start at a drain threshold of 512 bytes. We will
914 * increase it if a DMA underrun occurs.
915 *
916 * XXX The minimum value of this variable should be
917 * tuned. We may be able to improve performance
918 * by starting with a lower value. That, however,
919 * may trash the first few outgoing packets if the
920 * PCI bus is saturated.
921 */
922 sc->sc_tx_drain_thresh = 1504 / 32;
923
924 /*
925 * Initialize the Rx FIFO drain threshold.
926 *
927 * This is in units of 8 bytes.
928 *
929 * We should never set this value lower than 2; 14 bytes are
930 * required to filter the packet.
931 */
932 sc->sc_rx_drain_thresh = 128 / 8;
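/*
 * Note: the Tx thresholds above are expressed in units of 32 bytes and
 * the Rx threshold in units of 8 bytes, so the initial values are a
 * 64-byte fill threshold (64 / 32 == 2), a 1504-byte Tx drain
 * threshold (1504 / 32 == 47), and a 128-byte Rx drain threshold
 * (128 / 8 == 16).
 */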
933
934 #ifdef SIP_EVENT_COUNTERS
935 /*
936 * Attach event counters.
937 */
938 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
939 NULL, sc->sc_dev.dv_xname, "txsstall");
940 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
941 NULL, sc->sc_dev.dv_xname, "txdstall");
942 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_INTR,
943 NULL, sc->sc_dev.dv_xname, "txforceintr");
944 evcnt_attach_dynamic(&sc->sc_ev_txdintr, EVCNT_TYPE_INTR,
945 NULL, sc->sc_dev.dv_xname, "txdintr");
946 evcnt_attach_dynamic(&sc->sc_ev_txiintr, EVCNT_TYPE_INTR,
947 NULL, sc->sc_dev.dv_xname, "txiintr");
948 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
949 NULL, sc->sc_dev.dv_xname, "rxintr");
950 #ifdef DP83820
951 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
952 NULL, sc->sc_dev.dv_xname, "rxipsum");
953 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC,
954 NULL, sc->sc_dev.dv_xname, "rxtcpsum");
955 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC,
956 NULL, sc->sc_dev.dv_xname, "rxudpsum");
957 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
958 NULL, sc->sc_dev.dv_xname, "txipsum");
959 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC,
960 NULL, sc->sc_dev.dv_xname, "txtcpsum");
961 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC,
962 NULL, sc->sc_dev.dv_xname, "txudpsum");
963 #endif /* DP83820 */
964 #endif /* SIP_EVENT_COUNTERS */
965
966 /*
967 * Make sure the interface is shutdown during reboot.
968 */
969 sc->sc_sdhook = shutdownhook_establish(SIP_DECL(shutdown), sc);
970 if (sc->sc_sdhook == NULL)
971 printf("%s: WARNING: unable to establish shutdown hook\n",
972 sc->sc_dev.dv_xname);
973 return;
974
975 /*
976 * Free any resources we've allocated during the failed attach
977 * attempt. Do this in reverse order and fall through.
978 */
979 fail_5:
980 for (i = 0; i < SIP_NRXDESC; i++) {
981 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
982 bus_dmamap_destroy(sc->sc_dmat,
983 sc->sc_rxsoft[i].rxs_dmamap);
984 }
985 fail_4:
986 for (i = 0; i < SIP_TXQUEUELEN; i++) {
987 if (sc->sc_txsoft[i].txs_dmamap != NULL)
988 bus_dmamap_destroy(sc->sc_dmat,
989 sc->sc_txsoft[i].txs_dmamap);
990 }
991 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
992 fail_3:
993 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
994 fail_2:
995 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
996 sizeof(struct sip_control_data));
997 fail_1:
998 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
999 fail_0:
1000 return;
1001 }
1002
1003 /*
1004 * sip_shutdown:
1005 *
1006 * Make sure the interface is stopped at reboot time.
1007 */
1008 void
1009 SIP_DECL(shutdown)(void *arg)
1010 {
1011 struct sip_softc *sc = arg;
1012
1013 SIP_DECL(stop)(&sc->sc_ethercom.ec_if, 1);
1014 }
1015
1016 /*
1017 * sip_start: [ifnet interface function]
1018 *
1019 * Start packet transmission on the interface.
1020 */
1021 void
1022 SIP_DECL(start)(struct ifnet *ifp)
1023 {
1024 struct sip_softc *sc = ifp->if_softc;
1025 struct mbuf *m0, *m;
1026 struct sip_txsoft *txs;
1027 bus_dmamap_t dmamap;
1028 int error, firsttx, nexttx, lasttx, ofree, seg;
1029 #ifdef DP83820
1030 u_int32_t extsts;
1031 #endif
1032
1033 /*
1034 * If we've been told to pause, don't transmit any more packets.
1035 */
1036 if (sc->sc_flags & SIPF_PAUSED)
1037 ifp->if_flags |= IFF_OACTIVE;
1038
1039 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1040 return;
1041
1042 /*
1043 * Remember the previous number of free descriptors and
1044 * the first descriptor we'll use.
1045 */
1046 ofree = sc->sc_txfree;
1047 firsttx = sc->sc_txnext;
1048
1049 /*
1050 * Loop through the send queue, setting up transmit descriptors
1051 * until we drain the queue, or use up all available transmit
1052 * descriptors.
1053 */
1054 for (;;) {
1055 /* Get a work queue entry. */
1056 if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1057 SIP_EVCNT_INCR(&sc->sc_ev_txsstall);
1058 break;
1059 }
1060
1061 /*
1062 * Grab a packet off the queue.
1063 */
1064 IFQ_POLL(&ifp->if_snd, m0);
1065 if (m0 == NULL)
1066 break;
1067 #ifndef DP83820
1068 m = NULL;
1069 #endif
1070
1071 dmamap = txs->txs_dmamap;
1072
1073 #ifdef DP83820
1074 /*
1075 * Load the DMA map. If this fails, the packet either
1076 * didn't fit in the allotted number of segments, or we
1077 * were short on resources. For the too-many-segments
1078 * case, we simply report an error and drop the packet,
1079 * since we can't sanely copy a jumbo packet to a single
1080 * buffer.
1081 */
1082 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1083 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1084 if (error) {
1085 if (error == EFBIG) {
1086 printf("%s: Tx packet consumes too many "
1087 "DMA segments, dropping...\n",
1088 sc->sc_dev.dv_xname);
1089 IFQ_DEQUEUE(&ifp->if_snd, m0);
1090 m_freem(m0);
1091 continue;
1092 }
1093 /*
1094 * Short on resources, just stop for now.
1095 */
1096 break;
1097 }
1098 #else /* DP83820 */
1099 /*
1100 * Load the DMA map. If this fails, the packet either
1101 * didn't fit in the allotted number of segments, or we
1102 * were short on resources. In this case, we'll copy
1103 * and try again.
1104 */
1105 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1106 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1107 MGETHDR(m, M_DONTWAIT, MT_DATA);
1108 if (m == NULL) {
1109 printf("%s: unable to allocate Tx mbuf\n",
1110 sc->sc_dev.dv_xname);
1111 break;
1112 }
1113 if (m0->m_pkthdr.len > MHLEN) {
1114 MCLGET(m, M_DONTWAIT);
1115 if ((m->m_flags & M_EXT) == 0) {
1116 printf("%s: unable to allocate Tx "
1117 "cluster\n", sc->sc_dev.dv_xname);
1118 m_freem(m);
1119 break;
1120 }
1121 }
1122 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
1123 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1124 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
1125 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1126 if (error) {
1127 printf("%s: unable to load Tx buffer, "
1128 "error = %d\n", sc->sc_dev.dv_xname, error);
1129 break;
1130 }
1131 }
1132 #endif /* DP83820 */
1133
1134 /*
1135 * Ensure we have enough descriptors free to describe
1136 * the packet. Note, we always reserve one descriptor
1137 * at the end of the ring as a termination point, to
1138 * prevent wrap-around.
1139 */
1140 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
1141 /*
1142 * Not enough free descriptors to transmit this
1143 * packet. We haven't committed anything yet,
1144 * so just unload the DMA map, put the packet
1145 * back on the queue, and punt. Notify the upper
1146 * layer that there are no more slots left.
1147 *
1148 * XXX We could allocate an mbuf and copy, but
1149 * XXX is it worth it?
1150 */
1151 ifp->if_flags |= IFF_OACTIVE;
1152 bus_dmamap_unload(sc->sc_dmat, dmamap);
1153 #ifndef DP83820
1154 if (m != NULL)
1155 m_freem(m);
1156 #endif
1157 SIP_EVCNT_INCR(&sc->sc_ev_txdstall);
1158 break;
1159 }
1160
1161 IFQ_DEQUEUE(&ifp->if_snd, m0);
1162 #ifndef DP83820
1163 if (m != NULL) {
1164 m_freem(m0);
1165 m0 = m;
1166 }
1167 #endif
1168
1169 /*
1170 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1171 */
1172
1173 /* Sync the DMA map. */
1174 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1175 BUS_DMASYNC_PREWRITE);
1176
1177 /*
1178 * Initialize the transmit descriptors.
1179 */
1180 for (nexttx = sc->sc_txnext, seg = 0;
1181 seg < dmamap->dm_nsegs;
1182 seg++, nexttx = SIP_NEXTTX(nexttx)) {
1183 /*
1184 * If this is the first descriptor we're
1185 * enqueueing, don't set the OWN bit just
1186 * yet. That could cause a race condition.
1187 * We'll do it below.
1188 */
1189 sc->sc_txdescs[nexttx].sipd_bufptr =
1190 htole32(dmamap->dm_segs[seg].ds_addr);
1191 sc->sc_txdescs[nexttx].sipd_cmdsts =
1192 htole32((nexttx == firsttx ? 0 : CMDSTS_OWN) |
1193 CMDSTS_MORE | dmamap->dm_segs[seg].ds_len);
1194 #ifdef DP83820
1195 sc->sc_txdescs[nexttx].sipd_extsts = 0;
1196 #endif /* DP83820 */
1197 lasttx = nexttx;
1198 }
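/*
 * Note: at this point every descriptor except the first has
 * CMDSTS_OWN set.  The first descriptor is handed to the chip only
 * after the whole chain is built (see the CMDSTS_OWN write on
 * firsttx below), so the hardware never sees a partial packet.
 */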
1199
1200 /* Clear the MORE bit on the last segment. */
1201 sc->sc_txdescs[lasttx].sipd_cmdsts &= htole32(~CMDSTS_MORE);
1202
1203 /*
1204 * If we have queued many packets since the last Tx interrupt,
1205 * force an interrupt on this one so descriptors get reclaimed.
1206 */
1207 if (++sc->sc_txwin >= (SIP_TXQUEUELEN * 2 / 3)) {
1208 SIP_EVCNT_INCR(&sc->sc_ev_txforceintr);
1209 sc->sc_txdescs[lasttx].sipd_cmdsts |=
1210 htole32(CMDSTS_INTR);
1211 sc->sc_txwin = 0;
1212 }
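/*
 * Note: with SIP_TXQUEUELEN == 256 this forces a Tx interrupt roughly
 * every 170 packets (256 * 2 / 3), which bounds how long completed
 * mbufs can sit on the dirty queue between interrupts.
 */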
1213
1214 #ifdef DP83820
1215 /*
1216 * If VLANs are enabled and the packet has a VLAN tag, set
1217 * up the descriptor to encapsulate the packet for us.
1218 *
1219 * This apparently has to be on the last descriptor of
1220 * the packet.
1221 */
1222 if (sc->sc_ethercom.ec_nvlans != 0 &&
1223 (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
1224 sc->sc_txdescs[lasttx].sipd_extsts |=
1225 htole32(EXTSTS_VPKT |
1226 htons(*mtod(m, int *) & EXTSTS_VTCI));
1227 }
1228
1229 /*
1230 * If the upper-layer has requested IPv4/TCPv4/UDPv4
1231 * checksumming, set up the descriptor to do this work
1232 * for us.
1233 *
1234 * This apparently has to be on the first descriptor of
1235 * the packet.
1236 *
1237 * Byte-swap constants so the compiler can optimize.
1238 */
1239 extsts = 0;
1240 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1241 KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4);
1242 SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
1243 extsts |= htole32(EXTSTS_IPPKT);
1244 }
1245 if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
1246 KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4);
1247 SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
1248 extsts |= htole32(EXTSTS_TCPPKT);
1249 } else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
1250 KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4);
1251 SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
1252 extsts |= htole32(EXTSTS_UDPPKT);
1253 }
1254 sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts;
1255 #endif /* DP83820 */
1256
1257 /* Sync the descriptors we're using. */
1258 SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1259 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1260
1261 /*
1262 * Store a pointer to the packet so we can free it later,
1263 * and remember what txdirty will be once the packet is
1264 * done.
1265 */
1266 txs->txs_mbuf = m0;
1267 txs->txs_firstdesc = sc->sc_txnext;
1268 txs->txs_lastdesc = lasttx;
1269
1270 /* Advance the tx pointer. */
1271 sc->sc_txfree -= dmamap->dm_nsegs;
1272 sc->sc_txnext = nexttx;
1273
1274 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs, txs_q);
1275 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1276
1277 #if NBPFILTER > 0
1278 /*
1279 * Pass the packet to any BPF listeners.
1280 */
1281 if (ifp->if_bpf)
1282 bpf_mtap(ifp->if_bpf, m0);
1283 #endif /* NBPFILTER > 0 */
1284 }
1285
1286 if (txs == NULL || sc->sc_txfree == 0) {
1287 /* No more slots left; notify upper layer. */
1288 ifp->if_flags |= IFF_OACTIVE;
1289 }
1290
1291 if (sc->sc_txfree != ofree) {
1292 /*
1293 * The entire packet chain is set up. Give the
1294 * first descriptor to the chip now.
1295 */
1296 sc->sc_txdescs[firsttx].sipd_cmdsts |= htole32(CMDSTS_OWN);
1297 SIP_CDTXSYNC(sc, firsttx, 1,
1298 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1299
1300 /*
1301 * Start the transmit process. Note, the manual says
1302 * that if there are no pending transmissions in the
1303 * chip's internal queue (indicated by TXE being clear),
1304 * then the driver software must set the TXDP to the
1305 * first descriptor to be transmitted. However, if we
1306 * do this, it causes serious performance degradation on
1307 * the DP83820 under load; not setting TXDP doesn't seem
1308 * to adversely affect the SiS 900 or DP83815.
1309 *
1310 * Well, I guess it wouldn't be the first time a manual
1311 * has lied -- and they could be speaking of the NULL-
1312 * terminated descriptor list case, rather than OWN-
1313 * terminated rings.
1314 */
1315 #if 0
1316 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
1317 CR_TXE) == 0) {
1318 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
1319 SIP_CDTXADDR(sc, firsttx));
1320 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1321 }
1322 #else
1323 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1324 #endif
1325
1326 /* Set a watchdog timer in case the chip flakes out. */
1327 ifp->if_timer = 5;
1328 }
1329 }
1330
1331 /*
1332 * sip_watchdog: [ifnet interface function]
1333 *
1334 * Watchdog timer handler.
1335 */
1336 void
1337 SIP_DECL(watchdog)(struct ifnet *ifp)
1338 {
1339 struct sip_softc *sc = ifp->if_softc;
1340
1341 /*
1342 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
1343 * If we get a timeout, try and sweep up transmit descriptors.
1344 * If we manage to sweep them all up, ignore the lack of
1345 * interrupt.
1346 */
1347 SIP_DECL(txintr)(sc);
1348
1349 if (sc->sc_txfree != SIP_NTXDESC) {
1350 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1351 ifp->if_oerrors++;
1352
1353 /* Reset the interface. */
1354 (void) SIP_DECL(init)(ifp);
1355 } else if (ifp->if_flags & IFF_DEBUG)
1356 printf("%s: recovered from device timeout\n",
1357 sc->sc_dev.dv_xname);
1358
1359 /* Try to get more packets going. */
1360 SIP_DECL(start)(ifp);
1361 }
1362
1363 /*
1364 * sip_ioctl: [ifnet interface function]
1365 *
1366 * Handle control requests from the operator.
1367 */
1368 int
1369 SIP_DECL(ioctl)(struct ifnet *ifp, u_long cmd, caddr_t data)
1370 {
1371 struct sip_softc *sc = ifp->if_softc;
1372 struct ifreq *ifr = (struct ifreq *)data;
1373 int s, error;
1374
1375 s = splnet();
1376
1377 switch (cmd) {
1378 case SIOCSIFMEDIA:
1379 case SIOCGIFMEDIA:
1380 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1381 break;
1382
1383 default:
1384 error = ether_ioctl(ifp, cmd, data);
1385 if (error == ENETRESET) {
1386 /*
1387 * Multicast list has changed; set the hardware filter
1388 * accordingly.
1389 */
1390 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
1391 error = 0;
1392 }
1393 break;
1394 }
1395
1396 /* Try to get more packets going. */
1397 SIP_DECL(start)(ifp);
1398
1399 splx(s);
1400 return (error);
1401 }
1402
1403 /*
1404 * sip_intr:
1405 *
1406 * Interrupt service routine.
1407 */
1408 int
1409 SIP_DECL(intr)(void *arg)
1410 {
1411 struct sip_softc *sc = arg;
1412 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1413 u_int32_t isr;
1414 int handled = 0;
1415
1416 for (;;) {
1417 /* Reading clears interrupt. */
1418 isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
1419 if ((isr & sc->sc_imr) == 0)
1420 break;
1421
1422 handled = 1;
1423
1424 if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
1425 SIP_EVCNT_INCR(&sc->sc_ev_rxintr);
1426
1427 /* Grab any new packets. */
1428 SIP_DECL(rxintr)(sc);
1429
1430 if (isr & ISR_RXORN) {
1431 printf("%s: receive FIFO overrun\n",
1432 sc->sc_dev.dv_xname);
1433
1434 /* XXX adjust rx_drain_thresh? */
1435 }
1436
1437 if (isr & ISR_RXIDLE) {
1438 printf("%s: receive ring overrun\n",
1439 sc->sc_dev.dv_xname);
1440
1441 /* Get the receive process going again. */
1442 bus_space_write_4(sc->sc_st, sc->sc_sh,
1443 SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
1444 bus_space_write_4(sc->sc_st, sc->sc_sh,
1445 SIP_CR, CR_RXE);
1446 }
1447 }
1448
1449 if (isr & (ISR_TXURN|ISR_TXDESC|ISR_TXIDLE)) {
1450 #ifdef SIP_EVENT_COUNTERS
1451 if (isr & ISR_TXDESC)
1452 SIP_EVCNT_INCR(&sc->sc_ev_txdintr);
1453 else if (isr & ISR_TXIDLE)
1454 SIP_EVCNT_INCR(&sc->sc_ev_txiintr);
1455 #endif
1456
1457 /* Sweep up transmit descriptors. */
1458 SIP_DECL(txintr)(sc);
1459
1460 if (isr & ISR_TXURN) {
1461 u_int32_t thresh;
1462
1463 printf("%s: transmit FIFO underrun",
1464 sc->sc_dev.dv_xname);
1465
1466 thresh = sc->sc_tx_drain_thresh + 1;
1467 if (thresh <= TXCFG_DRTH &&
1468 (thresh * 32) <= (SIP_TXFIFO_SIZE -
1469 (sc->sc_tx_fill_thresh * 32))) {
1470 printf("; increasing Tx drain "
1471 "threshold to %u bytes\n",
1472 thresh * 32);
1473 sc->sc_tx_drain_thresh = thresh;
1474 (void) SIP_DECL(init)(ifp);
1475 } else {
1476 (void) SIP_DECL(init)(ifp);
1477 printf("\n");
1478 }
1479 }
1480 }
1481
1482 #if !defined(DP83820)
1483 if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
1484 if (isr & ISR_PAUSE_ST) {
1485 sc->sc_flags |= SIPF_PAUSED;
1486 ifp->if_flags |= IFF_OACTIVE;
1487 }
1488 if (isr & ISR_PAUSE_END) {
1489 sc->sc_flags &= ~SIPF_PAUSED;
1490 ifp->if_flags &= ~IFF_OACTIVE;
1491 }
1492 }
1493 #endif /* ! DP83820 */
1494
1495 if (isr & ISR_HIBERR) {
1496 #define PRINTERR(bit, str) \
1497 if (isr & (bit)) \
1498 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1499 PRINTERR(ISR_DPERR, "parity error");
1500 PRINTERR(ISR_SSERR, "system error");
1501 PRINTERR(ISR_RMABT, "master abort");
1502 PRINTERR(ISR_RTABT, "target abort");
1503 PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
1504 (void) SIP_DECL(init)(ifp);
1505 #undef PRINTERR
1506 }
1507 }
1508
1509 /* Try to get more packets going. */
1510 SIP_DECL(start)(ifp);
1511
1512 return (handled);
1513 }
1514
1515 /*
1516 * sip_txintr:
1517 *
1518 * Helper; handle transmit interrupts.
1519 */
1520 void
1521 SIP_DECL(txintr)(struct sip_softc *sc)
1522 {
1523 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1524 struct sip_txsoft *txs;
1525 u_int32_t cmdsts;
1526
1527 if ((sc->sc_flags & SIPF_PAUSED) == 0)
1528 ifp->if_flags &= ~IFF_OACTIVE;
1529
1530 /*
1531 * Go through our Tx list and free mbufs for those
1532 * frames which have been transmitted.
1533 */
1534 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1535 SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1536 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1537
1538 cmdsts = le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
1539 if (cmdsts & CMDSTS_OWN)
1540 break;
1541
1542 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
1543
1544 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1545
1546 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1547 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1548 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1549 m_freem(txs->txs_mbuf);
1550 txs->txs_mbuf = NULL;
1551
1552 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1553
1554 /*
1555 * Check for errors and collisions.
1556 */
1557 if (cmdsts &
1558 (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
1559 ifp->if_oerrors++;
1560 if (cmdsts & CMDSTS_Tx_EC)
1561 ifp->if_collisions += 16;
1562 if (ifp->if_flags & IFF_DEBUG) {
1563 if (cmdsts & CMDSTS_Tx_ED)
1564 printf("%s: excessive deferral\n",
1565 sc->sc_dev.dv_xname);
1566 if (cmdsts & CMDSTS_Tx_EC)
1567 printf("%s: excessive collisions\n",
1568 sc->sc_dev.dv_xname);
1569 }
1570 } else {
1571 /* Packet was transmitted successfully. */
1572 ifp->if_opackets++;
1573 ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
1574 }
1575 }
1576
1577 /*
1578 * If there are no more pending transmissions, cancel the watchdog
1579 * timer.
1580 */
1581 if (txs == NULL) {
1582 ifp->if_timer = 0;
1583 sc->sc_txwin = 0;
1584 }
1585 }
1586
1587 #if defined(DP83820)
1588 /*
1589 * sip_rxintr:
1590 *
1591 * Helper; handle receive interrupts.
1592 */
1593 void
1594 SIP_DECL(rxintr)(struct sip_softc *sc)
1595 {
1596 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1597 struct sip_rxsoft *rxs;
1598 struct mbuf *m, *tailm;
1599 u_int32_t cmdsts, extsts;
1600 int i, len;
1601
1602 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1603 rxs = &sc->sc_rxsoft[i];
1604
1605 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1606
1607 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1608 extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts);
1609
1610 /*
1611 * NOTE: OWN is set if owned by _consumer_. We're the
1612 * consumer of the receive ring, so if the bit is clear,
1613 * we have processed all of the packets.
1614 */
1615 if ((cmdsts & CMDSTS_OWN) == 0) {
1616 /*
1617 * We have processed all of the receive buffers.
1618 */
1619 break;
1620 }
1621
1622 if (__predict_false(sc->sc_rxdiscard)) {
1623 SIP_INIT_RXDESC(sc, i);
1624 if ((cmdsts & CMDSTS_MORE) == 0) {
1625 /* Reset our state. */
1626 sc->sc_rxdiscard = 0;
1627 }
1628 continue;
1629 }
1630
1631 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1632 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1633
1634 m = rxs->rxs_mbuf;
1635
1636 /*
1637 * Add a new receive buffer to the ring.
1638 */
1639 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1640 /*
1641 * Failed, throw away what we've done so
1642 * far, and discard the rest of the packet.
1643 */
1644 ifp->if_ierrors++;
1645 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1646 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1647 SIP_INIT_RXDESC(sc, i);
1648 if (cmdsts & CMDSTS_MORE)
1649 sc->sc_rxdiscard = 1;
1650 if (sc->sc_rxhead != NULL)
1651 m_freem(sc->sc_rxhead);
1652 SIP_RXCHAIN_RESET(sc);
1653 continue;
1654 }
1655
1656 SIP_RXCHAIN_LINK(sc, m);
1657
1658 /*
1659 * If this is not the end of the packet, keep
1660 * looking.
1661 */
1662 if (cmdsts & CMDSTS_MORE) {
1663 sc->sc_rxlen += m->m_len;
1664 continue;
1665 }
1666
1667 /*
1668 * Okay, we have the entire packet now...
1669 */
1670 *sc->sc_rxtailp = NULL;
1671 m = sc->sc_rxhead;
1672 tailm = sc->sc_rxtail;
1673
1674 SIP_RXCHAIN_RESET(sc);
1675
1676 /*
1677 * If an error occurred, update stats and drop the packet.
1678 */
1679 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1680 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1681 ifp->if_ierrors++;
1682 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1683 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1684 /* Receive overrun handled elsewhere. */
1685 printf("%s: receive descriptor error\n",
1686 sc->sc_dev.dv_xname);
1687 }
1688 #define PRINTERR(bit, str) \
1689 if (cmdsts & (bit)) \
1690 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1691 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1692 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1693 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1694 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1695 #undef PRINTERR
1696 m_freem(m);
1697 continue;
1698 }
1699
1700 /*
1701 * No errors.
1702 *
1703 * Note, the DP83820 includes the CRC with
1704 * every packet.
1705 */
1706 len = CMDSTS_SIZE(cmdsts);
1707 tailm->m_len = len - sc->sc_rxlen;
1708
1709 /*
1710 * If the packet is small enough to fit in a
1711 * single header mbuf, allocate one and copy
1712 * the data into it. This greatly reduces
1713 * memory consumption when we receive lots
1714 * of small packets.
1715 */
1716 if (SIP_DECL(copy_small) != 0 && len <= (MHLEN - 2)) {
1717 struct mbuf *nm;
1718 MGETHDR(nm, M_DONTWAIT, MT_DATA);
1719 if (nm == NULL) {
1720 ifp->if_ierrors++;
1721 m_freem(m);
1722 continue;
1723 }
1724 nm->m_data += 2;
1725 nm->m_pkthdr.len = nm->m_len = len;
1726 m_copydata(m, 0, len, mtod(nm, caddr_t));
1727 m_freem(m);
1728 m = nm;
1729 }
1730 #ifndef __NO_STRICT_ALIGNMENT
1731 else {
1732 /*
1733 * The DP83820's receive buffers must be 4-byte
1734 * aligned. But this means that the data after
1735 * the Ethernet header is misaligned. To compensate,
1736 * we have artificially shortened the buffer size
1737 * in the descriptor, and we do an overlapping copy
1738 * of the data two bytes further in (in the first
1739 * buffer of the chain only).
1740 */
1741 memmove(mtod(m, caddr_t) + 2, mtod(m, caddr_t),
1742 m->m_len);
1743 m->m_data += 2;
1744 }
1745 #endif /* ! __NO_STRICT_ALIGNMENT */
1746
1747 /*
1748 * If VLANs are enabled, VLAN packets have been unwrapped
1749 * for us. Associate the tag with the packet.
1750 */
1751 if (sc->sc_ethercom.ec_nvlans != 0 &&
1752 (extsts & EXTSTS_VPKT) != 0) {
1753 struct mbuf *vtag;
1754
1755 vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1756 if (vtag == NULL) {
1757 ifp->if_ierrors++;
1758 printf("%s: unable to allocate VLAN tag\n",
1759 sc->sc_dev.dv_xname);
1760 m_freem(m);
1761 continue;
1762 }
1763
1764 *mtod(vtag, int *) = ntohs(extsts & EXTSTS_VTCI);
1765 vtag->m_len = sizeof(int);
1766 }
1767
1768 /*
1769 * Set the incoming checksum information for the
1770 * packet.
1771 */
1772 if ((extsts & EXTSTS_IPPKT) != 0) {
1773 SIP_EVCNT_INCR(&sc->sc_ev_rxipsum);
1774 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1775 if (extsts & EXTSTS_Rx_IPERR)
1776 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1777 if (extsts & EXTSTS_TCPPKT) {
1778 SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
1779 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1780 if (extsts & EXTSTS_Rx_TCPERR)
1781 m->m_pkthdr.csum_flags |=
1782 M_CSUM_TCP_UDP_BAD;
1783 } else if (extsts & EXTSTS_UDPPKT) {
1784 SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum);
1785 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1786 if (extsts & EXTSTS_Rx_UDPERR)
1787 m->m_pkthdr.csum_flags |=
1788 M_CSUM_TCP_UDP_BAD;
1789 }
1790 }
1791
1792 ifp->if_ipackets++;
1793 m->m_flags |= M_HASFCS;
1794 m->m_pkthdr.rcvif = ifp;
1795 m->m_pkthdr.len = len;
1796
1797 #if NBPFILTER > 0
1798 /*
1799 * Pass this up to any BPF listeners, but only
1800 * pass it up the stack if it's for us.
1801 */
1802 if (ifp->if_bpf)
1803 bpf_mtap(ifp->if_bpf, m);
1804 #endif /* NBPFILTER > 0 */
1805
1806 /* Pass it on. */
1807 (*ifp->if_input)(ifp, m);
1808 }
1809
1810 /* Update the receive pointer. */
1811 sc->sc_rxptr = i;
1812 }
1813 #else /* ! DP83820 */
1814 /*
1815 * sip_rxintr:
1816 *
1817 * Helper; handle receive interrupts.
1818 */
1819 void
1820 SIP_DECL(rxintr)(struct sip_softc *sc)
1821 {
1822 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1823 struct sip_rxsoft *rxs;
1824 struct mbuf *m;
1825 u_int32_t cmdsts;
1826 int i, len;
1827
1828 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1829 rxs = &sc->sc_rxsoft[i];
1830
1831 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1832
1833 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1834
1835 /*
1836 * NOTE: OWN is set if owned by _consumer_. We're the
1837 * consumer of the receive ring, so if the bit is clear,
1838 * we have processed all of the packets.
1839 */
1840 if ((cmdsts & CMDSTS_OWN) == 0) {
1841 /*
1842 * We have processed all of the receive buffers.
1843 */
1844 break;
1845 }
1846
1847 /*
1848 * If any collisions were seen on the wire, count one.
1849 */
1850 if (cmdsts & CMDSTS_Rx_COL)
1851 ifp->if_collisions++;
1852
1853 /*
1854 * If an error occurred, update stats, clear the status
1855 * word, and leave the packet buffer in place. It will
1856 * simply be reused the next time the ring comes around.
1857 */
1858 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1859 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1860 ifp->if_ierrors++;
1861 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1862 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1863 /* Receive overrun handled elsewhere. */
1864 printf("%s: receive descriptor error\n",
1865 sc->sc_dev.dv_xname);
1866 }
1867 #define PRINTERR(bit, str) \
1868 if (cmdsts & (bit)) \
1869 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1870 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1871 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1872 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1873 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1874 #undef PRINTERR
1875 SIP_INIT_RXDESC(sc, i);
1876 continue;
1877 }
1878
1879 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1880 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1881
1882 /*
1883 * No errors; receive the packet. Note, the SiS 900
1884 * includes the CRC with every packet.
1885 */
1886 len = CMDSTS_SIZE(cmdsts);
1887
1888 #ifdef __NO_STRICT_ALIGNMENT
1889 /*
1890 * If the packet is small enough to fit in a
1891 * single header mbuf, allocate one and copy
1892 * the data into it. This greatly reduces
1893 * memory consumption when we receive lots
1894 * of small packets.
1895 *
1896 * Otherwise, we add a new buffer to the receive
1897 * chain. If this fails, we drop the packet and
1898 * recycle the old buffer.
1899 */
1900 if (SIP_DECL(copy_small) != 0 && len <= MHLEN) {
1901 MGETHDR(m, M_DONTWAIT, MT_DATA);
1902 if (m == NULL)
1903 goto dropit;
1904 memcpy(mtod(m, caddr_t),
1905 mtod(rxs->rxs_mbuf, caddr_t), len);
1906 SIP_INIT_RXDESC(sc, i);
1907 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1908 rxs->rxs_dmamap->dm_mapsize,
1909 BUS_DMASYNC_PREREAD);
1910 } else {
1911 m = rxs->rxs_mbuf;
1912 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1913 dropit:
1914 ifp->if_ierrors++;
1915 SIP_INIT_RXDESC(sc, i);
1916 bus_dmamap_sync(sc->sc_dmat,
1917 rxs->rxs_dmamap, 0,
1918 rxs->rxs_dmamap->dm_mapsize,
1919 BUS_DMASYNC_PREREAD);
1920 continue;
1921 }
1922 }
1923 #else
1924 /*
1925 * The SiS 900's receive buffers must be 4-byte aligned.
1926 * But this means that the data after the Ethernet header
1927 * is misaligned. We must allocate a new buffer and
1928 * copy the data, shifted forward 2 bytes.
1929 */
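		/*
		 * Editorial note (arithmetic sketch, not from the original
		 * source): the receive DMA buffer starts 4-byte aligned and
		 * the Ethernet header is 14 bytes, so the IP header would
		 * otherwise begin at offset 14 (14 % 4 == 2).  Copying the
		 * frame to m_data + 2 puts the IP header at offset 16,
		 * which is 4-byte aligned (2 + 14 == 16, 16 % 4 == 0).
		 */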
1930 MGETHDR(m, M_DONTWAIT, MT_DATA);
1931 if (m == NULL) {
1932 dropit:
1933 ifp->if_ierrors++;
1934 SIP_INIT_RXDESC(sc, i);
1935 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1936 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1937 continue;
1938 }
1939 if (len > (MHLEN - 2)) {
1940 MCLGET(m, M_DONTWAIT);
1941 if ((m->m_flags & M_EXT) == 0) {
1942 m_freem(m);
1943 goto dropit;
1944 }
1945 }
1946 m->m_data += 2;
1947
1948 /*
1949 * Note that we use clusters for incoming frames, so the
1950 * buffer is virtually contiguous.
1951 */
1952 memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
1953
1954 /* Allow the receive descriptor to continue using its mbuf. */
1955 SIP_INIT_RXDESC(sc, i);
1956 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1957 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1958 #endif /* __NO_STRICT_ALIGNMENT */
1959
1960 ifp->if_ipackets++;
1961 m->m_flags |= M_HASFCS;
1962 m->m_pkthdr.rcvif = ifp;
1963 m->m_pkthdr.len = m->m_len = len;
1964
1965 #if NBPFILTER > 0
1966 /*
1967 * Pass this up to any BPF listeners, but only
1968 		 * pass it up the stack if it's for us.
1969 */
1970 if (ifp->if_bpf)
1971 bpf_mtap(ifp->if_bpf, m);
1972 #endif /* NBPFILTER > 0 */
1973
1974 /* Pass it on. */
1975 (*ifp->if_input)(ifp, m);
1976 }
1977
1978 /* Update the receive pointer. */
1979 sc->sc_rxptr = i;
1980 }
1981 #endif /* DP83820 */
1982
1983 /*
1984 * sip_tick:
1985 *
1986 * One second timer, used to tick the MII.
1987 */
1988 void
1989 SIP_DECL(tick)(void *arg)
1990 {
1991 struct sip_softc *sc = arg;
1992 int s;
1993
1994 s = splnet();
1995 mii_tick(&sc->sc_mii);
1996 splx(s);
1997
1998 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
1999 }
2000
2001 /*
2002 * sip_reset:
2003 *
2004 * Perform a soft reset on the SiS 900.
2005 */
2006 void
2007 SIP_DECL(reset)(struct sip_softc *sc)
2008 {
2009 bus_space_tag_t st = sc->sc_st;
2010 bus_space_handle_t sh = sc->sc_sh;
2011 int i;
2012
2013 bus_space_write_4(st, sh, SIP_IER, 0);
2014 bus_space_write_4(st, sh, SIP_IMR, 0);
2015 bus_space_write_4(st, sh, SIP_RFCR, 0);
2016 bus_space_write_4(st, sh, SIP_CR, CR_RST);
2017
2018 for (i = 0; i < SIP_TIMEOUT; i++) {
2019 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0)
2020 break;
2021 delay(2);
2022 }
2023
2024 if (i == SIP_TIMEOUT)
2025 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
2026
2027 delay(1000);
2028
2029 #ifdef DP83820
2030 /*
2031 * Set the general purpose I/O bits. Do it here in case we
2032 * need to have GPIO set up to talk to the media interface.
2033 */
2034 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior);
2035 delay(1000);
2036 #endif /* DP83820 */
2037 }
2038
2039 /*
2040 * sip_init: [ ifnet interface function ]
2041 *
2042 * Initialize the interface. Must be called at splnet().
2043 */
2044 int
2045 SIP_DECL(init)(struct ifnet *ifp)
2046 {
2047 struct sip_softc *sc = ifp->if_softc;
2048 bus_space_tag_t st = sc->sc_st;
2049 bus_space_handle_t sh = sc->sc_sh;
2050 struct sip_txsoft *txs;
2051 struct sip_rxsoft *rxs;
2052 struct sip_desc *sipd;
2053 u_int32_t reg;
2054 int i, error = 0;
2055
2056 /*
2057 * Cancel any pending I/O.
2058 */
2059 SIP_DECL(stop)(ifp, 0);
2060
2061 /*
2062 * Reset the chip to a known state.
2063 */
2064 SIP_DECL(reset)(sc);
2065
2066 #if !defined(DP83820)
2067 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) {
2068 /*
2069 * DP83815 manual, page 78:
2070 * 4.4 Recommended Registers Configuration
2071 * For optimum performance of the DP83815, version noted
2072 * as DP83815CVNG (SRR = 203h), the listed register
2073 * modifications must be followed in sequence...
2074 *
2075 * It's not clear if this should be 302h or 203h because that
2076 * chip name is listed as SRR 302h in the description of the
2077 * SRR register. However, my revision 302h DP83815 on the
2078 * Netgear FA311 purchased in 02/2001 needs these settings
2079 * to avoid tons of errors in AcceptPerfectMatch (non-
2080 * IFF_PROMISC) mode. I do not know if other revisions need
2081 * this set or not. [briggs -- 09 March 2001]
2082 *
2083 * Note that only the low-order 12 bits of 0xe4 are documented
2084 * and that this sets reserved bits in that register.
2085 */
2086 reg = bus_space_read_4(st, sh, SIP_NS_SRR);
2087 if (reg == 0x302) {
2088 bus_space_write_4(st, sh, 0x00cc, 0x0001);
2089 bus_space_write_4(st, sh, 0x00e4, 0x189C);
2090 bus_space_write_4(st, sh, 0x00fc, 0x0000);
2091 bus_space_write_4(st, sh, 0x00f4, 0x5040);
2092 bus_space_write_4(st, sh, 0x00f8, 0x008c);
2093 }
2094 }
2095 #endif /* ! DP83820 */
2096
2097 /*
2098 * Initialize the transmit descriptor ring.
2099 */
2100 for (i = 0; i < SIP_NTXDESC; i++) {
2101 sipd = &sc->sc_txdescs[i];
2102 memset(sipd, 0, sizeof(struct sip_desc));
2103 sipd->sipd_link = htole32(SIP_CDTXADDR(sc, SIP_NEXTTX(i)));
2104 }
2105 SIP_CDTXSYNC(sc, 0, SIP_NTXDESC,
2106 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2107 sc->sc_txfree = SIP_NTXDESC;
2108 sc->sc_txnext = 0;
2109 sc->sc_txwin = 0;
2110
2111 /*
2112 * Initialize the transmit job descriptors.
2113 */
2114 SIMPLEQ_INIT(&sc->sc_txfreeq);
2115 SIMPLEQ_INIT(&sc->sc_txdirtyq);
2116 for (i = 0; i < SIP_TXQUEUELEN; i++) {
2117 txs = &sc->sc_txsoft[i];
2118 txs->txs_mbuf = NULL;
2119 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2120 }
2121
2122 /*
2123 * Initialize the receive descriptor and receive job
2124 * descriptor rings.
2125 */
2126 for (i = 0; i < SIP_NRXDESC; i++) {
2127 rxs = &sc->sc_rxsoft[i];
2128 if (rxs->rxs_mbuf == NULL) {
2129 if ((error = SIP_DECL(add_rxbuf)(sc, i)) != 0) {
2130 printf("%s: unable to allocate or map rx "
2131 "buffer %d, error = %d\n",
2132 sc->sc_dev.dv_xname, i, error);
2133 /*
2134 * XXX Should attempt to run with fewer receive
2135 * XXX buffers instead of just failing.
2136 */
2137 SIP_DECL(rxdrain)(sc);
2138 goto out;
2139 }
2140 } else
2141 SIP_INIT_RXDESC(sc, i);
2142 }
2143 sc->sc_rxptr = 0;
2144 #ifdef DP83820
2145 sc->sc_rxdiscard = 0;
2146 SIP_RXCHAIN_RESET(sc);
2147 #endif /* DP83820 */
2148
2149 /*
2150 * Set the configuration register; it's already initialized
2151 * in sip_attach().
2152 */
2153 bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg);
2154
2155 /*
2156 * Initialize the prototype TXCFG register.
2157 */
2158 #if defined(DP83820)
2159 sc->sc_txcfg = TXCFG_MXDMA_512;
2160 sc->sc_rxcfg = RXCFG_MXDMA_512;
2161 #else
2162 if ((SIP_SIS900_REV(sc, SIS_REV_635) ||
2163 SIP_SIS900_REV(sc, SIS_REV_900B)) &&
2164 (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG) & CFG_EDBMASTEN)) {
2165 sc->sc_txcfg = TXCFG_MXDMA_64;
2166 sc->sc_rxcfg = RXCFG_MXDMA_64;
2167 } else {
2168 sc->sc_txcfg = TXCFG_MXDMA_512;
2169 sc->sc_rxcfg = RXCFG_MXDMA_512;
2170 }
2171 #endif /* DP83820 */
2172
2173 sc->sc_txcfg |= TXCFG_ATP |
2174 (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) |
2175 sc->sc_tx_drain_thresh;
2176 bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg);
2177
2178 /*
2179 * Initialize the receive drain threshold if we have never
2180 * done so.
2181 */
2182 if (sc->sc_rx_drain_thresh == 0) {
2183 /*
2184 * XXX This value should be tuned. This is set to the
2185 * maximum of 248 bytes, and we may be able to improve
2186 * performance by decreasing it (although we should never
2187 * set this value lower than 2; 14 bytes are required to
2188 * filter the packet).
2189 */
2190 sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT;
2191 }
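	/*
	 * Editorial note (worked arithmetic, assuming the usual 8-byte
	 * granularity of the DRTH field implied by the figures above):
	 * the maximum field value of 31 corresponds to 31 * 8 == 248
	 * bytes, and the suggested minimum of 2 corresponds to
	 * 2 * 8 == 16 bytes, enough to cover the 14 bytes needed to
	 * filter the packet.
	 */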
2192
2193 /*
2194 * Initialize the prototype RXCFG register.
2195 */
2196 sc->sc_rxcfg |= (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT);
2197 bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg);
2198
2199 #ifdef DP83820
2200 /*
2201 * Initialize the VLAN/IP receive control register.
2202 * We enable checksum computation on all incoming
2203 * packets, and do not reject packets w/ bad checksums.
2204 */
2205 reg = 0;
2206 if (ifp->if_capenable &
2207 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2208 reg |= VRCR_IPEN;
2209 if (sc->sc_ethercom.ec_nvlans != 0)
2210 reg |= VRCR_VTDEN|VRCR_VTREN;
2211 bus_space_write_4(st, sh, SIP_VRCR, reg);
2212
2213 /*
2214 * Initialize the VLAN/IP transmit control register.
2215 * We enable outgoing checksum computation on a
2216 * per-packet basis.
2217 */
2218 reg = 0;
2219 if (ifp->if_capenable &
2220 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2221 reg |= VTCR_PPCHK;
2222 if (sc->sc_ethercom.ec_nvlans != 0)
2223 reg |= VTCR_VPPTI;
2224 bus_space_write_4(st, sh, SIP_VTCR, reg);
2225
2226 /*
2227 * If we're using VLANs, initialize the VLAN data register.
2228 * To understand why we bswap the VLAN Ethertype, see section
2229 * 4.2.36 of the DP83820 manual.
2230 */
2231 if (sc->sc_ethercom.ec_nvlans != 0)
2232 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN));
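	/*
	 * Editorial note (worked example): ETHERTYPE_VLAN is 0x8100, so
	 * the value written to SIP_VDR above is bswap16(0x8100) == 0x0081,
	 * the byte order the chip expects per the manual section cited
	 * above.
	 */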
2233 #endif /* DP83820 */
2234
2235 /*
2236 * Give the transmit and receive rings to the chip.
2237 */
2238 bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
2239 bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
2240
2241 /*
2242 * Initialize the interrupt mask.
2243 */
2244 sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR|
2245 ISR_TXURN|ISR_TXDESC|ISR_TXIDLE|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
2246 bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);
2247
2248 /* Set up the receive filter. */
2249 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
2250
2251 /*
2252 * Set the current media. Do this after initializing the prototype
2253 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
2254 * control.
2255 */
2256 mii_mediachg(&sc->sc_mii);
2257
2258 /*
2259 * Enable interrupts.
2260 */
2261 bus_space_write_4(st, sh, SIP_IER, IER_IE);
2262
2263 /*
2264 * Start the transmit and receive processes.
2265 */
2266 bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);
2267
2268 /*
2269 * Start the one second MII clock.
2270 */
2271 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2272
2273 /*
2274 * ...all done!
2275 */
2276 ifp->if_flags |= IFF_RUNNING;
2277 ifp->if_flags &= ~IFF_OACTIVE;
2278
2279 out:
2280 if (error)
2281 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2282 return (error);
2283 }
2284
2285 /*
2286  * sip_rxdrain:
2287 *
2288 * Drain the receive queue.
2289 */
2290 void
2291 SIP_DECL(rxdrain)(struct sip_softc *sc)
2292 {
2293 struct sip_rxsoft *rxs;
2294 int i;
2295
2296 for (i = 0; i < SIP_NRXDESC; i++) {
2297 rxs = &sc->sc_rxsoft[i];
2298 if (rxs->rxs_mbuf != NULL) {
2299 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2300 m_freem(rxs->rxs_mbuf);
2301 rxs->rxs_mbuf = NULL;
2302 }
2303 }
2304 }
2305
2306 /*
2307 * sip_stop: [ ifnet interface function ]
2308 *
2309 * Stop transmission on the interface.
2310 */
2311 void
2312 SIP_DECL(stop)(struct ifnet *ifp, int disable)
2313 {
2314 struct sip_softc *sc = ifp->if_softc;
2315 bus_space_tag_t st = sc->sc_st;
2316 bus_space_handle_t sh = sc->sc_sh;
2317 struct sip_txsoft *txs;
2318 u_int32_t cmdsts = 0; /* DEBUG */
2319
2320 /*
2321 * Stop the one second clock.
2322 */
2323 callout_stop(&sc->sc_tick_ch);
2324
2325 /* Down the MII. */
2326 mii_down(&sc->sc_mii);
2327
2328 /*
2329 * Disable interrupts.
2330 */
2331 bus_space_write_4(st, sh, SIP_IER, 0);
2332
2333 /*
2334 * Stop receiver and transmitter.
2335 */
2336 bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);
2337
2338 /*
2339 * Release any queued transmit buffers.
2340 */
2341 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
2342 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2343 SIMPLEQ_NEXT(txs, txs_q) == NULL &&
2344 (le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts) &
2345 CMDSTS_INTR) == 0)
2346 printf("%s: sip_stop: last descriptor does not "
2347 "have INTR bit set\n", sc->sc_dev.dv_xname);
2348 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
2349 #ifdef DIAGNOSTIC
2350 if (txs->txs_mbuf == NULL) {
2351 printf("%s: dirty txsoft with no mbuf chain\n",
2352 sc->sc_dev.dv_xname);
2353 panic("sip_stop");
2354 }
2355 #endif
2356 cmdsts |= /* DEBUG */
2357 le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
2358 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2359 m_freem(txs->txs_mbuf);
2360 txs->txs_mbuf = NULL;
2361 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2362 }
2363
2364 if (disable)
2365 SIP_DECL(rxdrain)(sc);
2366
2367 /*
2368 * Mark the interface down and cancel the watchdog timer.
2369 */
2370 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2371 ifp->if_timer = 0;
2372
2373 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2374 (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC)
2375 printf("%s: sip_stop: no INTR bits set in dirty tx "
2376 "descriptors\n", sc->sc_dev.dv_xname);
2377 }
2378
2379 /*
2380 * sip_read_eeprom:
2381 *
2382 * Read data from the serial EEPROM.
2383 */
2384 void
2385 SIP_DECL(read_eeprom)(struct sip_softc *sc, int word, int wordcnt,
2386 u_int16_t *data)
2387 {
2388 bus_space_tag_t st = sc->sc_st;
2389 bus_space_handle_t sh = sc->sc_sh;
2390 u_int16_t reg;
2391 int i, x;
2392
2393 for (i = 0; i < wordcnt; i++) {
2394 /* Send CHIP SELECT. */
2395 reg = EROMAR_EECS;
2396 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2397
2398 /* Shift in the READ opcode. */
2399 for (x = 3; x > 0; x--) {
2400 if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
2401 reg |= EROMAR_EEDI;
2402 else
2403 reg &= ~EROMAR_EEDI;
2404 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2405 bus_space_write_4(st, sh, SIP_EROMAR,
2406 reg | EROMAR_EESK);
2407 delay(4);
2408 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2409 delay(4);
2410 }
2411
2412 /* Shift in address. */
2413 for (x = 6; x > 0; x--) {
2414 if ((word + i) & (1 << (x - 1)))
2415 reg |= EROMAR_EEDI;
2416 else
2417 reg &= ~EROMAR_EEDI;
2418 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2419 bus_space_write_4(st, sh, SIP_EROMAR,
2420 reg | EROMAR_EESK);
2421 delay(4);
2422 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2423 delay(4);
2424 }
2425
2426 /* Shift out data. */
2427 reg = EROMAR_EECS;
2428 data[i] = 0;
2429 for (x = 16; x > 0; x--) {
2430 bus_space_write_4(st, sh, SIP_EROMAR,
2431 reg | EROMAR_EESK);
2432 delay(4);
2433 if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
2434 data[i] |= (1 << (x - 1));
2435 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2436 delay(4);
2437 }
2438
2439 /* Clear CHIP SELECT. */
2440 bus_space_write_4(st, sh, SIP_EROMAR, 0);
2441 delay(4);
2442 }
2443 }
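/*
 * Editorial sketch of the serial transaction performed by the routine
 * above (assuming the usual microwire-style EEPROM wired to the EROMAR
 * bits): with EECS asserted, each word is fetched by clocking out on
 * EEDI/EESK a 3-bit READ opcode (SIP_EEPROM_OPC_READ) followed by a
 * 6-bit word address, and then clocking 16 data bits back in on EEDO,
 * most significant bit first.
 */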
2444
2445 /*
2446 * sip_add_rxbuf:
2447 *
2448 * Add a receive buffer to the indicated descriptor.
2449 */
2450 int
2451 SIP_DECL(add_rxbuf)(struct sip_softc *sc, int idx)
2452 {
2453 struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
2454 struct mbuf *m;
2455 int error;
2456
2457 MGETHDR(m, M_DONTWAIT, MT_DATA);
2458 if (m == NULL)
2459 return (ENOBUFS);
2460
2461 MCLGET(m, M_DONTWAIT);
2462 if ((m->m_flags & M_EXT) == 0) {
2463 m_freem(m);
2464 return (ENOBUFS);
2465 }
2466
2467 #if defined(DP83820)
2468 m->m_len = SIP_RXBUF_LEN;
2469 #endif /* DP83820 */
2470
2471 if (rxs->rxs_mbuf != NULL)
2472 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2473
2474 rxs->rxs_mbuf = m;
2475
2476 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2477 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2478 BUS_DMA_READ|BUS_DMA_NOWAIT);
2479 if (error) {
2480 printf("%s: can't load rx DMA map %d, error = %d\n",
2481 sc->sc_dev.dv_xname, idx, error);
2482 panic("sip_add_rxbuf"); /* XXX */
2483 }
2484
2485 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2486 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2487
2488 SIP_INIT_RXDESC(sc, idx);
2489
2490 return (0);
2491 }
2492
2493 #if !defined(DP83820)
2494 /*
2495 * sip_sis900_set_filter:
2496 *
2497 * Set up the receive filter.
2498 */
2499 void
2500 SIP_DECL(sis900_set_filter)(struct sip_softc *sc)
2501 {
2502 bus_space_tag_t st = sc->sc_st;
2503 bus_space_handle_t sh = sc->sc_sh;
2504 struct ethercom *ec = &sc->sc_ethercom;
2505 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2506 struct ether_multi *enm;
2507 u_int8_t *cp;
2508 struct ether_multistep step;
2509 u_int32_t crc, mchash[16];
2510
2511 /*
2512 * Initialize the prototype RFCR.
2513 */
2514 sc->sc_rfcr = RFCR_RFEN;
2515 if (ifp->if_flags & IFF_BROADCAST)
2516 sc->sc_rfcr |= RFCR_AAB;
2517 if (ifp->if_flags & IFF_PROMISC) {
2518 sc->sc_rfcr |= RFCR_AAP;
2519 goto allmulti;
2520 }
2521
2522 /*
2523 * Set up the multicast address filter by passing all multicast
2524 * addresses through a CRC generator, and then using the high-order
2525 	 * 7 bits (8 bits on the 635 and 900B revisions) as an index into
2526 	 * the 128 bit (256 bit) multicast hash table (only the lower 16
2527 	 * bits of each 32 bit multicast hash register are valid). The
2528 	 * high-order bits select the register; the low 4 bits select the bit.
2529 */
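	/*
	 * Editorial note (worked example with a made-up CRC value): if
	 * ether_crc32_be() returned 0xabcdef01 for some address, then on
	 * a pre-635 chip:
	 *
	 *	crc >>= 25;				(yields 0x55)
	 *	mchash[0x55 >> 4] |= 1 << (0x55 & 0xf);
	 *
	 * i.e. mchash[5], bit 5.
	 */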
2530
2531 memset(mchash, 0, sizeof(mchash));
2532
2533 ETHER_FIRST_MULTI(step, ec, enm);
2534 while (enm != NULL) {
2535 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2536 /*
2537 * We must listen to a range of multicast addresses.
2538 * For now, just accept all multicasts, rather than
2539 * trying to set only those filter bits needed to match
2540 * the range. (At this time, the only use of address
2541 * ranges is for IP multicast routing, for which the
2542 * range is big enough to require all bits set.)
2543 */
2544 goto allmulti;
2545 }
2546
2547 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2548
2549 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2550 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2551 /* Just want the 8 most significant bits. */
2552 crc >>= 24;
2553 } else {
2554 /* Just want the 7 most significant bits. */
2555 crc >>= 25;
2556 }
2557
2558 /* Set the corresponding bit in the hash table. */
2559 mchash[crc >> 4] |= 1 << (crc & 0xf);
2560
2561 ETHER_NEXT_MULTI(step, enm);
2562 }
2563
2564 ifp->if_flags &= ~IFF_ALLMULTI;
2565 goto setit;
2566
2567 allmulti:
2568 ifp->if_flags |= IFF_ALLMULTI;
2569 sc->sc_rfcr |= RFCR_AAM;
2570
2571 setit:
2572 #define FILTER_EMIT(addr, data) \
2573 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2574 delay(1); \
2575 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2576 delay(1)
2577
2578 /*
2579 * Disable receive filter, and program the node address.
2580 */
2581 cp = LLADDR(ifp->if_sadl);
2582 FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
2583 FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
2584 FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);
2585
2586 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2587 /*
2588 * Program the multicast hash table.
2589 */
2590 FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
2591 FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
2592 FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
2593 FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
2594 FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
2595 FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
2596 FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
2597 FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
2598 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2599 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2600 FILTER_EMIT(RFCR_RFADDR_MC8, mchash[8]);
2601 FILTER_EMIT(RFCR_RFADDR_MC9, mchash[9]);
2602 FILTER_EMIT(RFCR_RFADDR_MC10, mchash[10]);
2603 FILTER_EMIT(RFCR_RFADDR_MC11, mchash[11]);
2604 FILTER_EMIT(RFCR_RFADDR_MC12, mchash[12]);
2605 FILTER_EMIT(RFCR_RFADDR_MC13, mchash[13]);
2606 FILTER_EMIT(RFCR_RFADDR_MC14, mchash[14]);
2607 FILTER_EMIT(RFCR_RFADDR_MC15, mchash[15]);
2608 }
2609 }
2610 #undef FILTER_EMIT
2611
2612 /*
2613 	 * Re-enable the receive filter.
2614 */
2615 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2616 }
2617 #endif /* ! DP83820 */
2618
2619 /*
2620 * sip_dp83815_set_filter:
2621 *
2622 * Set up the receive filter.
2623 */
2624 void
2625 SIP_DECL(dp83815_set_filter)(struct sip_softc *sc)
2626 {
2627 bus_space_tag_t st = sc->sc_st;
2628 bus_space_handle_t sh = sc->sc_sh;
2629 struct ethercom *ec = &sc->sc_ethercom;
2630 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2631 struct ether_multi *enm;
2632 u_int8_t *cp;
2633 struct ether_multistep step;
2634 u_int32_t crc, hash, slot, bit;
2635 #ifdef DP83820
2636 #define MCHASH_NWORDS 128
2637 #else
2638 #define MCHASH_NWORDS 32
2639 #endif /* DP83820 */
2640 u_int16_t mchash[MCHASH_NWORDS];
2641 int i;
2642
2643 /*
2644 * Initialize the prototype RFCR.
2645 * Enable the receive filter, and accept on
2646 * Perfect (destination address) Match
2647 	 * Perfect (destination address) Match.
2648 * If IFF_PROMISC, accept all unicast packets (and later, set
2649 * IFF_ALLMULTI and accept all multicast, too).
2650 */
2651 sc->sc_rfcr = RFCR_RFEN | RFCR_APM;
2652 if (ifp->if_flags & IFF_BROADCAST)
2653 sc->sc_rfcr |= RFCR_AAB;
2654 if (ifp->if_flags & IFF_PROMISC) {
2655 sc->sc_rfcr |= RFCR_AAP;
2656 goto allmulti;
2657 }
2658
2659 #ifdef DP83820
2660 /*
2661 * Set up the DP83820 multicast address filter by passing all multicast
2662 * addresses through a CRC generator, and then using the high-order
2663 * 11 bits as an index into the 2048 bit multicast hash table. The
2664 * high-order 7 bits select the slot, while the low-order 4 bits
2665 * select the bit within the slot. Note that only the low 16-bits
2666 * of each filter word are used, and there are 128 filter words.
2667 */
2668 #else
2669 /*
2670 * Set up the DP83815 multicast address filter by passing all multicast
2671 * addresses through a CRC generator, and then using the high-order
2672 * 9 bits as an index into the 512 bit multicast hash table. The
2673 * high-order 5 bits select the slot, while the low-order 4 bits
2674 * select the bit within the slot. Note that only the low 16-bits
2675 * of each filter word are used, and there are 32 filter words.
2676 */
2677 #endif /* DP83820 */
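	/*
	 * Editorial note (worked example with a made-up CRC value): if
	 * ether_crc32_be() returned 0xabcdef01, the DP83815 case below
	 * computes hash = crc >> 23 == 0x157, giving slot == 0x15 and
	 * bit == 7, i.e. mchash[0x15] |= 1 << 7.
	 */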
2678
2679 memset(mchash, 0, sizeof(mchash));
2680
2681 ifp->if_flags &= ~IFF_ALLMULTI;
2682 ETHER_FIRST_MULTI(step, ec, enm);
2683 if (enm == NULL)
2684 goto setit;
2685 while (enm != NULL) {
2686 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2687 /*
2688 * We must listen to a range of multicast addresses.
2689 * For now, just accept all multicasts, rather than
2690 * trying to set only those filter bits needed to match
2691 * the range. (At this time, the only use of address
2692 * ranges is for IP multicast routing, for which the
2693 * range is big enough to require all bits set.)
2694 */
2695 goto allmulti;
2696 }
2697
2698 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2699
2700 #ifdef DP83820
2701 /* Just want the 11 most significant bits. */
2702 hash = crc >> 21;
2703 #else
2704 /* Just want the 9 most significant bits. */
2705 hash = crc >> 23;
2706 #endif /* DP83820 */
2707
2708 slot = hash >> 4;
2709 bit = hash & 0xf;
2710
2711 /* Set the corresponding bit in the hash table. */
2712 mchash[slot] |= 1 << bit;
2713
2714 ETHER_NEXT_MULTI(step, enm);
2715 }
2716 sc->sc_rfcr |= RFCR_MHEN;
2717 goto setit;
2718
2719 allmulti:
2720 ifp->if_flags |= IFF_ALLMULTI;
2721 sc->sc_rfcr |= RFCR_AAM;
2722
2723 setit:
2724 #define FILTER_EMIT(addr, data) \
2725 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2726 delay(1); \
2727 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2728 delay(1)
2729
2730 /*
2731 * Disable receive filter, and program the node address.
2732 */
2733 cp = LLADDR(ifp->if_sadl);
2734 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]);
2735 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]);
2736 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]);
2737
2738 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2739 /*
2740 * Program the multicast hash table.
2741 */
2742 for (i = 0; i < MCHASH_NWORDS; i++) {
2743 FILTER_EMIT(RFCR_NS_RFADDR_FILTMEM + (i * 2),
2744 mchash[i]);
2745 }
2746 }
2747 #undef FILTER_EMIT
2748 #undef MCHASH_NWORDS
2749
2750 /*
2751 	 * Re-enable the receive filter.
2752 */
2753 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2754 }
2755
2756 #if defined(DP83820)
2757 /*
2758 * sip_dp83820_mii_readreg: [mii interface function]
2759 *
2760 * Read a PHY register on the MII of the DP83820.
2761 */
2762 int
2763 SIP_DECL(dp83820_mii_readreg)(struct device *self, int phy, int reg)
2764 {
2765
2766 return (mii_bitbang_readreg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2767 phy, reg));
2768 }
2769
2770 /*
2771 * sip_dp83820_mii_writereg: [mii interface function]
2772 *
2773 * Write a PHY register on the MII of the DP83820.
2774 */
2775 void
2776 SIP_DECL(dp83820_mii_writereg)(struct device *self, int phy, int reg, int val)
2777 {
2778
2779 mii_bitbang_writereg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2780 phy, reg, val);
2781 }
2782
2783 /*
2784  * sip_dp83820_mii_statchg: [mii interface function]
2785 *
2786 * Callback from MII layer when media changes.
2787 */
2788 void
2789 SIP_DECL(dp83820_mii_statchg)(struct device *self)
2790 {
2791 struct sip_softc *sc = (struct sip_softc *) self;
2792 u_int32_t cfg;
2793
2794 /*
2795 * Update TXCFG for full-duplex operation.
2796 */
2797 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2798 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2799 else
2800 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2801
2802 /*
2803 * Update RXCFG for full-duplex or loopback.
2804 */
2805 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2806 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2807 sc->sc_rxcfg |= RXCFG_ATX;
2808 else
2809 sc->sc_rxcfg &= ~RXCFG_ATX;
2810
2811 /*
2812 * Update CFG for MII/GMII.
2813 */
2814 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
2815 cfg = sc->sc_cfg | CFG_MODE_1000;
2816 else
2817 cfg = sc->sc_cfg;
2818
2819 /*
2820 * XXX 802.3x flow control.
2821 */
2822
2823 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
2824 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
2825 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
2826 }
2827
2828 /*
2829 * sip_dp83820_mii_bitbang_read: [mii bit-bang interface function]
2830 *
2831 * Read the MII serial port for the MII bit-bang module.
2832 */
2833 u_int32_t
2834 SIP_DECL(dp83820_mii_bitbang_read)(struct device *self)
2835 {
2836 struct sip_softc *sc = (void *) self;
2837
2838 return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR));
2839 }
2840
2841 /*
2842  * sip_dp83820_mii_bitbang_write: [mii bit-bang interface function]
2843 *
2844 * Write the MII serial port for the MII bit-bang module.
2845 */
2846 void
2847 SIP_DECL(dp83820_mii_bitbang_write)(struct device *self, u_int32_t val)
2848 {
2849 struct sip_softc *sc = (void *) self;
2850
2851 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val);
2852 }
2853 #else /* ! DP83820 */
2854 /*
2855 * sip_sis900_mii_readreg: [mii interface function]
2856 *
2857 * Read a PHY register on the MII.
2858 */
2859 int
2860 SIP_DECL(sis900_mii_readreg)(struct device *self, int phy, int reg)
2861 {
2862 struct sip_softc *sc = (struct sip_softc *) self;
2863 u_int32_t enphy;
2864
2865 /*
2866 * The SiS 900 has only an internal PHY on the MII. Only allow
2867 * MII address 0.
2868 */
2869 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 &&
2870 sc->sc_rev < SIS_REV_635 && phy != 0)
2871 return (0);
2872
2873 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
2874 (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
2875 ENPHY_RWCMD | ENPHY_ACCESS);
2876 do {
2877 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
2878 } while (enphy & ENPHY_ACCESS);
2879 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
2880 }
2881
2882 /*
2883 * sip_sis900_mii_writereg: [mii interface function]
2884 *
2885 * Write a PHY register on the MII.
2886 */
2887 void
2888 SIP_DECL(sis900_mii_writereg)(struct device *self, int phy, int reg, int val)
2889 {
2890 struct sip_softc *sc = (struct sip_softc *) self;
2891 u_int32_t enphy;
2892
2893 /*
2894 * The SiS 900 has only an internal PHY on the MII. Only allow
2895 * MII address 0.
2896 */
2897 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 &&
2898 sc->sc_rev < SIS_REV_635 && phy != 0)
2899 return;
2900
2901 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
2902 (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
2903 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
2904 do {
2905 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
2906 } while (enphy & ENPHY_ACCESS);
2907 }
2908
2909 /*
2910 * sip_sis900_mii_statchg: [mii interface function]
2911 *
2912 * Callback from MII layer when media changes.
2913 */
2914 void
2915 SIP_DECL(sis900_mii_statchg)(struct device *self)
2916 {
2917 struct sip_softc *sc = (struct sip_softc *) self;
2918 u_int32_t flowctl;
2919
2920 /*
2921 * Update TXCFG for full-duplex operation.
2922 */
2923 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2924 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2925 else
2926 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2927
2928 /*
2929 * Update RXCFG for full-duplex or loopback.
2930 */
2931 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2932 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2933 sc->sc_rxcfg |= RXCFG_ATX;
2934 else
2935 sc->sc_rxcfg &= ~RXCFG_ATX;
2936
2937 /*
2938 * Update IMR for use of 802.3x flow control.
2939 */
2940 if ((sc->sc_mii.mii_media_active & IFM_FLOW) != 0) {
2941 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
2942 flowctl = FLOWCTL_FLOWEN;
2943 } else {
2944 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
2945 flowctl = 0;
2946 }
2947
2948 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
2949 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
2950 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
2951 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
2952 }
2953
2954 /*
2955 * sip_dp83815_mii_readreg: [mii interface function]
2956 *
2957 * Read a PHY register on the MII.
2958 */
2959 int
2960 SIP_DECL(dp83815_mii_readreg)(struct device *self, int phy, int reg)
2961 {
2962 struct sip_softc *sc = (struct sip_softc *) self;
2963 u_int32_t val;
2964
2965 /*
2966 * The DP83815 only has an internal PHY. Only allow
2967 * MII address 0.
2968 */
2969 if (phy != 0)
2970 return (0);
2971
2972 /*
2973 * Apparently, after a reset, the DP83815 can take a while
2974 * to respond. During this recovery period, the BMSR returns
2975 * a value of 0. Catch this -- it's not supposed to happen
2976 * (the BMSR has some hardcoded-to-1 bits), and wait for the
2977 * PHY to come back to life.
2978 *
2979 * This works out because the BMSR is the first register
2980 * read during the PHY probe process.
2981 */
2982 do {
2983 val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg));
2984 } while (reg == MII_BMSR && val == 0);
2985
2986 return (val & 0xffff);
2987 }
2988
2989 /*
2990 * sip_dp83815_mii_writereg: [mii interface function]
2991 *
2992 * Write a PHY register to the MII.
2993 */
2994 void
2995 SIP_DECL(dp83815_mii_writereg)(struct device *self, int phy, int reg, int val)
2996 {
2997 struct sip_softc *sc = (struct sip_softc *) self;
2998
2999 /*
3000 * The DP83815 only has an internal PHY. Only allow
3001 * MII address 0.
3002 */
3003 if (phy != 0)
3004 return;
3005
3006 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val);
3007 }
3008
3009 /*
3010 * sip_dp83815_mii_statchg: [mii interface function]
3011 *
3012 * Callback from MII layer when media changes.
3013 */
3014 void
3015 SIP_DECL(dp83815_mii_statchg)(struct device *self)
3016 {
3017 struct sip_softc *sc = (struct sip_softc *) self;
3018
3019 /*
3020 * Update TXCFG for full-duplex operation.
3021 */
3022 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3023 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3024 else
3025 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3026
3027 /*
3028 * Update RXCFG for full-duplex or loopback.
3029 */
3030 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3031 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3032 sc->sc_rxcfg |= RXCFG_ATX;
3033 else
3034 sc->sc_rxcfg &= ~RXCFG_ATX;
3035
3036 /*
3037 * XXX 802.3x flow control.
3038 */
3039
3040 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3041 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3042 }
3043 #endif /* DP83820 */
3044
3045 #if defined(DP83820)
3046 void
3047 SIP_DECL(dp83820_read_macaddr)(struct sip_softc *sc,
3048 const struct pci_attach_args *pa, u_int8_t *enaddr)
3049 {
3050 u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2];
3051 u_int8_t cksum, *e, match;
3052 int i;
3053
3054 /*
3055 * EEPROM data format for the DP83820 can be found in
3056 * the DP83820 manual, section 4.2.4.
3057 */
3058
3059 SIP_DECL(read_eeprom)(sc, 0,
3060 sizeof(eeprom_data) / sizeof(eeprom_data[0]), eeprom_data);
3061
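	/*
	 * Editorial note (not in the original): ~(match - 1) below is
	 * just -match modulo 256, so the comparison amounts to requiring
	 * that 0x55 plus the sum of all bytes preceding the checksum byte
	 * plus the stored checksum byte be 0 (mod 256).
	 */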
3062 match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8;
3063 match = ~(match - 1);
3064
3065 cksum = 0x55;
3066 e = (u_int8_t *) eeprom_data;
3067 for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++)
3068 cksum += *e++;
3069
3070 if (cksum != match)
3071 		printf("%s: Checksum (%x) mismatch (%x)\n",
3072 sc->sc_dev.dv_xname, cksum, match);
3073
3074 enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff;
3075 enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8;
3076 enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff;
3077 enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8;
3078 enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff;
3079 enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8;
3080
3081 /* Get the GPIOR bits. */
3082 sc->sc_gpior = eeprom_data[0x04];
3083
3084 /* Get various CFG related bits. */
3085 if (eeprom_data[0x05] & DP83820_CONFIG2_CFG_EXT_125)
3086 sc->sc_cfg |= CFG_EXT_125;
3087 if (eeprom_data[0x05] & DP83820_CONFIG2_CFG_M64ADDR)
3088 sc->sc_cfg |= CFG_M64ADDR;
3089 if (eeprom_data[0x05] & DP83820_CONFIG2_CFG_DATA64_EN)
3090 sc->sc_cfg |= CFG_DATA64_EN;
3091 if (eeprom_data[0x05] & DP83820_CONFIG2_CFG_T64ADDR)
3092 sc->sc_cfg |= CFG_T64ADDR;
3093 if (eeprom_data[0x05] & DP83820_CONFIG2_CFG_TBI_EN)
3094 sc->sc_cfg |= CFG_TBI_EN;
3095 }
3096 #else /* ! DP83820 */
3097 void
3098 SIP_DECL(sis900_read_macaddr)(struct sip_softc *sc,
3099 const struct pci_attach_args *pa, u_int8_t *enaddr)
3100 {
3101 u_int16_t myea[ETHER_ADDR_LEN / 2];
3102
3103 switch (sc->sc_rev) {
3104 case SIS_REV_630S:
3105 case SIS_REV_630E:
3106 case SIS_REV_630EA1:
3107 case SIS_REV_630ET:
3108 case SIS_REV_635:
3109 /*
3110 * The MAC address for the on-board Ethernet of
3111 * the SiS 630 chipset is in the NVRAM. Kick
3112 * the chip into re-loading it from NVRAM, and
3113 * read the MAC address out of the filter registers.
3114 */
3115 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD);
3116
3117 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3118 RFCR_RFADDR_NODE0);
3119 myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3120 0xffff;
3121
3122 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3123 RFCR_RFADDR_NODE2);
3124 myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3125 0xffff;
3126
3127 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3128 RFCR_RFADDR_NODE4);
3129 myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3130 0xffff;
3131 break;
3132
3133 default:
3134 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
3135 sizeof(myea) / sizeof(myea[0]), myea);
3136 }
3137
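	/*
	 * Editorial note (made-up example): each 16-bit word holds two
	 * address octets in little-endian order, so if myea[0] were
	 * 0x2211, the assignments below would yield enaddr[0] == 0x11
	 * and enaddr[1] == 0x22.
	 */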
3138 enaddr[0] = myea[0] & 0xff;
3139 enaddr[1] = myea[0] >> 8;
3140 enaddr[2] = myea[1] & 0xff;
3141 enaddr[3] = myea[1] >> 8;
3142 enaddr[4] = myea[2] & 0xff;
3143 enaddr[5] = myea[2] >> 8;
3144 }
3145
3146 /* Table and macro to bit-reverse an octet. */
3147 static const u_int8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
3148 #define bbr(v) ((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf])
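/*
 * Editorial note (worked example): bbr() reverses the bit order of one
 * octet using the nibble table above; e.g. bbr(0x12) == 0x48, since
 * 00010010 read backwards is 01001000.
 */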
3149
3150 void
3151 SIP_DECL(dp83815_read_macaddr)(struct sip_softc *sc,
3152 const struct pci_attach_args *pa, u_int8_t *enaddr)
3153 {
3154 u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea;
3155 u_int8_t cksum, *e, match;
3156 int i;
3157
3158 SIP_DECL(read_eeprom)(sc, 0, sizeof(eeprom_data) /
3159 sizeof(eeprom_data[0]), eeprom_data);
3160
3161 match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8;
3162 match = ~(match - 1);
3163
3164 cksum = 0x55;
3165 e = (u_int8_t *) eeprom_data;
3166 	for (i = 0; i < SIP_DP83815_EEPROM_CHECKSUM; i++) {
3167 cksum += *e++;
3168 }
3169 if (cksum != match) {
3170 		printf("%s: Checksum (%x) mismatch (%x)\n",
3171 sc->sc_dev.dv_xname, cksum, match);
3172 }
3173
3174 /*
3175 * Unrolled because it makes slightly more sense this way.
3176 * The DP83815 stores the MAC address in bit 0 of word 6
3177 * through bit 15 of word 8.
3178 */
3179 ea = &eeprom_data[6];
3180 enaddr[0] = ((*ea & 0x1) << 7);
3181 ea++;
3182 enaddr[0] |= ((*ea & 0xFE00) >> 9);
3183 enaddr[1] = ((*ea & 0x1FE) >> 1);
3184 enaddr[2] = ((*ea & 0x1) << 7);
3185 ea++;
3186 enaddr[2] |= ((*ea & 0xFE00) >> 9);
3187 enaddr[3] = ((*ea & 0x1FE) >> 1);
3188 enaddr[4] = ((*ea & 0x1) << 7);
3189 ea++;
3190 enaddr[4] |= ((*ea & 0xFE00) >> 9);
3191 enaddr[5] = ((*ea & 0x1FE) >> 1);
3192
3193 /*
3194 * In case that's not weird enough, we also need to reverse
3195 * the bits in each byte. This all actually makes more sense
3196 * if you think about the EEPROM storage as an array of bits
3197 * being shifted into bytes, but that's not how we're looking
3198 * at it here...
3199 */
3200 	for (i = 0; i < 6; i++)
3201 enaddr[i] = bbr(enaddr[i]);
3202 }
3203 #endif /* DP83820 */
3204
3205 /*
3206 * sip_mediastatus: [ifmedia interface function]
3207 *
3208 * Get the current interface media status.
3209 */
3210 void
3211 SIP_DECL(mediastatus)(struct ifnet *ifp, struct ifmediareq *ifmr)
3212 {
3213 struct sip_softc *sc = ifp->if_softc;
3214
3215 mii_pollstat(&sc->sc_mii);
3216 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3217 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3218 }
3219
3220 /*
3221 * sip_mediachange: [ifmedia interface function]
3222 *
3223 * Set hardware to newly-selected media.
3224 */
3225 int
3226 SIP_DECL(mediachange)(struct ifnet *ifp)
3227 {
3228 struct sip_softc *sc = ifp->if_softc;
3229
3230 if (ifp->if_flags & IFF_UP)
3231 mii_mediachg(&sc->sc_mii);
3232 return (0);
3233 }
3234