1 /*	$NetBSD: if_sip.c,v 1.75 2002/12/23 02:58:37 tsutsui Exp $	*/
2
3 /*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999 Network Computer, Inc.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of Network Computer, Inc. nor the names of its
52 * contributors may be used to endorse or promote products derived
53 * from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
56 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
66 */
67
68 /*
69 * Device driver for the Silicon Integrated Systems SiS 900,
70 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and
71 * National Semiconductor DP83820 10/100/1000 PCI Ethernet
72 * controllers.
73 *
74 * Originally written to support the SiS 900 by Jason R. Thorpe for
75 * Network Computer, Inc.
76 *
77 * TODO:
78 *
79 * - Reduce the Rx interrupt load.
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.75 2002/12/23 02:58:37 tsutsui Exp $");
84
85 #include "bpfilter.h"
86 #include "rnd.h"
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/callout.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/socket.h>
95 #include <sys/ioctl.h>
96 #include <sys/errno.h>
97 #include <sys/device.h>
98 #include <sys/queue.h>
99
100 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
101
102 #if NRND > 0
103 #include <sys/rnd.h>
104 #endif
105
106 #include <net/if.h>
107 #include <net/if_dl.h>
108 #include <net/if_media.h>
109 #include <net/if_ether.h>
110
111 #if NBPFILTER > 0
112 #include <net/bpf.h>
113 #endif
114
115 #include <machine/bus.h>
116 #include <machine/intr.h>
117 #include <machine/endian.h>
118
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #ifdef DP83820
122 #include <dev/mii/mii_bitbang.h>
123 #endif /* DP83820 */
124
125 #include <dev/pci/pcireg.h>
126 #include <dev/pci/pcivar.h>
127 #include <dev/pci/pcidevs.h>
128
129 #include <dev/pci/if_sipreg.h>
130
131 #ifdef DP83820 /* DP83820 Gigabit Ethernet */
132 #define SIP_DECL(x) __CONCAT(gsip_,x)
133 #else /* SiS900 and DP83815 */
134 #define SIP_DECL(x) __CONCAT(sip_,x)
135 #endif
136
137 #define SIP_STR(x) __STRING(SIP_DECL(x))
138
139 /*
140 * Transmit descriptor list size. This is arbitrary, but allocate
141  * enough descriptors for 256 pending transmissions, and 8 segments
142 * per packet. This MUST work out to a power of 2.
143 */
144 #define SIP_NTXSEGS 16
145 #define SIP_NTXSEGS_ALLOC 8
146
147 #define SIP_TXQUEUELEN 256
148 #define SIP_NTXDESC (SIP_TXQUEUELEN * SIP_NTXSEGS_ALLOC)
149 #define SIP_NTXDESC_MASK (SIP_NTXDESC - 1)
150 #define SIP_NEXTTX(x) (((x) + 1) & SIP_NTXDESC_MASK)
151
152 #if defined(DP83820)
153 #define TX_DMAMAP_SIZE ETHER_MAX_LEN_JUMBO
154 #else
155 #define TX_DMAMAP_SIZE MCLBYTES
156 #endif
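/*
 * With SIP_TXQUEUELEN == 256 and SIP_NTXSEGS_ALLOC == 8, the above
 * works out to SIP_NTXDESC == 2048 descriptors (a power of 2), so
 * SIP_NTXDESC_MASK is 0x7ff and SIP_NEXTTX() wraps the ring index
 * with a single AND instead of a modulo.  TX_DMAMAP_SIZE bounds a
 * single Tx DMA mapping: a jumbo frame on the DP83820, one mbuf
 * cluster otherwise.
 */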
157
158 /*
159 * Receive descriptor list size. We have one Rx buffer per incoming
160 * packet, so this logic is a little simpler.
161 *
162 * Actually, on the DP83820, we allow the packet to consume more than
163 * one buffer, in order to support jumbo Ethernet frames. In that
164 * case, a packet may consume up to 5 buffers (assuming a 2048 byte
165  * mbuf cluster).  256 receive buffers hold only 51 maximum-size packets,
166 * so we'd better be quick about handling receive interrupts.
167 */
168 #if defined(DP83820)
169 #define SIP_NRXDESC 256
170 #else
171 #define SIP_NRXDESC 128
172 #endif /* DP83820 */
173 #define SIP_NRXDESC_MASK (SIP_NRXDESC - 1)
174 #define SIP_NEXTRX(x) (((x) + 1) & SIP_NRXDESC_MASK)
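/*
 * Like the Tx ring, SIP_NRXDESC is a power of 2 (256 with DP83820
 * defined, 128 otherwise), so SIP_NEXTRX() can use the same
 * mask-and-increment trick to wrap.
 */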
175
176 /*
177 * Control structures are DMA'd to the SiS900 chip. We allocate them in
178 * a single clump that maps to a single DMA segment to make several things
179 * easier.
180 */
181 struct sip_control_data {
182 /*
183 * The transmit descriptors.
184 */
185 struct sip_desc scd_txdescs[SIP_NTXDESC];
186
187 /*
188 * The receive descriptors.
189 */
190 struct sip_desc scd_rxdescs[SIP_NRXDESC];
191 };
192
193 #define SIP_CDOFF(x) offsetof(struct sip_control_data, x)
194 #define SIP_CDTXOFF(x) SIP_CDOFF(scd_txdescs[(x)])
195 #define SIP_CDRXOFF(x) SIP_CDOFF(scd_rxdescs[(x)])
196
197 /*
198 * Software state for transmit jobs.
199 */
200 struct sip_txsoft {
201 struct mbuf *txs_mbuf; /* head of our mbuf chain */
202 bus_dmamap_t txs_dmamap; /* our DMA map */
203 int txs_firstdesc; /* first descriptor in packet */
204 int txs_lastdesc; /* last descriptor in packet */
205 SIMPLEQ_ENTRY(sip_txsoft) txs_q;
206 };
207
208 SIMPLEQ_HEAD(sip_txsq, sip_txsoft);
209
210 /*
211 * Software state for receive jobs.
212 */
213 struct sip_rxsoft {
214 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
215 bus_dmamap_t rxs_dmamap; /* our DMA map */
216 };
217
218 /*
219 * Software state per device.
220 */
221 struct sip_softc {
222 struct device sc_dev; /* generic device information */
223 bus_space_tag_t sc_st; /* bus space tag */
224 bus_space_handle_t sc_sh; /* bus space handle */
225 bus_dma_tag_t sc_dmat; /* bus DMA tag */
226 struct ethercom sc_ethercom; /* ethernet common data */
227 void *sc_sdhook; /* shutdown hook */
228
229 const struct sip_product *sc_model; /* which model are we? */
230 int sc_rev; /* chip revision */
231
232 void *sc_ih; /* interrupt cookie */
233
234 struct mii_data sc_mii; /* MII/media information */
235
236 struct callout sc_tick_ch; /* tick callout */
237
238 bus_dmamap_t sc_cddmamap; /* control data DMA map */
239 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
240
241 /*
242 * Software state for transmit and receive descriptors.
243 */
244 struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
245 struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];
246
247 /*
248 * Control data structures.
249 */
250 struct sip_control_data *sc_control_data;
251 #define sc_txdescs sc_control_data->scd_txdescs
252 #define sc_rxdescs sc_control_data->scd_rxdescs
253
254 #ifdef SIP_EVENT_COUNTERS
255 /*
256 * Event counters.
257 */
258 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
259 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
260 struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
261 struct evcnt sc_ev_txdintr; /* Tx descriptor interrupts */
262 struct evcnt sc_ev_txiintr; /* Tx idle interrupts */
263 struct evcnt sc_ev_rxintr; /* Rx interrupts */
264 struct evcnt sc_ev_hiberr; /* HIBERR interrupts */
265 #ifdef DP83820
266 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
267 struct evcnt sc_ev_rxtcpsum; /* TCP checksums checked in-bound */
268 	struct evcnt sc_ev_rxudpsum;	/* UDP checksums checked in-bound */
269 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
270 struct evcnt sc_ev_txtcpsum; /* TCP checksums comp. out-bound */
271 struct evcnt sc_ev_txudpsum; /* UDP checksums comp. out-bound */
272 #endif /* DP83820 */
273 #endif /* SIP_EVENT_COUNTERS */
274
275 u_int32_t sc_txcfg; /* prototype TXCFG register */
276 u_int32_t sc_rxcfg; /* prototype RXCFG register */
277 u_int32_t sc_imr; /* prototype IMR register */
278 u_int32_t sc_rfcr; /* prototype RFCR register */
279
280 u_int32_t sc_cfg; /* prototype CFG register */
281
282 #ifdef DP83820
283 u_int32_t sc_gpior; /* prototype GPIOR register */
284 #endif /* DP83820 */
285
286 u_int32_t sc_tx_fill_thresh; /* transmit fill threshold */
287 u_int32_t sc_tx_drain_thresh; /* transmit drain threshold */
288
289 u_int32_t sc_rx_drain_thresh; /* receive drain threshold */
290
291 int sc_flags; /* misc. flags; see below */
292
293 int sc_txfree; /* number of free Tx descriptors */
294 int sc_txnext; /* next ready Tx descriptor */
295 int sc_txwin; /* Tx descriptors since last intr */
296
297 struct sip_txsq sc_txfreeq; /* free Tx descsofts */
298 struct sip_txsq sc_txdirtyq; /* dirty Tx descsofts */
299
300 int sc_rxptr; /* next ready Rx descriptor/descsoft */
301 #if defined(DP83820)
302 int sc_rxdiscard;
303 int sc_rxlen;
304 struct mbuf *sc_rxhead;
305 struct mbuf *sc_rxtail;
306 struct mbuf **sc_rxtailp;
307 #endif /* DP83820 */
308
309 #if NRND > 0
310 rndsource_element_t rnd_source; /* random source */
311 #endif
312 };
313
314 /* sc_flags */
315 #define SIPF_PAUSED 0x00000001 /* paused (802.3x flow control) */
316
317 #ifdef DP83820
318 #define SIP_RXCHAIN_RESET(sc) \
319 do { \
320 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
321 *(sc)->sc_rxtailp = NULL; \
322 (sc)->sc_rxlen = 0; \
323 } while (/*CONSTCOND*/0)
324
325 #define SIP_RXCHAIN_LINK(sc, m) \
326 do { \
327 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
328 (sc)->sc_rxtailp = &(m)->m_next; \
329 } while (/*CONSTCOND*/0)
330 #endif /* DP83820 */
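/*
 * The two macros above carry the multi-buffer Rx state on the DP83820:
 * SIP_RXCHAIN_RESET() points sc_rxtailp back at sc_rxhead and zeroes the
 * accumulated length, and SIP_RXCHAIN_LINK() appends an mbuf to the
 * chain and advances the tail pointer.  sip_rxintr() uses them to
 * reassemble jumbo frames that span several 2048-byte clusters.
 */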
331
332 #ifdef SIP_EVENT_COUNTERS
333 #define SIP_EVCNT_INCR(ev) (ev)->ev_count++
334 #else
335 #define SIP_EVCNT_INCR(ev) /* nothing */
336 #endif
337
338 #define SIP_CDTXADDR(sc, x) ((sc)->sc_cddma + SIP_CDTXOFF((x)))
339 #define SIP_CDRXADDR(sc, x) ((sc)->sc_cddma + SIP_CDRXOFF((x)))
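/*
 * SIP_CDTXADDR()/SIP_CDRXADDR() turn a descriptor index into the bus
 * address handed to the chip: the base of the single control-data DMA
 * segment (sc_cddma) plus the byte offset of that descriptor within
 * struct sip_control_data.
 */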
340
341 #define SIP_CDTXSYNC(sc, x, n, ops) \
342 do { \
343 int __x, __n; \
344 \
345 __x = (x); \
346 __n = (n); \
347 \
348 /* If it will wrap around, sync to the end of the ring. */ \
349 if ((__x + __n) > SIP_NTXDESC) { \
350 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
351 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * \
352 (SIP_NTXDESC - __x), (ops)); \
353 __n -= (SIP_NTXDESC - __x); \
354 __x = 0; \
355 } \
356 \
357 /* Now sync whatever is left. */ \
358 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
359 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops)); \
360 } while (0)
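/*
 * Example of the wrap case handled above: syncing 4 descriptors starting
 * at index SIP_NTXDESC - 2 issues two bus_dmamap_sync() calls, one for
 * the last 2 descriptors of the ring and one for the first 2.
 */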
361
362 #define SIP_CDRXSYNC(sc, x, ops) \
363 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
364 SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))
365
366 #ifdef DP83820
367 #define SIP_INIT_RXDESC_EXTSTS __sipd->sipd_extsts = 0;
368 #define SIP_RXBUF_LEN (MCLBYTES - 4)
369 #else
370 #define SIP_INIT_RXDESC_EXTSTS /* nothing */
371 #define SIP_RXBUF_LEN (MCLBYTES - 1) /* field width */
372 #endif
373 #define SIP_INIT_RXDESC(sc, x) \
374 do { \
375 struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
376 struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)]; \
377 \
378 __sipd->sipd_link = \
379 htole32(SIP_CDRXADDR((sc), SIP_NEXTRX((x)))); \
380 __sipd->sipd_bufptr = \
381 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr); \
382 __sipd->sipd_cmdsts = htole32(CMDSTS_INTR | \
383 (SIP_RXBUF_LEN & CMDSTS_SIZE_MASK)); \
384 SIP_INIT_RXDESC_EXTSTS \
385 SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
386 } while (0)
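/*
 * SIP_INIT_RXDESC() makes Rx descriptor x usable by the chip again: it
 * links the descriptor to its successor (forming the ring), points it at
 * the mbuf cluster loaded into rxs_dmamap, and writes CMDSTS_INTR plus
 * the buffer size into cmdsts.  Leaving CMDSTS_OWN clear is what hands
 * the descriptor back to the chip; see the OWN note in sip_rxintr().
 */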
387
388 #define SIP_CHIP_VERS(sc, v, p, r) \
389 ((sc)->sc_model->sip_vendor == (v) && \
390 (sc)->sc_model->sip_product == (p) && \
391 (sc)->sc_rev == (r))
392
393 #define SIP_CHIP_MODEL(sc, v, p) \
394 ((sc)->sc_model->sip_vendor == (v) && \
395 (sc)->sc_model->sip_product == (p))
396
397 #if !defined(DP83820)
398 #define SIP_SIS900_REV(sc, rev) \
399 SIP_CHIP_VERS((sc), PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, (rev))
400 #endif
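/*
 * SIP_SIS900_REV() keys revision-specific behaviour off the PCI
 * revision ID; the attach routine uses it to set CFG_PESEL and
 * CFG_RNDCNT for the SiS 900 rev 635 and 900B parts.
 */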
401
402 #define SIP_TIMEOUT 1000
403
404 void SIP_DECL(start)(struct ifnet *);
405 void SIP_DECL(watchdog)(struct ifnet *);
406 int SIP_DECL(ioctl)(struct ifnet *, u_long, caddr_t);
407 int SIP_DECL(init)(struct ifnet *);
408 void SIP_DECL(stop)(struct ifnet *, int);
409
410 void SIP_DECL(shutdown)(void *);
411
412 void SIP_DECL(reset)(struct sip_softc *);
413 void SIP_DECL(rxdrain)(struct sip_softc *);
414 int SIP_DECL(add_rxbuf)(struct sip_softc *, int);
415 void SIP_DECL(read_eeprom)(struct sip_softc *, int, int, u_int16_t *);
416 void SIP_DECL(tick)(void *);
417
418 #if !defined(DP83820)
419 void SIP_DECL(sis900_set_filter)(struct sip_softc *);
420 #endif /* ! DP83820 */
421 void SIP_DECL(dp83815_set_filter)(struct sip_softc *);
422
423 #if defined(DP83820)
424 void SIP_DECL(dp83820_read_macaddr)(struct sip_softc *,
425 const struct pci_attach_args *, u_int8_t *);
426 #else
427 void SIP_DECL(sis900_read_macaddr)(struct sip_softc *,
428 const struct pci_attach_args *, u_int8_t *);
429 void SIP_DECL(dp83815_read_macaddr)(struct sip_softc *,
430 const struct pci_attach_args *, u_int8_t *);
431 #endif /* DP83820 */
432
433 int SIP_DECL(intr)(void *);
434 void SIP_DECL(txintr)(struct sip_softc *);
435 void SIP_DECL(rxintr)(struct sip_softc *);
436
437 #if defined(DP83820)
438 int SIP_DECL(dp83820_mii_readreg)(struct device *, int, int);
439 void SIP_DECL(dp83820_mii_writereg)(struct device *, int, int, int);
440 void SIP_DECL(dp83820_mii_statchg)(struct device *);
441 #else
442 int SIP_DECL(sis900_mii_readreg)(struct device *, int, int);
443 void SIP_DECL(sis900_mii_writereg)(struct device *, int, int, int);
444 void SIP_DECL(sis900_mii_statchg)(struct device *);
445
446 int SIP_DECL(dp83815_mii_readreg)(struct device *, int, int);
447 void SIP_DECL(dp83815_mii_writereg)(struct device *, int, int, int);
448 void SIP_DECL(dp83815_mii_statchg)(struct device *);
449 #endif /* DP83820 */
450
451 int SIP_DECL(mediachange)(struct ifnet *);
452 void SIP_DECL(mediastatus)(struct ifnet *, struct ifmediareq *);
453
454 int SIP_DECL(match)(struct device *, struct cfdata *, void *);
455 void SIP_DECL(attach)(struct device *, struct device *, void *);
456
457 int SIP_DECL(copy_small) = 0;
458
459 #ifdef DP83820
460 CFATTACH_DECL(gsip, sizeof(struct sip_softc),
461 gsip_match, gsip_attach, NULL, NULL);
462 #else
463 CFATTACH_DECL(sip, sizeof(struct sip_softc),
464 sip_match, sip_attach, NULL, NULL);
465 #endif
466
467 /*
468 * Descriptions of the variants of the SiS900.
469 */
470 struct sip_variant {
471 int (*sipv_mii_readreg)(struct device *, int, int);
472 void (*sipv_mii_writereg)(struct device *, int, int, int);
473 void (*sipv_mii_statchg)(struct device *);
474 void (*sipv_set_filter)(struct sip_softc *);
475 void (*sipv_read_macaddr)(struct sip_softc *,
476 const struct pci_attach_args *, u_int8_t *);
477 };
478
479 #if defined(DP83820)
480 u_int32_t SIP_DECL(dp83820_mii_bitbang_read)(struct device *);
481 void SIP_DECL(dp83820_mii_bitbang_write)(struct device *, u_int32_t);
482
483 const struct mii_bitbang_ops SIP_DECL(dp83820_mii_bitbang_ops) = {
484 SIP_DECL(dp83820_mii_bitbang_read),
485 SIP_DECL(dp83820_mii_bitbang_write),
486 {
487 EROMAR_MDIO, /* MII_BIT_MDO */
488 EROMAR_MDIO, /* MII_BIT_MDI */
489 EROMAR_MDC, /* MII_BIT_MDC */
490 EROMAR_MDDIR, /* MII_BIT_DIR_HOST_PHY */
491 0, /* MII_BIT_DIR_PHY_HOST */
492 }
493 };
494 #endif /* DP83820 */
495
496 #if defined(DP83820)
497 const struct sip_variant SIP_DECL(variant_dp83820) = {
498 SIP_DECL(dp83820_mii_readreg),
499 SIP_DECL(dp83820_mii_writereg),
500 SIP_DECL(dp83820_mii_statchg),
501 SIP_DECL(dp83815_set_filter),
502 SIP_DECL(dp83820_read_macaddr),
503 };
504 #else
505 const struct sip_variant SIP_DECL(variant_sis900) = {
506 SIP_DECL(sis900_mii_readreg),
507 SIP_DECL(sis900_mii_writereg),
508 SIP_DECL(sis900_mii_statchg),
509 SIP_DECL(sis900_set_filter),
510 SIP_DECL(sis900_read_macaddr),
511 };
512
513 const struct sip_variant SIP_DECL(variant_dp83815) = {
514 SIP_DECL(dp83815_mii_readreg),
515 SIP_DECL(dp83815_mii_writereg),
516 SIP_DECL(dp83815_mii_statchg),
517 SIP_DECL(dp83815_set_filter),
518 SIP_DECL(dp83815_read_macaddr),
519 };
520 #endif /* DP83820 */
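/*
 * Each sip_variant above bundles the MII access methods, the receive
 * filter setup, and the MAC-address reader for one chip family; the
 * attach routine selects the right one through the product table below,
 * and the rest of the driver calls through sc_model->sip_variant.
 */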
521
522 /*
523 * Devices supported by this driver.
524 */
525 const struct sip_product {
526 pci_vendor_id_t sip_vendor;
527 pci_product_id_t sip_product;
528 const char *sip_name;
529 const struct sip_variant *sip_variant;
530 } SIP_DECL(products)[] = {
531 #if defined(DP83820)
532 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83820,
533 "NatSemi DP83820 Gigabit Ethernet",
534 &SIP_DECL(variant_dp83820) },
535 #else
536 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900,
537 "SiS 900 10/100 Ethernet",
538 &SIP_DECL(variant_sis900) },
539 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016,
540 "SiS 7016 10/100 Ethernet",
541 &SIP_DECL(variant_sis900) },
542
543 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815,
544 "NatSemi DP83815 10/100 Ethernet",
545 &SIP_DECL(variant_dp83815) },
546 #endif /* DP83820 */
547
548 { 0, 0,
549 NULL,
550 NULL },
551 };
552
553 static const struct sip_product *
554 SIP_DECL(lookup)(const struct pci_attach_args *pa)
555 {
556 const struct sip_product *sip;
557
558 for (sip = SIP_DECL(products); sip->sip_name != NULL; sip++) {
559 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
560 PCI_PRODUCT(pa->pa_id) == sip->sip_product)
561 return (sip);
562 }
563 return (NULL);
564 }
565
566 #ifdef DP83820
567 /*
568 * I really hate stupid hardware vendors. There's a bit in the EEPROM
569 * which indicates if the card can do 64-bit data transfers. Unfortunately,
570 * several vendors of 32-bit cards fail to clear this bit in the EEPROM,
571 * which means we try to use 64-bit data transfers on those cards if we
572 * happen to be plugged into a 32-bit slot.
573 *
574 * What we do is use this table of cards known to be 64-bit cards. If
575  * you have a 64-bit card whose subsystem ID is not listed in this table,
576 * send the output of "pcictl dump ..." of the device to me so that your
577 * card will use the 64-bit data path when plugged into a 64-bit slot.
578 *
579 * -- Jason R. Thorpe <thorpej (at) netbsd.org>
580 * June 30, 2002
581 */
582 static int
583 SIP_DECL(check_64bit)(const struct pci_attach_args *pa)
584 {
585 static const struct {
586 pci_vendor_id_t c64_vendor;
587 pci_product_id_t c64_product;
588 } card64[] = {
589 /* Asante GigaNIX */
590 { 0x128a, 0x0002 },
591
592 /* Accton EN1407-T, Planex GN-1000TE */
593 { 0x1113, 0x1407 },
594
595 /* Netgear GA-621 */
596 { 0x1385, 0x621a },
597
598 { 0, 0}
599 };
600 pcireg_t subsys;
601 int i;
602
603 subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
604
605 for (i = 0; card64[i].c64_vendor != 0; i++) {
606 if (PCI_VENDOR(subsys) == card64[i].c64_vendor &&
607 PCI_PRODUCT(subsys) == card64[i].c64_product)
608 return (1);
609 }
610
611 return (0);
612 }
613 #endif /* DP83820 */
614
615 int
616 SIP_DECL(match)(struct device *parent, struct cfdata *cf, void *aux)
617 {
618 struct pci_attach_args *pa = aux;
619
620 if (SIP_DECL(lookup)(pa) != NULL)
621 return (1);
622
623 return (0);
624 }
625
626 void
627 SIP_DECL(attach)(struct device *parent, struct device *self, void *aux)
628 {
629 struct sip_softc *sc = (struct sip_softc *) self;
630 struct pci_attach_args *pa = aux;
631 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
632 pci_chipset_tag_t pc = pa->pa_pc;
633 pci_intr_handle_t ih;
634 const char *intrstr = NULL;
635 bus_space_tag_t iot, memt;
636 bus_space_handle_t ioh, memh;
637 bus_dma_segment_t seg;
638 int ioh_valid, memh_valid;
639 int i, rseg, error;
640 const struct sip_product *sip;
641 pcireg_t pmode;
642 u_int8_t enaddr[ETHER_ADDR_LEN];
643 int pmreg;
644 #ifdef DP83820
645 pcireg_t memtype;
646 u_int32_t reg;
647 #endif /* DP83820 */
648
649 callout_init(&sc->sc_tick_ch);
650
651 sip = SIP_DECL(lookup)(pa);
652 if (sip == NULL) {
653 printf("\n");
654 panic(SIP_STR(attach) ": impossible");
655 }
656 sc->sc_rev = PCI_REVISION(pa->pa_class);
657
658 printf(": %s, rev %#02x\n", sip->sip_name, sc->sc_rev);
659
660 sc->sc_model = sip;
661
662 /*
663 * XXX Work-around broken PXE firmware on some boards.
664 *
665 * The DP83815 shares an address decoder with the MEM BAR
666 * and the ROM BAR. Make sure the ROM BAR is disabled,
667 * so that memory mapped access works.
668 */
669 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM,
670 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM) &
671 ~PCI_MAPREG_ROM_ENABLE);
672
673 /*
674 * Map the device.
675 */
676 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
677 PCI_MAPREG_TYPE_IO, 0,
678 &iot, &ioh, NULL, NULL) == 0);
679 #ifdef DP83820
680 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA);
681 switch (memtype) {
682 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
683 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
684 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
685 memtype, 0, &memt, &memh, NULL, NULL) == 0);
686 break;
687 default:
688 memh_valid = 0;
689 }
690 #else
691 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
692 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
693 &memt, &memh, NULL, NULL) == 0);
694 #endif /* DP83820 */
695
696 if (memh_valid) {
697 sc->sc_st = memt;
698 sc->sc_sh = memh;
699 } else if (ioh_valid) {
700 sc->sc_st = iot;
701 sc->sc_sh = ioh;
702 } else {
703 printf("%s: unable to map device registers\n",
704 sc->sc_dev.dv_xname);
705 return;
706 }
707
708 sc->sc_dmat = pa->pa_dmat;
709
710 /*
711 * Make sure bus mastering is enabled. Also make sure
712 * Write/Invalidate is enabled if we're allowed to use it.
713 */
714 pmreg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
715 if (pa->pa_flags & PCI_FLAGS_MWI_OKAY)
716 pmreg |= PCI_COMMAND_INVALIDATE_ENABLE;
717 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
718 pmreg | PCI_COMMAND_MASTER_ENABLE);
719
720 /* Get it out of power save mode if needed. */
721 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
722 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
723 PCI_PMCSR_STATE_MASK;
724 if (pmode == PCI_PMCSR_STATE_D3) {
725 /*
726 * The card has lost all configuration data in
727 * this state, so punt.
728 */
729 printf("%s: unable to wake up from power state D3\n",
730 sc->sc_dev.dv_xname);
731 return;
732 }
733 if (pmode != PCI_PMCSR_STATE_D0) {
734 printf("%s: waking up from power state D%d\n",
735 sc->sc_dev.dv_xname, pmode);
736 pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
737 PCI_PMCSR_STATE_D0);
738 }
739 }
740
741 /*
742 * Map and establish our interrupt.
743 */
744 if (pci_intr_map(pa, &ih)) {
745 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
746 return;
747 }
748 intrstr = pci_intr_string(pc, ih);
749 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, SIP_DECL(intr), sc);
750 if (sc->sc_ih == NULL) {
751 printf("%s: unable to establish interrupt",
752 sc->sc_dev.dv_xname);
753 if (intrstr != NULL)
754 printf(" at %s", intrstr);
755 printf("\n");
756 return;
757 }
758 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
759
760 SIMPLEQ_INIT(&sc->sc_txfreeq);
761 SIMPLEQ_INIT(&sc->sc_txdirtyq);
762
763 /*
764 * Allocate the control data structures, and create and load the
765 * DMA map for it.
766 */
767 if ((error = bus_dmamem_alloc(sc->sc_dmat,
768 sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
769 0)) != 0) {
770 printf("%s: unable to allocate control data, error = %d\n",
771 sc->sc_dev.dv_xname, error);
772 goto fail_0;
773 }
774
775 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
776 sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data,
777 BUS_DMA_COHERENT)) != 0) {
778 printf("%s: unable to map control data, error = %d\n",
779 sc->sc_dev.dv_xname, error);
780 goto fail_1;
781 }
782
783 if ((error = bus_dmamap_create(sc->sc_dmat,
784 sizeof(struct sip_control_data), 1,
785 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
786 printf("%s: unable to create control data DMA map, "
787 "error = %d\n", sc->sc_dev.dv_xname, error);
788 goto fail_2;
789 }
790
791 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
792 sc->sc_control_data, sizeof(struct sip_control_data), NULL,
793 0)) != 0) {
794 printf("%s: unable to load control data DMA map, error = %d\n",
795 sc->sc_dev.dv_xname, error);
796 goto fail_3;
797 }
798
799 /*
800 * Create the transmit buffer DMA maps.
801 */
802 for (i = 0; i < SIP_TXQUEUELEN; i++) {
803 if ((error = bus_dmamap_create(sc->sc_dmat, TX_DMAMAP_SIZE,
804 SIP_NTXSEGS, MCLBYTES, 0, 0,
805 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
806 printf("%s: unable to create tx DMA map %d, "
807 "error = %d\n", sc->sc_dev.dv_xname, i, error);
808 goto fail_4;
809 }
810 }
811
812 /*
813 * Create the receive buffer DMA maps.
814 */
815 for (i = 0; i < SIP_NRXDESC; i++) {
816 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
817 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
818 printf("%s: unable to create rx DMA map %d, "
819 "error = %d\n", sc->sc_dev.dv_xname, i, error);
820 goto fail_5;
821 }
822 sc->sc_rxsoft[i].rxs_mbuf = NULL;
823 }
824
825 /*
826 * Reset the chip to a known state.
827 */
828 SIP_DECL(reset)(sc);
829
830 /*
831 * Read the Ethernet address from the EEPROM. This might
832 * also fetch other stuff from the EEPROM and stash it
833 * in the softc.
834 */
835 sc->sc_cfg = 0;
836 #if !defined(DP83820)
837 if (SIP_SIS900_REV(sc,SIS_REV_635) ||
838 SIP_SIS900_REV(sc,SIS_REV_900B))
839 sc->sc_cfg |= (CFG_PESEL | CFG_RNDCNT);
840 #endif
841
842 (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr);
843
844 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
845 ether_sprintf(enaddr));
846
847 /*
848 * Initialize the configuration register: aggressive PCI
849 * bus request algorithm, default backoff, default OW timer,
850 * default parity error detection.
851 *
852 * NOTE: "Big endian mode" is useless on the SiS900 and
853 * friends -- it affects packet data, not descriptors.
854 */
855 #ifdef DP83820
856 /*
857 * Cause the chip to load configuration data from the EEPROM.
858 */
859 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_PTSCR, PTSCR_EELOAD_EN);
860 for (i = 0; i < 10000; i++) {
861 delay(10);
862 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
863 PTSCR_EELOAD_EN) == 0)
864 break;
865 }
866 if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
867 PTSCR_EELOAD_EN) {
868 printf("%s: timeout loading configuration from EEPROM\n",
869 sc->sc_dev.dv_xname);
870 return;
871 }
872
873 sc->sc_gpior = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_GPIOR);
874
875 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
876 if (reg & CFG_PCI64_DET) {
877 printf("%s: 64-bit PCI slot detected", sc->sc_dev.dv_xname);
878 /*
879 * Check to see if this card is 64-bit. If so, enable 64-bit
880 * data transfers.
881 *
882 * We can't use the DATA64_EN bit in the EEPROM, because
883 * vendors of 32-bit cards fail to clear that bit in many
884 * cases (yet the card still detects that it's in a 64-bit
885 * slot; go figure).
886 */
887 if (SIP_DECL(check_64bit)(pa)) {
888 sc->sc_cfg |= CFG_DATA64_EN;
889 printf(", using 64-bit data transfers");
890 }
891 printf("\n");
892 }
893
894 /*
895 * XXX Need some PCI flags indicating support for
896 * XXX 64-bit addressing.
897 */
898 #if 0
899 if (reg & CFG_M64ADDR)
900 sc->sc_cfg |= CFG_M64ADDR;
901 if (reg & CFG_T64ADDR)
902 sc->sc_cfg |= CFG_T64ADDR;
903 #endif
904
905 if (reg & (CFG_TBI_EN|CFG_EXT_125)) {
906 const char *sep = "";
907 printf("%s: using ", sc->sc_dev.dv_xname);
908 if (reg & CFG_EXT_125) {
909 sc->sc_cfg |= CFG_EXT_125;
910 printf("%s125MHz clock", sep);
911 sep = ", ";
912 }
913 if (reg & CFG_TBI_EN) {
914 sc->sc_cfg |= CFG_TBI_EN;
915 printf("%sten-bit interface", sep);
916 sep = ", ";
917 }
918 printf("\n");
919 }
920 if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0 ||
921 (reg & CFG_MRM_DIS) != 0)
922 sc->sc_cfg |= CFG_MRM_DIS;
923 if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0 ||
924 (reg & CFG_MWI_DIS) != 0)
925 sc->sc_cfg |= CFG_MWI_DIS;
926
927 /*
928 * Use the extended descriptor format on the DP83820. This
929 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
930 * checksumming.
931 */
932 sc->sc_cfg |= CFG_EXTSTS_EN;
933 #endif /* DP83820 */
934
935 /*
936 * Initialize our media structures and probe the MII.
937 */
938 sc->sc_mii.mii_ifp = ifp;
939 sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg;
940 sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg;
941 sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg;
942 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, SIP_DECL(mediachange),
943 SIP_DECL(mediastatus));
944
945 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
946 MII_OFFSET_ANY, 0);
947 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
948 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
949 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
950 } else
951 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
952
953 ifp = &sc->sc_ethercom.ec_if;
954 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
955 ifp->if_softc = sc;
956 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
957 ifp->if_ioctl = SIP_DECL(ioctl);
958 ifp->if_start = SIP_DECL(start);
959 ifp->if_watchdog = SIP_DECL(watchdog);
960 ifp->if_init = SIP_DECL(init);
961 ifp->if_stop = SIP_DECL(stop);
962 IFQ_SET_READY(&ifp->if_snd);
963
964 /*
965 * We can support 802.1Q VLAN-sized frames.
966 */
967 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
968
969 #ifdef DP83820
970 /*
971 * And the DP83820 can do VLAN tagging in hardware, and
972 * support the jumbo Ethernet MTU.
973 */
974 sc->sc_ethercom.ec_capabilities |=
975 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
976
977 /*
978 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums
979 * in hardware.
980 */
981 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
982 IFCAP_CSUM_UDPv4;
983 #endif /* DP83820 */
984
985 /*
986 * Attach the interface.
987 */
988 if_attach(ifp);
989 ether_ifattach(ifp, enaddr);
990 #if NRND > 0
991 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
992 RND_TYPE_NET, 0);
993 #endif
994
995 /*
996 * The number of bytes that must be available in
997 * the Tx FIFO before the bus master can DMA more
998 * data into the FIFO.
999 */
1000 sc->sc_tx_fill_thresh = 64 / 32;
1001
1002 /*
1003 	 * Start at a drain threshold of 1504 bytes.  We will
1004 * increase it if a DMA underrun occurs.
1005 *
1006 * XXX The minimum value of this variable should be
1007 * tuned. We may be able to improve performance
1008 * by starting with a lower value. That, however,
1009 * may trash the first few outgoing packets if the
1010 * PCI bus is saturated.
1011 */
1012 sc->sc_tx_drain_thresh = 1504 / 32;
1013
1014 /*
1015 * Initialize the Rx FIFO drain threshold.
1016 *
1017 * This is in units of 8 bytes.
1018 *
1019 * We should never set this value lower than 2; 14 bytes are
1020 * required to filter the packet.
1021 */
1022 sc->sc_rx_drain_thresh = 128 / 8;
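	/*
	 * For reference: the Tx thresholds above are in units of 32 bytes
	 * (64/32 == 2 and 1504/32 == 47; the TXURN handler reports them as
	 * thresh * 32 bytes), while the Rx drain threshold is 128/8 == 16
	 * units of 8 bytes.
	 */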
1023
1024 #ifdef SIP_EVENT_COUNTERS
1025 /*
1026 * Attach event counters.
1027 */
1028 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1029 NULL, sc->sc_dev.dv_xname, "txsstall");
1030 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1031 NULL, sc->sc_dev.dv_xname, "txdstall");
1032 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_INTR,
1033 NULL, sc->sc_dev.dv_xname, "txforceintr");
1034 evcnt_attach_dynamic(&sc->sc_ev_txdintr, EVCNT_TYPE_INTR,
1035 NULL, sc->sc_dev.dv_xname, "txdintr");
1036 evcnt_attach_dynamic(&sc->sc_ev_txiintr, EVCNT_TYPE_INTR,
1037 NULL, sc->sc_dev.dv_xname, "txiintr");
1038 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1039 NULL, sc->sc_dev.dv_xname, "rxintr");
1040 evcnt_attach_dynamic(&sc->sc_ev_hiberr, EVCNT_TYPE_INTR,
1041 NULL, sc->sc_dev.dv_xname, "hiberr");
1042 #ifdef DP83820
1043 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1044 NULL, sc->sc_dev.dv_xname, "rxipsum");
1045 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC,
1046 NULL, sc->sc_dev.dv_xname, "rxtcpsum");
1047 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC,
1048 NULL, sc->sc_dev.dv_xname, "rxudpsum");
1049 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1050 NULL, sc->sc_dev.dv_xname, "txipsum");
1051 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC,
1052 NULL, sc->sc_dev.dv_xname, "txtcpsum");
1053 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC,
1054 NULL, sc->sc_dev.dv_xname, "txudpsum");
1055 #endif /* DP83820 */
1056 #endif /* SIP_EVENT_COUNTERS */
1057
1058 /*
1059 * Make sure the interface is shutdown during reboot.
1060 */
1061 sc->sc_sdhook = shutdownhook_establish(SIP_DECL(shutdown), sc);
1062 if (sc->sc_sdhook == NULL)
1063 printf("%s: WARNING: unable to establish shutdown hook\n",
1064 sc->sc_dev.dv_xname);
1065 return;
1066
1067 /*
1068 * Free any resources we've allocated during the failed attach
1069 * attempt. Do this in reverse order and fall through.
1070 */
1071 fail_5:
1072 for (i = 0; i < SIP_NRXDESC; i++) {
1073 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1074 bus_dmamap_destroy(sc->sc_dmat,
1075 sc->sc_rxsoft[i].rxs_dmamap);
1076 }
1077 fail_4:
1078 for (i = 0; i < SIP_TXQUEUELEN; i++) {
1079 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1080 bus_dmamap_destroy(sc->sc_dmat,
1081 sc->sc_txsoft[i].txs_dmamap);
1082 }
1083 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1084 fail_3:
1085 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1086 fail_2:
1087 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
1088 sizeof(struct sip_control_data));
1089 fail_1:
1090 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1091 fail_0:
1092 return;
1093 }
1094
1095 /*
1096 * sip_shutdown:
1097 *
1098 * Make sure the interface is stopped at reboot time.
1099 */
1100 void
1101 SIP_DECL(shutdown)(void *arg)
1102 {
1103 struct sip_softc *sc = arg;
1104
1105 SIP_DECL(stop)(&sc->sc_ethercom.ec_if, 1);
1106 }
1107
1108 /*
1109 * sip_start: [ifnet interface function]
1110 *
1111 * Start packet transmission on the interface.
1112 */
1113 void
1114 SIP_DECL(start)(struct ifnet *ifp)
1115 {
1116 struct sip_softc *sc = ifp->if_softc;
1117 struct mbuf *m0, *m;
1118 struct sip_txsoft *txs;
1119 bus_dmamap_t dmamap;
1120 int error, nexttx, lasttx, seg;
1121 int ofree = sc->sc_txfree;
1122 #if 0
1123 int firsttx = sc->sc_txnext;
1124 #endif
1125 #ifdef DP83820
1126 u_int32_t extsts;
1127 #endif
1128
1129 /*
1130 * If we've been told to pause, don't transmit any more packets.
1131 */
1132 if (sc->sc_flags & SIPF_PAUSED)
1133 ifp->if_flags |= IFF_OACTIVE;
1134
1135 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1136 return;
1137
1138 /*
1139 * Loop through the send queue, setting up transmit descriptors
1140 * until we drain the queue, or use up all available transmit
1141 * descriptors.
1142 */
1143 for (;;) {
1144 /* Get a work queue entry. */
1145 if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1146 SIP_EVCNT_INCR(&sc->sc_ev_txsstall);
1147 break;
1148 }
1149
1150 /*
1151 * Grab a packet off the queue.
1152 */
1153 IFQ_POLL(&ifp->if_snd, m0);
1154 if (m0 == NULL)
1155 break;
1156 #ifndef DP83820
1157 m = NULL;
1158 #endif
1159
1160 dmamap = txs->txs_dmamap;
1161
1162 #ifdef DP83820
1163 /*
1164 * Load the DMA map. If this fails, the packet either
1165 * didn't fit in the allotted number of segments, or we
1166 * were short on resources. For the too-many-segments
1167 * case, we simply report an error and drop the packet,
1168 * since we can't sanely copy a jumbo packet to a single
1169 * buffer.
1170 */
1171 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1172 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1173 if (error) {
1174 if (error == EFBIG) {
1175 printf("%s: Tx packet consumes too many "
1176 "DMA segments, dropping...\n",
1177 sc->sc_dev.dv_xname);
1178 IFQ_DEQUEUE(&ifp->if_snd, m0);
1179 m_freem(m0);
1180 continue;
1181 }
1182 /*
1183 * Short on resources, just stop for now.
1184 */
1185 break;
1186 }
1187 #else /* DP83820 */
1188 /*
1189 * Load the DMA map. If this fails, the packet either
1190 		 * didn't fit in the allotted number of segments, or we
1191 * were short on resources. In this case, we'll copy
1192 * and try again.
1193 */
1194 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1195 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1196 MGETHDR(m, M_DONTWAIT, MT_DATA);
1197 if (m == NULL) {
1198 printf("%s: unable to allocate Tx mbuf\n",
1199 sc->sc_dev.dv_xname);
1200 break;
1201 }
1202 if (m0->m_pkthdr.len > MHLEN) {
1203 MCLGET(m, M_DONTWAIT);
1204 if ((m->m_flags & M_EXT) == 0) {
1205 printf("%s: unable to allocate Tx "
1206 "cluster\n", sc->sc_dev.dv_xname);
1207 m_freem(m);
1208 break;
1209 }
1210 }
1211 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
1212 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1213 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
1214 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1215 if (error) {
1216 printf("%s: unable to load Tx buffer, "
1217 "error = %d\n", sc->sc_dev.dv_xname, error);
1218 break;
1219 }
1220 }
1221 #endif /* DP83820 */
1222
1223 /*
1224 * Ensure we have enough descriptors free to describe
1225 * the packet. Note, we always reserve one descriptor
1226 * at the end of the ring as a termination point, to
1227 * prevent wrap-around.
1228 */
1229 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
1230 /*
1231 * Not enough free descriptors to transmit this
1232 * packet. We haven't committed anything yet,
1233 * so just unload the DMA map, put the packet
1234 * back on the queue, and punt. Notify the upper
1235 			 * layer that there are no more slots left.
1236 *
1237 * XXX We could allocate an mbuf and copy, but
1238 * XXX is it worth it?
1239 */
1240 ifp->if_flags |= IFF_OACTIVE;
1241 bus_dmamap_unload(sc->sc_dmat, dmamap);
1242 #ifndef DP83820
1243 if (m != NULL)
1244 m_freem(m);
1245 #endif
1246 SIP_EVCNT_INCR(&sc->sc_ev_txdstall);
1247 break;
1248 }
1249
1250 IFQ_DEQUEUE(&ifp->if_snd, m0);
1251 #ifndef DP83820
1252 if (m != NULL) {
1253 m_freem(m0);
1254 m0 = m;
1255 }
1256 #endif
1257
1258 /*
1259 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1260 */
1261
1262 /* Sync the DMA map. */
1263 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1264 BUS_DMASYNC_PREWRITE);
1265
1266 /*
1267 * Initialize the transmit descriptors.
1268 */
1269 for (nexttx = lasttx = sc->sc_txnext, seg = 0;
1270 seg < dmamap->dm_nsegs;
1271 seg++, nexttx = SIP_NEXTTX(nexttx)) {
1272 /*
1273 * If this is the first descriptor we're
1274 * enqueueing, don't set the OWN bit just
1275 * yet. That could cause a race condition.
1276 * We'll do it below.
1277 */
1278 sc->sc_txdescs[nexttx].sipd_bufptr =
1279 htole32(dmamap->dm_segs[seg].ds_addr);
1280 sc->sc_txdescs[nexttx].sipd_cmdsts =
1281 htole32((nexttx == sc->sc_txnext ? 0 : CMDSTS_OWN) |
1282 CMDSTS_MORE | dmamap->dm_segs[seg].ds_len);
1283 #ifdef DP83820
1284 sc->sc_txdescs[nexttx].sipd_extsts = 0;
1285 #endif /* DP83820 */
1286 lasttx = nexttx;
1287 }
1288
1289 /* Clear the MORE bit on the last segment. */
1290 sc->sc_txdescs[lasttx].sipd_cmdsts &= htole32(~CMDSTS_MORE);
1291
1292 /*
1293 * If we're in the interrupt delay window, delay the
1294 * interrupt.
1295 */
1296 if (++sc->sc_txwin >= (SIP_TXQUEUELEN * 2 / 3)) {
1297 SIP_EVCNT_INCR(&sc->sc_ev_txforceintr);
1298 sc->sc_txdescs[lasttx].sipd_cmdsts |=
1299 htole32(CMDSTS_INTR);
1300 sc->sc_txwin = 0;
1301 }
1302
1303 #ifdef DP83820
1304 /*
1305 * If VLANs are enabled and the packet has a VLAN tag, set
1306 * up the descriptor to encapsulate the packet for us.
1307 *
1308 * This apparently has to be on the last descriptor of
1309 * the packet.
1310 */
1311 if (sc->sc_ethercom.ec_nvlans != 0 &&
1312 (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
1313 sc->sc_txdescs[lasttx].sipd_extsts |=
1314 htole32(EXTSTS_VPKT |
1315 htons(*mtod(m, int *) & EXTSTS_VTCI));
1316 }
1317
1318 /*
1319 * If the upper-layer has requested IPv4/TCPv4/UDPv4
1320 * checksumming, set up the descriptor to do this work
1321 * for us.
1322 *
1323 * This apparently has to be on the first descriptor of
1324 * the packet.
1325 *
1326 * Byte-swap constants so the compiler can optimize.
1327 */
1328 extsts = 0;
1329 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1330 KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4);
1331 SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
1332 extsts |= htole32(EXTSTS_IPPKT);
1333 }
1334 if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
1335 KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4);
1336 SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
1337 extsts |= htole32(EXTSTS_TCPPKT);
1338 } else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
1339 KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4);
1340 SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
1341 extsts |= htole32(EXTSTS_UDPPKT);
1342 }
1343 sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts;
1344 #endif /* DP83820 */
1345
1346 /* Sync the descriptors we're using. */
1347 SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1348 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1349
1350 /*
1351 		 * The entire packet is set up.  Give the first descriptor
1352 * to the chip now.
1353 */
1354 sc->sc_txdescs[sc->sc_txnext].sipd_cmdsts |=
1355 htole32(CMDSTS_OWN);
1356 SIP_CDTXSYNC(sc, sc->sc_txnext, 1,
1357 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1358
1359 /*
1360 * Store a pointer to the packet so we can free it later,
1361 * and remember what txdirty will be once the packet is
1362 * done.
1363 */
1364 txs->txs_mbuf = m0;
1365 txs->txs_firstdesc = sc->sc_txnext;
1366 txs->txs_lastdesc = lasttx;
1367
1368 /* Advance the tx pointer. */
1369 sc->sc_txfree -= dmamap->dm_nsegs;
1370 sc->sc_txnext = nexttx;
1371
1372 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1373 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1374
1375 #if NBPFILTER > 0
1376 /*
1377 * Pass the packet to any BPF listeners.
1378 */
1379 if (ifp->if_bpf)
1380 bpf_mtap(ifp->if_bpf, m0);
1381 #endif /* NBPFILTER > 0 */
1382 }
1383
1384 if (txs == NULL || sc->sc_txfree == 0) {
1385 /* No more slots left; notify upper layer. */
1386 ifp->if_flags |= IFF_OACTIVE;
1387 }
1388
1389 if (sc->sc_txfree != ofree) {
1390 /*
1391 * Start the transmit process. Note, the manual says
1392 * that if there are no pending transmissions in the
1393 * chip's internal queue (indicated by TXE being clear),
1394 * then the driver software must set the TXDP to the
1395 * first descriptor to be transmitted. However, if we
1396 		 * do this, it causes serious performance degradation on
1397 		 * the DP83820 under load; not setting TXDP doesn't seem
1398 		 * to adversely affect the SiS 900 or DP83815.
1399 *
1400 * Well, I guess it wouldn't be the first time a manual
1401 * has lied -- and they could be speaking of the NULL-
1402 * terminated descriptor list case, rather than OWN-
1403 * terminated rings.
1404 */
1405 #if 0
1406 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
1407 CR_TXE) == 0) {
1408 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
1409 SIP_CDTXADDR(sc, firsttx));
1410 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1411 }
1412 #else
1413 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1414 #endif
1415
1416 /* Set a watchdog timer in case the chip flakes out. */
1417 ifp->if_timer = 5;
1418 }
1419 }
1420
1421 /*
1422 * sip_watchdog: [ifnet interface function]
1423 *
1424 * Watchdog timer handler.
1425 */
1426 void
1427 SIP_DECL(watchdog)(struct ifnet *ifp)
1428 {
1429 struct sip_softc *sc = ifp->if_softc;
1430
1431 /*
1432 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
1433 	 * If we get a timeout, try to sweep up transmit descriptors.
1434 * If we manage to sweep them all up, ignore the lack of
1435 * interrupt.
1436 */
1437 SIP_DECL(txintr)(sc);
1438
1439 if (sc->sc_txfree != SIP_NTXDESC) {
1440 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1441 ifp->if_oerrors++;
1442
1443 /* Reset the interface. */
1444 (void) SIP_DECL(init)(ifp);
1445 } else if (ifp->if_flags & IFF_DEBUG)
1446 printf("%s: recovered from device timeout\n",
1447 sc->sc_dev.dv_xname);
1448
1449 /* Try to get more packets going. */
1450 SIP_DECL(start)(ifp);
1451 }
1452
1453 /*
1454 * sip_ioctl: [ifnet interface function]
1455 *
1456 * Handle control requests from the operator.
1457 */
1458 int
1459 SIP_DECL(ioctl)(struct ifnet *ifp, u_long cmd, caddr_t data)
1460 {
1461 struct sip_softc *sc = ifp->if_softc;
1462 struct ifreq *ifr = (struct ifreq *)data;
1463 int s, error;
1464
1465 s = splnet();
1466
1467 switch (cmd) {
1468 case SIOCSIFMEDIA:
1469 case SIOCGIFMEDIA:
1470 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1471 break;
1472
1473 default:
1474 error = ether_ioctl(ifp, cmd, data);
1475 if (error == ENETRESET) {
1476 /*
1477 * Multicast list has changed; set the hardware filter
1478 * accordingly.
1479 */
1480 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
1481 error = 0;
1482 }
1483 break;
1484 }
1485
1486 /* Try to get more packets going. */
1487 SIP_DECL(start)(ifp);
1488
1489 splx(s);
1490 return (error);
1491 }
1492
1493 /*
1494 * sip_intr:
1495 *
1496 * Interrupt service routine.
1497 */
1498 int
1499 SIP_DECL(intr)(void *arg)
1500 {
1501 struct sip_softc *sc = arg;
1502 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1503 u_int32_t isr;
1504 int handled = 0;
1505
1506 for (;;) {
1507 /* Reading clears interrupt. */
1508 isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
1509 if ((isr & sc->sc_imr) == 0)
1510 break;
1511
1512 #if NRND > 0
1513 if (RND_ENABLED(&sc->rnd_source))
1514 rnd_add_uint32(&sc->rnd_source, isr);
1515 #endif
1516
1517 handled = 1;
1518
1519 if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
1520 SIP_EVCNT_INCR(&sc->sc_ev_rxintr);
1521
1522 /* Grab any new packets. */
1523 SIP_DECL(rxintr)(sc);
1524
1525 if (isr & ISR_RXORN) {
1526 printf("%s: receive FIFO overrun\n",
1527 sc->sc_dev.dv_xname);
1528
1529 /* XXX adjust rx_drain_thresh? */
1530 }
1531
1532 if (isr & ISR_RXIDLE) {
1533 printf("%s: receive ring overrun\n",
1534 sc->sc_dev.dv_xname);
1535
1536 /* Get the receive process going again. */
1537 bus_space_write_4(sc->sc_st, sc->sc_sh,
1538 SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
1539 bus_space_write_4(sc->sc_st, sc->sc_sh,
1540 SIP_CR, CR_RXE);
1541 }
1542 }
1543
1544 if (isr & (ISR_TXURN|ISR_TXDESC|ISR_TXIDLE)) {
1545 #ifdef SIP_EVENT_COUNTERS
1546 if (isr & ISR_TXDESC)
1547 SIP_EVCNT_INCR(&sc->sc_ev_txdintr);
1548 else if (isr & ISR_TXIDLE)
1549 SIP_EVCNT_INCR(&sc->sc_ev_txiintr);
1550 #endif
1551
1552 /* Sweep up transmit descriptors. */
1553 SIP_DECL(txintr)(sc);
1554
1555 if (isr & ISR_TXURN) {
1556 u_int32_t thresh;
1557
1558 printf("%s: transmit FIFO underrun",
1559 sc->sc_dev.dv_xname);
1560
1561 thresh = sc->sc_tx_drain_thresh + 1;
1562 if (thresh <= TXCFG_DRTH &&
1563 (thresh * 32) <= (SIP_TXFIFO_SIZE -
1564 (sc->sc_tx_fill_thresh * 32))) {
1565 printf("; increasing Tx drain "
1566 "threshold to %u bytes\n",
1567 thresh * 32);
1568 sc->sc_tx_drain_thresh = thresh;
1569 (void) SIP_DECL(init)(ifp);
1570 } else {
1571 (void) SIP_DECL(init)(ifp);
1572 printf("\n");
1573 }
1574 }
1575 }
1576
1577 #if !defined(DP83820)
1578 if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
1579 if (isr & ISR_PAUSE_ST) {
1580 sc->sc_flags |= SIPF_PAUSED;
1581 ifp->if_flags |= IFF_OACTIVE;
1582 }
1583 if (isr & ISR_PAUSE_END) {
1584 sc->sc_flags &= ~SIPF_PAUSED;
1585 ifp->if_flags &= ~IFF_OACTIVE;
1586 }
1587 }
1588 #endif /* ! DP83820 */
1589
1590 if (isr & ISR_HIBERR) {
1591 int want_init = 0;
1592
1593 SIP_EVCNT_INCR(&sc->sc_ev_hiberr);
1594
1595 #define PRINTERR(bit, str) \
1596 do { \
1597 if ((isr & (bit)) != 0) { \
1598 if ((ifp->if_flags & IFF_DEBUG) != 0) \
1599 printf("%s: %s\n", \
1600 sc->sc_dev.dv_xname, str); \
1601 want_init = 1; \
1602 } \
1603 } while (/*CONSTCOND*/0)
1604
1605 PRINTERR(ISR_DPERR, "parity error");
1606 PRINTERR(ISR_SSERR, "system error");
1607 PRINTERR(ISR_RMABT, "master abort");
1608 PRINTERR(ISR_RTABT, "target abort");
1609 PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
1610 /*
1611 * Ignore:
1612 * Tx reset complete
1613 * Rx reset complete
1614 */
1615 if (want_init)
1616 (void) SIP_DECL(init)(ifp);
1617 #undef PRINTERR
1618 }
1619 }
1620
1621 /* Try to get more packets going. */
1622 SIP_DECL(start)(ifp);
1623
1624 return (handled);
1625 }
1626
1627 /*
1628 * sip_txintr:
1629 *
1630 * Helper; handle transmit interrupts.
1631 */
1632 void
1633 SIP_DECL(txintr)(struct sip_softc *sc)
1634 {
1635 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1636 struct sip_txsoft *txs;
1637 u_int32_t cmdsts;
1638
1639 if ((sc->sc_flags & SIPF_PAUSED) == 0)
1640 ifp->if_flags &= ~IFF_OACTIVE;
1641
1642 /*
1643 * Go through our Tx list and free mbufs for those
1644 * frames which have been transmitted.
1645 */
1646 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1647 SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1648 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1649
1650 cmdsts = le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
1651 if (cmdsts & CMDSTS_OWN)
1652 break;
1653
1654 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1655
1656 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1657
1658 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1659 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1660 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1661 m_freem(txs->txs_mbuf);
1662 txs->txs_mbuf = NULL;
1663
1664 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1665
1666 /*
1667 * Check for errors and collisions.
1668 */
1669 if (cmdsts &
1670 (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
1671 ifp->if_oerrors++;
1672 if (cmdsts & CMDSTS_Tx_EC)
1673 ifp->if_collisions += 16;
1674 if (ifp->if_flags & IFF_DEBUG) {
1675 if (cmdsts & CMDSTS_Tx_ED)
1676 printf("%s: excessive deferral\n",
1677 sc->sc_dev.dv_xname);
1678 if (cmdsts & CMDSTS_Tx_EC)
1679 printf("%s: excessive collisions\n",
1680 sc->sc_dev.dv_xname);
1681 }
1682 } else {
1683 /* Packet was transmitted successfully. */
1684 ifp->if_opackets++;
1685 ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
1686 }
1687 }
1688
1689 /*
1690 * If there are no more pending transmissions, cancel the watchdog
1691 * timer.
1692 */
1693 if (txs == NULL) {
1694 ifp->if_timer = 0;
1695 sc->sc_txwin = 0;
1696 }
1697 }
1698
1699 #if defined(DP83820)
1700 /*
1701 * sip_rxintr:
1702 *
1703 * Helper; handle receive interrupts.
1704 */
1705 void
1706 SIP_DECL(rxintr)(struct sip_softc *sc)
1707 {
1708 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1709 struct sip_rxsoft *rxs;
1710 struct mbuf *m, *tailm;
1711 u_int32_t cmdsts, extsts;
1712 int i, len;
1713
1714 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1715 rxs = &sc->sc_rxsoft[i];
1716
1717 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1718
1719 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1720 extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts);
1721
1722 /*
1723 * NOTE: OWN is set if owned by _consumer_. We're the
1724 * consumer of the receive ring, so if the bit is clear,
1725 * we have processed all of the packets.
1726 */
1727 if ((cmdsts & CMDSTS_OWN) == 0) {
1728 /*
1729 * We have processed all of the receive buffers.
1730 */
1731 break;
1732 }
1733
1734 if (__predict_false(sc->sc_rxdiscard)) {
1735 SIP_INIT_RXDESC(sc, i);
1736 if ((cmdsts & CMDSTS_MORE) == 0) {
1737 /* Reset our state. */
1738 sc->sc_rxdiscard = 0;
1739 }
1740 continue;
1741 }
1742
1743 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1744 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1745
1746 m = rxs->rxs_mbuf;
1747
1748 /*
1749 * Add a new receive buffer to the ring.
1750 */
1751 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1752 /*
1753 * Failed, throw away what we've done so
1754 * far, and discard the rest of the packet.
1755 */
1756 ifp->if_ierrors++;
1757 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1758 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1759 SIP_INIT_RXDESC(sc, i);
1760 if (cmdsts & CMDSTS_MORE)
1761 sc->sc_rxdiscard = 1;
1762 if (sc->sc_rxhead != NULL)
1763 m_freem(sc->sc_rxhead);
1764 SIP_RXCHAIN_RESET(sc);
1765 continue;
1766 }
1767
1768 SIP_RXCHAIN_LINK(sc, m);
1769
1770 /*
1771 * If this is not the end of the packet, keep
1772 * looking.
1773 */
1774 if (cmdsts & CMDSTS_MORE) {
1775 sc->sc_rxlen += m->m_len;
1776 continue;
1777 }
1778
1779 /*
1780 * Okay, we have the entire packet now...
1781 */
1782 *sc->sc_rxtailp = NULL;
1783 m = sc->sc_rxhead;
1784 tailm = sc->sc_rxtail;
1785
1786 SIP_RXCHAIN_RESET(sc);
1787
1788 /*
1789 * If an error occurred, update stats and drop the packet.
1790 */
1791 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1792 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1793 ifp->if_ierrors++;
1794 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1795 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1796 /* Receive overrun handled elsewhere. */
1797 printf("%s: receive descriptor error\n",
1798 sc->sc_dev.dv_xname);
1799 }
1800 #define PRINTERR(bit, str) \
1801 if ((ifp->if_flags & IFF_DEBUG) != 0 && \
1802 (cmdsts & (bit)) != 0) \
1803 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1804 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1805 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1806 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1807 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1808 #undef PRINTERR
1809 m_freem(m);
1810 continue;
1811 }
1812
1813 /*
1814 * No errors.
1815 *
1816 * Note, the DP83820 includes the CRC with
1817 * every packet.
1818 */
1819 len = CMDSTS_SIZE(cmdsts);
1820 tailm->m_len = len - sc->sc_rxlen;
1821
1822 /*
1823 * If the packet is small enough to fit in a
1824 * single header mbuf, allocate one and copy
1825 * the data into it. This greatly reduces
1826 * memory consumption when we receive lots
1827 * of small packets.
1828 */
1829 if (SIP_DECL(copy_small) != 0 && len <= (MHLEN - 2)) {
1830 struct mbuf *nm;
1831 MGETHDR(nm, M_DONTWAIT, MT_DATA);
1832 if (nm == NULL) {
1833 ifp->if_ierrors++;
1834 m_freem(m);
1835 continue;
1836 }
1837 nm->m_data += 2;
1838 nm->m_pkthdr.len = nm->m_len = len;
1839 m_copydata(m, 0, len, mtod(nm, caddr_t));
1840 m_freem(m);
1841 m = nm;
1842 }
1843 #ifndef __NO_STRICT_ALIGNMENT
1844 else {
1845 /*
1846 * The DP83820's receive buffers must be 4-byte
1847 * aligned. But this means that the data after
1848 * the Ethernet header is misaligned. To compensate,
1849 * we have artificially shortened the buffer size
1850 * in the descriptor, and we do an overlapping copy
1851 * of the data two bytes further in (in the first
1852 * buffer of the chain only).
1853 */
1854 memmove(mtod(m, caddr_t) + 2, mtod(m, caddr_t),
1855 m->m_len);
1856 m->m_data += 2;
1857 }
1858 #endif /* ! __NO_STRICT_ALIGNMENT */
1859
1860 /*
1861 * If VLANs are enabled, VLAN packets have been unwrapped
1862 * for us. Associate the tag with the packet.
1863 */
1864 if (sc->sc_ethercom.ec_nvlans != 0 &&
1865 (extsts & EXTSTS_VPKT) != 0) {
1866 struct mbuf *vtag;
1867
1868 vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1869 if (vtag == NULL) {
1870 ifp->if_ierrors++;
1871 printf("%s: unable to allocate VLAN tag\n",
1872 sc->sc_dev.dv_xname);
1873 m_freem(m);
1874 continue;
1875 }
1876
1877 *mtod(vtag, int *) = ntohs(extsts & EXTSTS_VTCI);
1878 vtag->m_len = sizeof(int);
1879 }
1880
1881 /*
1882 * Set the incoming checksum information for the
1883 * packet.
1884 */
1885 if ((extsts & EXTSTS_IPPKT) != 0) {
1886 SIP_EVCNT_INCR(&sc->sc_ev_rxipsum);
1887 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1888 if (extsts & EXTSTS_Rx_IPERR)
1889 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1890 if (extsts & EXTSTS_TCPPKT) {
1891 SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
1892 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1893 if (extsts & EXTSTS_Rx_TCPERR)
1894 m->m_pkthdr.csum_flags |=
1895 M_CSUM_TCP_UDP_BAD;
1896 } else if (extsts & EXTSTS_UDPPKT) {
1897 SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum);
1898 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1899 if (extsts & EXTSTS_Rx_UDPERR)
1900 m->m_pkthdr.csum_flags |=
1901 M_CSUM_TCP_UDP_BAD;
1902 }
1903 }
1904
1905 ifp->if_ipackets++;
1906 m->m_flags |= M_HASFCS;
1907 m->m_pkthdr.rcvif = ifp;
1908 m->m_pkthdr.len = len;
1909
1910 #if NBPFILTER > 0
1911 /*
1912 * Pass this up to any BPF listeners, but only
1913 		 * pass it up the stack if it's for us.
1914 */
1915 if (ifp->if_bpf)
1916 bpf_mtap(ifp->if_bpf, m);
1917 #endif /* NBPFILTER > 0 */
1918
1919 /* Pass it on. */
1920 (*ifp->if_input)(ifp, m);
1921 }
1922
1923 /* Update the receive pointer. */
1924 sc->sc_rxptr = i;
1925 }
1926 #else /* ! DP83820 */
1927 /*
1928 * sip_rxintr:
1929 *
1930 * Helper; handle receive interrupts.
1931 */
1932 void
1933 SIP_DECL(rxintr)(struct sip_softc *sc)
1934 {
1935 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1936 struct sip_rxsoft *rxs;
1937 struct mbuf *m;
1938 u_int32_t cmdsts;
1939 int i, len;
1940
1941 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1942 rxs = &sc->sc_rxsoft[i];
1943
1944 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1945
1946 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1947
1948 /*
1949 * NOTE: OWN is set if owned by _consumer_. We're the
1950 * consumer of the receive ring, so if the bit is clear,
1951 * we have processed all of the packets.
1952 */
1953 if ((cmdsts & CMDSTS_OWN) == 0) {
1954 /*
1955 * We have processed all of the receive buffers.
1956 */
1957 break;
1958 }
1959
1960 /*
1961 * If any collisions were seen on the wire, count one.
1962 */
1963 if (cmdsts & CMDSTS_Rx_COL)
1964 ifp->if_collisions++;
1965
1966 /*
1967 * If an error occurred, update stats, clear the status
1968 * word, and leave the packet buffer in place. It will
1969 * simply be reused the next time the ring comes around.
1970 */
1971 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1972 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1973 ifp->if_ierrors++;
1974 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1975 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1976 /* Receive overrun handled elsewhere. */
1977 printf("%s: receive descriptor error\n",
1978 sc->sc_dev.dv_xname);
1979 }
1980 #define PRINTERR(bit, str) \
1981 if ((ifp->if_flags & IFF_DEBUG) != 0 && \
1982 (cmdsts & (bit)) != 0) \
1983 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1984 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1985 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1986 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1987 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1988 #undef PRINTERR
1989 SIP_INIT_RXDESC(sc, i);
1990 continue;
1991 }
1992
1993 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1994 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1995
1996 /*
1997 * No errors; receive the packet. Note, the SiS 900
1998 * includes the CRC with every packet.
1999 */
2000 len = CMDSTS_SIZE(cmdsts);
2001
2002 #ifdef __NO_STRICT_ALIGNMENT
2003 /*
2004 * If the packet is small enough to fit in a
2005 * single header mbuf, allocate one and copy
2006 * the data into it. This greatly reduces
2007 * memory consumption when we receive lots
2008 * of small packets.
2009 *
2010 * Otherwise, we add a new buffer to the receive
2011 * chain. If this fails, we drop the packet and
2012 * recycle the old buffer.
2013 */
2014 if (SIP_DECL(copy_small) != 0 && len <= MHLEN) {
2015 MGETHDR(m, M_DONTWAIT, MT_DATA);
2016 if (m == NULL)
2017 goto dropit;
2018 memcpy(mtod(m, caddr_t),
2019 mtod(rxs->rxs_mbuf, caddr_t), len);
2020 SIP_INIT_RXDESC(sc, i);
2021 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2022 rxs->rxs_dmamap->dm_mapsize,
2023 BUS_DMASYNC_PREREAD);
2024 } else {
2025 m = rxs->rxs_mbuf;
2026 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
2027 dropit:
2028 ifp->if_ierrors++;
2029 SIP_INIT_RXDESC(sc, i);
2030 bus_dmamap_sync(sc->sc_dmat,
2031 rxs->rxs_dmamap, 0,
2032 rxs->rxs_dmamap->dm_mapsize,
2033 BUS_DMASYNC_PREREAD);
2034 continue;
2035 }
2036 }
2037 #else
2038 /*
2039 * The SiS 900's receive buffers must be 4-byte aligned.
2040 * But this means that the data after the Ethernet header
2041 * is misaligned. We must allocate a new buffer and
2042 * copy the data, shifted forward 2 bytes.
2043 */
2044 MGETHDR(m, M_DONTWAIT, MT_DATA);
2045 if (m == NULL) {
2046 dropit:
2047 ifp->if_ierrors++;
2048 SIP_INIT_RXDESC(sc, i);
2049 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2050 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2051 continue;
2052 }
2053 if (len > (MHLEN - 2)) {
2054 MCLGET(m, M_DONTWAIT);
2055 if ((m->m_flags & M_EXT) == 0) {
2056 m_freem(m);
2057 goto dropit;
2058 }
2059 }
2060 m->m_data += 2;
2061
2062 /*
2063 * Note that we use clusters for incoming frames, so the
2064 * buffer is virtually contiguous.
2065 */
2066 memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
2067
2068 /* Allow the receive descriptor to continue using its mbuf. */
2069 SIP_INIT_RXDESC(sc, i);
2070 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2071 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2072 #endif /* __NO_STRICT_ALIGNMENT */
2073
2074 ifp->if_ipackets++;
2075 m->m_flags |= M_HASFCS;
2076 m->m_pkthdr.rcvif = ifp;
2077 m->m_pkthdr.len = m->m_len = len;
2078
2079 #if NBPFILTER > 0
2080 /*
2081 * Pass this up to any BPF listeners, but only
2082 		 * pass it up the stack if it's for us.
2083 */
2084 if (ifp->if_bpf)
2085 bpf_mtap(ifp->if_bpf, m);
2086 #endif /* NBPFILTER > 0 */
2087
2088 /* Pass it on. */
2089 (*ifp->if_input)(ifp, m);
2090 }
2091
2092 /* Update the receive pointer. */
2093 sc->sc_rxptr = i;
2094 }
2095 #endif /* DP83820 */
2096
2097 /*
2098 * sip_tick:
2099 *
2100 * One second timer, used to tick the MII.
2101 */
2102 void
2103 SIP_DECL(tick)(void *arg)
2104 {
2105 struct sip_softc *sc = arg;
2106 int s;
2107
2108 s = splnet();
2109 mii_tick(&sc->sc_mii);
2110 splx(s);
2111
2112 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2113 }
2114
2115 /*
2116 * sip_reset:
2117 *
2118 * Perform a soft reset on the SiS 900.
2119 */
2120 void
2121 SIP_DECL(reset)(struct sip_softc *sc)
2122 {
2123 bus_space_tag_t st = sc->sc_st;
2124 bus_space_handle_t sh = sc->sc_sh;
2125 int i;
2126
2127 bus_space_write_4(st, sh, SIP_IER, 0);
2128 bus_space_write_4(st, sh, SIP_IMR, 0);
2129 bus_space_write_4(st, sh, SIP_RFCR, 0);
2130 bus_space_write_4(st, sh, SIP_CR, CR_RST);
2131
2132 for (i = 0; i < SIP_TIMEOUT; i++) {
2133 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0)
2134 break;
2135 delay(2);
2136 }
2137
2138 if (i == SIP_TIMEOUT)
2139 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
2140
2141 delay(1000);
2142
2143 #ifdef DP83820
2144 /*
2145 * Set the general purpose I/O bits. Do it here in case we
2146 * need to have GPIO set up to talk to the media interface.
2147 */
2148 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior);
2149 delay(1000);
2150 #endif /* DP83820 */
2151 }
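
/*
 * Illustrative sketch (not compiled into the driver): the CR_RST loop in
 * sip_reset() above is an instance of a common "write a command, then poll
 * until a bit clears, bounded by a retry count" pattern.  A hypothetical
 * helper expressing that pattern might look like this; sip_wait_bit_clear()
 * is not part of the driver.
 */
#if 0
static int
sip_wait_bit_clear(struct sip_softc *sc, bus_size_t reg, u_int32_t bit,
    int tries, int usec)
{

	while (tries-- > 0) {
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, reg) & bit) == 0)
			return (0);
		delay(usec);
	}
	return (ETIMEDOUT);
}
#endif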
2152
2153 /*
2154 * sip_init: [ ifnet interface function ]
2155 *
2156 * Initialize the interface. Must be called at splnet().
2157 */
2158 int
2159 SIP_DECL(init)(struct ifnet *ifp)
2160 {
2161 struct sip_softc *sc = ifp->if_softc;
2162 bus_space_tag_t st = sc->sc_st;
2163 bus_space_handle_t sh = sc->sc_sh;
2164 struct sip_txsoft *txs;
2165 struct sip_rxsoft *rxs;
2166 struct sip_desc *sipd;
2167 u_int32_t reg;
2168 int i, error = 0;
2169
2170 /*
2171 * Cancel any pending I/O.
2172 */
2173 SIP_DECL(stop)(ifp, 0);
2174
2175 /*
2176 * Reset the chip to a known state.
2177 */
2178 SIP_DECL(reset)(sc);
2179
2180 #if !defined(DP83820)
2181 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) {
2182 /*
2183 * DP83815 manual, page 78:
2184 * 4.4 Recommended Registers Configuration
2185 * For optimum performance of the DP83815, version noted
2186 * as DP83815CVNG (SRR = 203h), the listed register
2187 * modifications must be followed in sequence...
2188 *
2189 * It's not clear if this should be 302h or 203h because that
2190 * chip name is listed as SRR 302h in the description of the
2191 * SRR register. However, my revision 302h DP83815 on the
2192 * Netgear FA311 purchased in 02/2001 needs these settings
2193 * to avoid tons of errors in AcceptPerfectMatch (non-
2194 * IFF_PROMISC) mode. I do not know if other revisions need
2195 * this set or not. [briggs -- 09 March 2001]
2196 *
2197 * Note that only the low-order 12 bits of 0xe4 are documented
2198 * and that this sets reserved bits in that register.
2199 */
2200 reg = bus_space_read_4(st, sh, SIP_NS_SRR);
2201 if (reg == 0x302) {
2202 bus_space_write_4(st, sh, 0x00cc, 0x0001);
2203 bus_space_write_4(st, sh, 0x00e4, 0x189C);
2204 bus_space_write_4(st, sh, 0x00fc, 0x0000);
2205 bus_space_write_4(st, sh, 0x00f4, 0x5040);
2206 bus_space_write_4(st, sh, 0x00f8, 0x008c);
2207 }
2208 }
2209 #endif /* ! DP83820 */
2210
2211 /*
2212 * Initialize the transmit descriptor ring.
2213 */
2214 for (i = 0; i < SIP_NTXDESC; i++) {
2215 sipd = &sc->sc_txdescs[i];
2216 memset(sipd, 0, sizeof(struct sip_desc));
2217 sipd->sipd_link = htole32(SIP_CDTXADDR(sc, SIP_NEXTTX(i)));
2218 }
2219 SIP_CDTXSYNC(sc, 0, SIP_NTXDESC,
2220 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2221 sc->sc_txfree = SIP_NTXDESC;
2222 sc->sc_txnext = 0;
2223 sc->sc_txwin = 0;
2224
2225 /*
2226 * Initialize the transmit job descriptors.
2227 */
2228 SIMPLEQ_INIT(&sc->sc_txfreeq);
2229 SIMPLEQ_INIT(&sc->sc_txdirtyq);
2230 for (i = 0; i < SIP_TXQUEUELEN; i++) {
2231 txs = &sc->sc_txsoft[i];
2232 txs->txs_mbuf = NULL;
2233 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2234 }
2235
2236 /*
2237 * Initialize the receive descriptor and receive job
2238 * descriptor rings.
2239 */
2240 for (i = 0; i < SIP_NRXDESC; i++) {
2241 rxs = &sc->sc_rxsoft[i];
2242 if (rxs->rxs_mbuf == NULL) {
2243 if ((error = SIP_DECL(add_rxbuf)(sc, i)) != 0) {
2244 printf("%s: unable to allocate or map rx "
2245 "buffer %d, error = %d\n",
2246 sc->sc_dev.dv_xname, i, error);
2247 /*
2248 * XXX Should attempt to run with fewer receive
2249 * XXX buffers instead of just failing.
2250 */
2251 SIP_DECL(rxdrain)(sc);
2252 goto out;
2253 }
2254 } else
2255 SIP_INIT_RXDESC(sc, i);
2256 }
2257 sc->sc_rxptr = 0;
2258 #ifdef DP83820
2259 sc->sc_rxdiscard = 0;
2260 SIP_RXCHAIN_RESET(sc);
2261 #endif /* DP83820 */
2262
2263 /*
2264 * Set the configuration register; it's already initialized
2265 * in sip_attach().
2266 */
2267 bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg);
2268
2269 /*
2270 * Initialize the prototype TXCFG register.
2271 */
2272 #if defined(DP83820)
2273 sc->sc_txcfg = TXCFG_MXDMA_512;
2274 sc->sc_rxcfg = RXCFG_MXDMA_512;
2275 #else
2276 if ((SIP_SIS900_REV(sc, SIS_REV_635) ||
2277 SIP_SIS900_REV(sc, SIS_REV_900B)) &&
2278 (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG) & CFG_EDBMASTEN)) {
2279 sc->sc_txcfg = TXCFG_MXDMA_64;
2280 sc->sc_rxcfg = RXCFG_MXDMA_64;
2281 } else {
2282 sc->sc_txcfg = TXCFG_MXDMA_512;
2283 sc->sc_rxcfg = RXCFG_MXDMA_512;
2284 }
2285 #endif /* DP83820 */
2286
2287 sc->sc_txcfg |= TXCFG_ATP |
2288 (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) |
2289 sc->sc_tx_drain_thresh;
2290 bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg);
2291
2292 /*
2293 * Initialize the receive drain threshold if we have never
2294 * done so.
2295 */
2296 if (sc->sc_rx_drain_thresh == 0) {
2297 /*
2298 * XXX This value should be tuned. This is set to the
2299 * maximum of 248 bytes, and we may be able to improve
2300 * performance by decreasing it (although we should never
2301 * set this value lower than 2; 14 bytes are required to
2302 * filter the packet).
2303 */
2304 sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT;
2305 }
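
	/*
	 * Illustrative sketch (not compiled into the driver): if the
	 * 248-byte maximum mentioned above corresponds to the largest
	 * DRTH field value, the drain threshold is expressed in units
	 * of 8 bytes.  Under that assumption, a byte-oriented tuning
	 * value would be converted like this; "rx_drain_bytes" is a
	 * hypothetical name.
	 */
#if 0
	{
		int rx_drain_bytes = 64;	/* example target */

		sc->sc_rx_drain_thresh = rx_drain_bytes / 8;
	}
#endif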
2306
2307 /*
2308 * Initialize the prototype RXCFG register.
2309 */
2310 sc->sc_rxcfg |= (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT);
2311 bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg);
2312
2313 #ifdef DP83820
2314 /*
2315 * Initialize the VLAN/IP receive control register.
2316 * We enable checksum computation on all incoming
2317 * packets, and do not reject packets w/ bad checksums.
2318 */
2319 reg = 0;
2320 if (ifp->if_capenable &
2321 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2322 reg |= VRCR_IPEN;
2323 if (sc->sc_ethercom.ec_nvlans != 0)
2324 reg |= VRCR_VTDEN|VRCR_VTREN;
2325 bus_space_write_4(st, sh, SIP_VRCR, reg);
2326
2327 /*
2328 * Initialize the VLAN/IP transmit control register.
2329 * We enable outgoing checksum computation on a
2330 * per-packet basis.
2331 */
2332 reg = 0;
2333 if (ifp->if_capenable &
2334 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2335 reg |= VTCR_PPCHK;
2336 if (sc->sc_ethercom.ec_nvlans != 0)
2337 reg |= VTCR_VPPTI;
2338 bus_space_write_4(st, sh, SIP_VTCR, reg);
2339
2340 /*
2341 * If we're using VLANs, initialize the VLAN data register.
2342 * To understand why we bswap the VLAN Ethertype, see section
2343 * 4.2.36 of the DP83820 manual.
2344 */
2345 if (sc->sc_ethercom.ec_nvlans != 0)
2346 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN));
2347 #endif /* DP83820 */
2348
2349 /*
2350 * Give the transmit and receive rings to the chip.
2351 */
2352 bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
2353 bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
2354
2355 /*
2356 * Initialize the interrupt mask.
2357 */
2358 sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR|
2359 ISR_TXURN|ISR_TXDESC|ISR_TXIDLE|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
2360 bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);
2361
2362 /* Set up the receive filter. */
2363 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
2364
2365 /*
2366 * Set the current media. Do this after initializing the prototype
2367 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
2368 * control.
2369 */
2370 mii_mediachg(&sc->sc_mii);
2371
2372 /*
2373 * Enable interrupts.
2374 */
2375 bus_space_write_4(st, sh, SIP_IER, IER_IE);
2376
2377 /*
2378 * Start the transmit and receive processes.
2379 */
2380 bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);
2381
2382 /*
2383 * Start the one second MII clock.
2384 */
2385 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2386
2387 /*
2388 * ...all done!
2389 */
2390 ifp->if_flags |= IFF_RUNNING;
2391 ifp->if_flags &= ~IFF_OACTIVE;
2392
2393 out:
2394 if (error)
2395 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2396 return (error);
2397 }
2398
2399 /*
2400  * sip_rxdrain:
2401 *
2402 * Drain the receive queue.
2403 */
2404 void
2405 SIP_DECL(rxdrain)(struct sip_softc *sc)
2406 {
2407 struct sip_rxsoft *rxs;
2408 int i;
2409
2410 for (i = 0; i < SIP_NRXDESC; i++) {
2411 rxs = &sc->sc_rxsoft[i];
2412 if (rxs->rxs_mbuf != NULL) {
2413 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2414 m_freem(rxs->rxs_mbuf);
2415 rxs->rxs_mbuf = NULL;
2416 }
2417 }
2418 }
2419
2420 /*
2421 * sip_stop: [ ifnet interface function ]
2422 *
2423 * Stop transmission on the interface.
2424 */
2425 void
2426 SIP_DECL(stop)(struct ifnet *ifp, int disable)
2427 {
2428 struct sip_softc *sc = ifp->if_softc;
2429 bus_space_tag_t st = sc->sc_st;
2430 bus_space_handle_t sh = sc->sc_sh;
2431 struct sip_txsoft *txs;
2432 u_int32_t cmdsts = 0; /* DEBUG */
2433
2434 /*
2435 * Stop the one second clock.
2436 */
2437 callout_stop(&sc->sc_tick_ch);
2438
2439 /* Down the MII. */
2440 mii_down(&sc->sc_mii);
2441
2442 /*
2443 * Disable interrupts.
2444 */
2445 bus_space_write_4(st, sh, SIP_IER, 0);
2446
2447 /*
2448 * Stop receiver and transmitter.
2449 */
2450 bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);
2451
2452 /*
2453 * Release any queued transmit buffers.
2454 */
2455 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
2456 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2457 SIMPLEQ_NEXT(txs, txs_q) == NULL &&
2458 (le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts) &
2459 CMDSTS_INTR) == 0)
2460 printf("%s: sip_stop: last descriptor does not "
2461 "have INTR bit set\n", sc->sc_dev.dv_xname);
2462 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
2463 #ifdef DIAGNOSTIC
2464 if (txs->txs_mbuf == NULL) {
2465 printf("%s: dirty txsoft with no mbuf chain\n",
2466 sc->sc_dev.dv_xname);
2467 panic("sip_stop");
2468 }
2469 #endif
2470 cmdsts |= /* DEBUG */
2471 le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
2472 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2473 m_freem(txs->txs_mbuf);
2474 txs->txs_mbuf = NULL;
2475 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2476 }
2477
2478 if (disable)
2479 SIP_DECL(rxdrain)(sc);
2480
2481 /*
2482 * Mark the interface down and cancel the watchdog timer.
2483 */
2484 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2485 ifp->if_timer = 0;
2486
2487 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2488 (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC)
2489 printf("%s: sip_stop: no INTR bits set in dirty tx "
2490 "descriptors\n", sc->sc_dev.dv_xname);
2491 }
2492
2493 /*
2494 * sip_read_eeprom:
2495 *
2496 * Read data from the serial EEPROM.
2497 */
2498 void
2499 SIP_DECL(read_eeprom)(struct sip_softc *sc, int word, int wordcnt,
2500 u_int16_t *data)
2501 {
2502 bus_space_tag_t st = sc->sc_st;
2503 bus_space_handle_t sh = sc->sc_sh;
2504 u_int16_t reg;
2505 int i, x;
2506
2507 for (i = 0; i < wordcnt; i++) {
2508 /* Send CHIP SELECT. */
2509 reg = EROMAR_EECS;
2510 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2511
2512 /* Shift in the READ opcode. */
2513 for (x = 3; x > 0; x--) {
2514 if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
2515 reg |= EROMAR_EEDI;
2516 else
2517 reg &= ~EROMAR_EEDI;
2518 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2519 bus_space_write_4(st, sh, SIP_EROMAR,
2520 reg | EROMAR_EESK);
2521 delay(4);
2522 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2523 delay(4);
2524 }
2525
2526 /* Shift in address. */
2527 for (x = 6; x > 0; x--) {
2528 if ((word + i) & (1 << (x - 1)))
2529 reg |= EROMAR_EEDI;
2530 else
2531 reg &= ~EROMAR_EEDI;
2532 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2533 bus_space_write_4(st, sh, SIP_EROMAR,
2534 reg | EROMAR_EESK);
2535 delay(4);
2536 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2537 delay(4);
2538 }
2539
2540 /* Shift out data. */
2541 reg = EROMAR_EECS;
2542 data[i] = 0;
2543 for (x = 16; x > 0; x--) {
2544 bus_space_write_4(st, sh, SIP_EROMAR,
2545 reg | EROMAR_EESK);
2546 delay(4);
2547 if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
2548 data[i] |= (1 << (x - 1));
2549 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2550 delay(4);
2551 }
2552
2553 /* Clear CHIP SELECT. */
2554 bus_space_write_4(st, sh, SIP_EROMAR, 0);
2555 delay(4);
2556 }
2557 }
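
/*
 * Illustrative usage sketch (not compiled into the driver): reading a few
 * consecutive EEPROM words with sip_read_eeprom() above.  Note the routine
 * takes a word offset, so byte offsets such as SIP_EEPROM_ETHERNET_ID0 are
 * shifted right by one first, as the SiS 900 MAC-address path below does.
 */
#if 0
static void
sip_example_read_id(struct sip_softc *sc, u_int16_t *words /* [3] */)
{

	SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1, 3, words);
	/* words[0..2] now hold the three 16-bit station address halves. */
}
#endif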
2558
2559 /*
2560 * sip_add_rxbuf:
2561 *
2562 * Add a receive buffer to the indicated descriptor.
2563 */
2564 int
2565 SIP_DECL(add_rxbuf)(struct sip_softc *sc, int idx)
2566 {
2567 struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
2568 struct mbuf *m;
2569 int error;
2570
2571 MGETHDR(m, M_DONTWAIT, MT_DATA);
2572 if (m == NULL)
2573 return (ENOBUFS);
2574
2575 MCLGET(m, M_DONTWAIT);
2576 if ((m->m_flags & M_EXT) == 0) {
2577 m_freem(m);
2578 return (ENOBUFS);
2579 }
2580
2581 #if defined(DP83820)
2582 m->m_len = SIP_RXBUF_LEN;
2583 #endif /* DP83820 */
2584
2585 if (rxs->rxs_mbuf != NULL)
2586 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2587
2588 rxs->rxs_mbuf = m;
2589
2590 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2591 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2592 BUS_DMA_READ|BUS_DMA_NOWAIT);
2593 if (error) {
2594 printf("%s: can't load rx DMA map %d, error = %d\n",
2595 sc->sc_dev.dv_xname, idx, error);
2596 panic("sip_add_rxbuf"); /* XXX */
2597 }
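
	/*
	 * Illustrative sketch (not compiled into the driver): every caller
	 * of sip_add_rxbuf() already copes with a non-zero return, so the
	 * branch above could propagate the error rather than panic.  One
	 * possible shape of that drop-in replacement:
	 */
#if 0
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		m_freem(m);
		rxs->rxs_mbuf = NULL;
		return (error);
	}
#endif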
2598
2599 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2600 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2601
2602 SIP_INIT_RXDESC(sc, idx);
2603
2604 return (0);
2605 }
2606
2607 #if !defined(DP83820)
2608 /*
2609 * sip_sis900_set_filter:
2610 *
2611 * Set up the receive filter.
2612 */
2613 void
2614 SIP_DECL(sis900_set_filter)(struct sip_softc *sc)
2615 {
2616 bus_space_tag_t st = sc->sc_st;
2617 bus_space_handle_t sh = sc->sc_sh;
2618 struct ethercom *ec = &sc->sc_ethercom;
2619 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2620 struct ether_multi *enm;
2621 u_int8_t *cp;
2622 struct ether_multistep step;
2623 u_int32_t crc, mchash[16];
2624
2625 /*
2626 * Initialize the prototype RFCR.
2627 */
2628 sc->sc_rfcr = RFCR_RFEN;
2629 if (ifp->if_flags & IFF_BROADCAST)
2630 sc->sc_rfcr |= RFCR_AAB;
2631 if (ifp->if_flags & IFF_PROMISC) {
2632 sc->sc_rfcr |= RFCR_AAP;
2633 goto allmulti;
2634 }
2635
2636 /*
2637 * Set up the multicast address filter by passing all multicast
2638 	 * addresses through a CRC generator, and then using the 7 most
2639 	 * significant bits of the CRC (8 bits on the 635 and 900B revisions,
2640 	 * which have a 256 bit table) as an index into the 128 bit multicast
2641 	 * hash table (only the lower 16 bits of each 32 bit hash register are
2642 	 * valid).  The high order index bits select the register; the rest
2642 	 * select the bit within the register.
2643 */
2644
2645 memset(mchash, 0, sizeof(mchash));
2646
2647 ETHER_FIRST_MULTI(step, ec, enm);
2648 while (enm != NULL) {
2649 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2650 /*
2651 * We must listen to a range of multicast addresses.
2652 * For now, just accept all multicasts, rather than
2653 * trying to set only those filter bits needed to match
2654 * the range. (At this time, the only use of address
2655 * ranges is for IP multicast routing, for which the
2656 * range is big enough to require all bits set.)
2657 */
2658 goto allmulti;
2659 }
2660
2661 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2662
2663 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2664 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2665 /* Just want the 8 most significant bits. */
2666 crc >>= 24;
2667 } else {
2668 /* Just want the 7 most significant bits. */
2669 crc >>= 25;
2670 }
2671
2672 /* Set the corresponding bit in the hash table. */
2673 mchash[crc >> 4] |= 1 << (crc & 0xf);
2674
2675 ETHER_NEXT_MULTI(step, enm);
2676 }
2677
2678 ifp->if_flags &= ~IFF_ALLMULTI;
2679 goto setit;
2680
2681 allmulti:
2682 ifp->if_flags |= IFF_ALLMULTI;
2683 sc->sc_rfcr |= RFCR_AAM;
2684
2685 setit:
2686 #define FILTER_EMIT(addr, data) \
2687 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2688 delay(1); \
2689 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2690 delay(1)
2691
2692 /*
2693 * Disable receive filter, and program the node address.
2694 */
2695 cp = LLADDR(ifp->if_sadl);
2696 FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
2697 FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
2698 FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);
2699
2700 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2701 /*
2702 * Program the multicast hash table.
2703 */
2704 FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
2705 FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
2706 FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
2707 FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
2708 FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
2709 FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
2710 FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
2711 FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
2712 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2713 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2714 FILTER_EMIT(RFCR_RFADDR_MC8, mchash[8]);
2715 FILTER_EMIT(RFCR_RFADDR_MC9, mchash[9]);
2716 FILTER_EMIT(RFCR_RFADDR_MC10, mchash[10]);
2717 FILTER_EMIT(RFCR_RFADDR_MC11, mchash[11]);
2718 FILTER_EMIT(RFCR_RFADDR_MC12, mchash[12]);
2719 FILTER_EMIT(RFCR_RFADDR_MC13, mchash[13]);
2720 FILTER_EMIT(RFCR_RFADDR_MC14, mchash[14]);
2721 FILTER_EMIT(RFCR_RFADDR_MC15, mchash[15]);
2722 }
2723 }
2724 #undef FILTER_EMIT
2725
2726 /*
2727 	 * Re-enable the receive filter.
2728 */
2729 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2730 }
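
/*
 * Illustrative sketch (not compiled into the driver): a worked example of
 * the hash computation used in sip_sis900_set_filter() above, for a chip
 * revision that uses the 7-bit (128 entry) table.  The multicast address is
 * an example only; ether_crc32_be() is the routine used above.
 */
#if 0
static void
sip_example_sis900_hash(void)
{
	static const u_int8_t mcaddr[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u_int32_t crc, hash;
	u_int32_t mchash[16];

	memset(mchash, 0, sizeof(mchash));
	crc = ether_crc32_be(mcaddr, ETHER_ADDR_LEN);
	hash = crc >> 25;			/* 7 most significant bits */
	mchash[hash >> 4] |= 1 << (hash & 0xf);	/* register, then bit */
}
#endif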
2731 #endif /* ! DP83820 */
2732
2733 /*
2734 * sip_dp83815_set_filter:
2735 *
2736 * Set up the receive filter.
2737 */
2738 void
2739 SIP_DECL(dp83815_set_filter)(struct sip_softc *sc)
2740 {
2741 bus_space_tag_t st = sc->sc_st;
2742 bus_space_handle_t sh = sc->sc_sh;
2743 struct ethercom *ec = &sc->sc_ethercom;
2744 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2745 struct ether_multi *enm;
2746 u_int8_t *cp;
2747 struct ether_multistep step;
2748 u_int32_t crc, hash, slot, bit;
2749 #ifdef DP83820
2750 #define MCHASH_NWORDS 128
2751 #else
2752 #define MCHASH_NWORDS 32
2753 #endif /* DP83820 */
2754 u_int16_t mchash[MCHASH_NWORDS];
2755 int i;
2756
2757 /*
2758 * Initialize the prototype RFCR.
2759 	 * Enable the receive filter, and accept packets on a
2760 	 * Perfect (destination address) Match.
2761 * If IFF_BROADCAST, also accept all broadcast packets.
2762 * If IFF_PROMISC, accept all unicast packets (and later, set
2763 * IFF_ALLMULTI and accept all multicast, too).
2764 */
2765 sc->sc_rfcr = RFCR_RFEN | RFCR_APM;
2766 if (ifp->if_flags & IFF_BROADCAST)
2767 sc->sc_rfcr |= RFCR_AAB;
2768 if (ifp->if_flags & IFF_PROMISC) {
2769 sc->sc_rfcr |= RFCR_AAP;
2770 goto allmulti;
2771 }
2772
2773 #ifdef DP83820
2774 /*
2775 * Set up the DP83820 multicast address filter by passing all multicast
2776 * addresses through a CRC generator, and then using the high-order
2777 * 11 bits as an index into the 2048 bit multicast hash table. The
2778 * high-order 7 bits select the slot, while the low-order 4 bits
2779 * select the bit within the slot. Note that only the low 16-bits
2780 * of each filter word are used, and there are 128 filter words.
2781 */
2782 #else
2783 /*
2784 * Set up the DP83815 multicast address filter by passing all multicast
2785 * addresses through a CRC generator, and then using the high-order
2786 * 9 bits as an index into the 512 bit multicast hash table. The
2787 * high-order 5 bits select the slot, while the low-order 4 bits
2788 * select the bit within the slot. Note that only the low 16-bits
2789 * of each filter word are used, and there are 32 filter words.
2790 */
2791 #endif /* DP83820 */
2792
2793 memset(mchash, 0, sizeof(mchash));
2794
2795 ifp->if_flags &= ~IFF_ALLMULTI;
2796 ETHER_FIRST_MULTI(step, ec, enm);
2797 if (enm == NULL)
2798 goto setit;
2799 while (enm != NULL) {
2800 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2801 /*
2802 * We must listen to a range of multicast addresses.
2803 * For now, just accept all multicasts, rather than
2804 * trying to set only those filter bits needed to match
2805 * the range. (At this time, the only use of address
2806 * ranges is for IP multicast routing, for which the
2807 * range is big enough to require all bits set.)
2808 */
2809 goto allmulti;
2810 }
2811
2812 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2813
2814 #ifdef DP83820
2815 /* Just want the 11 most significant bits. */
2816 hash = crc >> 21;
2817 #else
2818 /* Just want the 9 most significant bits. */
2819 hash = crc >> 23;
2820 #endif /* DP83820 */
2821
2822 slot = hash >> 4;
2823 bit = hash & 0xf;
2824
2825 /* Set the corresponding bit in the hash table. */
2826 mchash[slot] |= 1 << bit;
2827
2828 ETHER_NEXT_MULTI(step, enm);
2829 }
2830 sc->sc_rfcr |= RFCR_MHEN;
2831 goto setit;
2832
2833 allmulti:
2834 ifp->if_flags |= IFF_ALLMULTI;
2835 sc->sc_rfcr |= RFCR_AAM;
2836
2837 setit:
2838 #define FILTER_EMIT(addr, data) \
2839 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2840 delay(1); \
2841 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2842 delay(1)
2843
2844 /*
2845 * Disable receive filter, and program the node address.
2846 */
2847 cp = LLADDR(ifp->if_sadl);
2848 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]);
2849 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]);
2850 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]);
2851
2852 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2853 /*
2854 * Program the multicast hash table.
2855 */
2856 for (i = 0; i < MCHASH_NWORDS; i++) {
2857 FILTER_EMIT(RFCR_NS_RFADDR_FILTMEM + (i * 2),
2858 mchash[i]);
2859 }
2860 }
2861 #undef FILTER_EMIT
2862 #undef MCHASH_NWORDS
2863
2864 /*
2865 	 * Re-enable the receive filter.
2866 */
2867 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2868 }
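
/*
 * Illustrative sketch (not compiled into the driver): the mapping from CRC
 * to filter-memory write used in sip_dp83815_set_filter() above, shown for
 * the DP83815's 9-bit hash.  The "slot * 2" term is why the programming
 * loop above steps the RFCR address by i * 2: each filter word is 16 bits
 * wide.
 */
#if 0
static void
sip_example_dp83815_hash(u_int32_t crc)
{
	u_int32_t hash, slot, bit, rfaddr;

	hash = crc >> 23;		/* 9 most significant bits */
	slot = hash >> 4;		/* one of 32 filter words */
	bit  = hash & 0xf;		/* bit within that word */
	rfaddr = RFCR_NS_RFADDR_FILTMEM + slot * 2;
	/* FILTER_EMIT(rfaddr, 1 << bit) would set just this bit. */
	(void)rfaddr; (void)bit;
}
#endif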
2869
2870 #if defined(DP83820)
2871 /*
2872 * sip_dp83820_mii_readreg: [mii interface function]
2873 *
2874 * Read a PHY register on the MII of the DP83820.
2875 */
2876 int
2877 SIP_DECL(dp83820_mii_readreg)(struct device *self, int phy, int reg)
2878 {
2879 struct sip_softc *sc = (void *) self;
2880
2881 if (sc->sc_cfg & CFG_TBI_EN) {
2882 bus_addr_t tbireg;
2883 int rv;
2884
2885 if (phy != 0)
2886 return (0);
2887
2888 switch (reg) {
2889 case MII_BMCR: tbireg = SIP_TBICR; break;
2890 case MII_BMSR: tbireg = SIP_TBISR; break;
2891 case MII_ANAR: tbireg = SIP_TANAR; break;
2892 case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
2893 case MII_ANER: tbireg = SIP_TANER; break;
2894 case MII_EXTSR:
2895 /*
2896 * Don't even bother reading the TESR register.
2897 * The manual documents that the device has
2898 * 1000baseX full/half capability, but the
2899 			 * register itself seems to read back 0 on some
2900 * boards. Just hard-code the result.
2901 */
2902 return (EXTSR_1000XFDX|EXTSR_1000XHDX);
2903
2904 default:
2905 return (0);
2906 }
2907
2908 rv = bus_space_read_4(sc->sc_st, sc->sc_sh, tbireg) & 0xffff;
2909 if (tbireg == SIP_TBISR) {
2910 /* LINK and ACOMP are switched! */
2911 int val = rv;
2912
2913 rv = 0;
2914 if (val & TBISR_MR_LINK_STATUS)
2915 rv |= BMSR_LINK;
2916 if (val & TBISR_MR_AN_COMPLETE)
2917 rv |= BMSR_ACOMP;
2918
2919 /*
2920 * The manual claims this register reads back 0
2921 * on hard and soft reset. But we want to let
2922 * the gentbi driver know that we support auto-
2923 			 * negotiation, so hard-code these bits in the
2924 * result.
2925 */
2926 rv |= BMSR_ANEG | BMSR_EXTSTAT;
2927 }
2928
2929 return (rv);
2930 }
2931
2932 return (mii_bitbang_readreg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2933 phy, reg));
2934 }
2935
2936 /*
2937 * sip_dp83820_mii_writereg: [mii interface function]
2938 *
2939 * Write a PHY register on the MII of the DP83820.
2940 */
2941 void
2942 SIP_DECL(dp83820_mii_writereg)(struct device *self, int phy, int reg, int val)
2943 {
2944 struct sip_softc *sc = (void *) self;
2945
2946 if (sc->sc_cfg & CFG_TBI_EN) {
2947 bus_addr_t tbireg;
2948
2949 if (phy != 0)
2950 return;
2951
2952 switch (reg) {
2953 case MII_BMCR: tbireg = SIP_TBICR; break;
2954 case MII_ANAR: tbireg = SIP_TANAR; break;
2955 case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
2956 default:
2957 return;
2958 }
2959
2960 bus_space_write_4(sc->sc_st, sc->sc_sh, tbireg, val);
2961 return;
2962 }
2963
2964 mii_bitbang_writereg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2965 phy, reg, val);
2966 }
2967
2968 /*
2969  * sip_dp83820_mii_statchg: [mii interface function]
2970 *
2971 * Callback from MII layer when media changes.
2972 */
2973 void
2974 SIP_DECL(dp83820_mii_statchg)(struct device *self)
2975 {
2976 struct sip_softc *sc = (struct sip_softc *) self;
2977 u_int32_t cfg;
2978
2979 /*
2980 * Update TXCFG for full-duplex operation.
2981 */
2982 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2983 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2984 else
2985 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2986
2987 /*
2988 * Update RXCFG for full-duplex or loopback.
2989 */
2990 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2991 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2992 sc->sc_rxcfg |= RXCFG_ATX;
2993 else
2994 sc->sc_rxcfg &= ~RXCFG_ATX;
2995
2996 /*
2997 * Update CFG for MII/GMII.
2998 */
2999 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
3000 cfg = sc->sc_cfg | CFG_MODE_1000;
3001 else
3002 cfg = sc->sc_cfg;
3003
3004 /*
3005 * XXX 802.3x flow control.
3006 */
3007
3008 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
3009 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3010 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3011 }
3012
3013 /*
3014 * sip_dp83820_mii_bitbang_read: [mii bit-bang interface function]
3015 *
3016 * Read the MII serial port for the MII bit-bang module.
3017 */
3018 u_int32_t
3019 SIP_DECL(dp83820_mii_bitbang_read)(struct device *self)
3020 {
3021 struct sip_softc *sc = (void *) self;
3022
3023 return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR));
3024 }
3025
3026 /*
3027  * sip_dp83820_mii_bitbang_write: [mii bit-bang interface function]
3028 *
3029 * Write the MII serial port for the MII bit-bang module.
3030 */
3031 void
3032 SIP_DECL(dp83820_mii_bitbang_write)(struct device *self, u_int32_t val)
3033 {
3034 struct sip_softc *sc = (void *) self;
3035
3036 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val);
3037 }
3038 #else /* ! DP83820 */
3039 /*
3040 * sip_sis900_mii_readreg: [mii interface function]
3041 *
3042 * Read a PHY register on the MII.
3043 */
3044 int
3045 SIP_DECL(sis900_mii_readreg)(struct device *self, int phy, int reg)
3046 {
3047 struct sip_softc *sc = (struct sip_softc *) self;
3048 u_int32_t enphy;
3049
3050 /*
3051 * The SiS 900 has only an internal PHY on the MII. Only allow
3052 * MII address 0.
3053 */
3054 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 &&
3055 sc->sc_rev < SIS_REV_635 && phy != 0)
3056 return (0);
3057
3058 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
3059 (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
3060 ENPHY_RWCMD | ENPHY_ACCESS);
3061 do {
3062 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
3063 } while (enphy & ENPHY_ACCESS);
3064 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
3065 }
3066
3067 /*
3068 * sip_sis900_mii_writereg: [mii interface function]
3069 *
3070 * Write a PHY register on the MII.
3071 */
3072 void
3073 SIP_DECL(sis900_mii_writereg)(struct device *self, int phy, int reg, int val)
3074 {
3075 struct sip_softc *sc = (struct sip_softc *) self;
3076 u_int32_t enphy;
3077
3078 /*
3079 * The SiS 900 has only an internal PHY on the MII. Only allow
3080 * MII address 0.
3081 */
3082 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 &&
3083 sc->sc_rev < SIS_REV_635 && phy != 0)
3084 return;
3085
3086 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
3087 (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
3088 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
3089 do {
3090 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
3091 } while (enphy & ENPHY_ACCESS);
3092 }
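
/*
 * Illustrative sketch (not compiled into the driver): the ENPHY_ACCESS
 * polling in the two routines above spins with no upper bound.  A bounded
 * variant of the wait might look like this; the retry count is an arbitrary
 * example.
 */
#if 0
	int tries = 1000;

	do {
		enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
	} while ((enphy & ENPHY_ACCESS) != 0 && --tries > 0);
	if (tries == 0)
		printf("%s: ENPHY access timed out\n", sc->sc_dev.dv_xname);
#endif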
3093
3094 /*
3095 * sip_sis900_mii_statchg: [mii interface function]
3096 *
3097 * Callback from MII layer when media changes.
3098 */
3099 void
3100 SIP_DECL(sis900_mii_statchg)(struct device *self)
3101 {
3102 struct sip_softc *sc = (struct sip_softc *) self;
3103 u_int32_t flowctl;
3104
3105 /*
3106 * Update TXCFG for full-duplex operation.
3107 */
3108 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3109 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3110 else
3111 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3112
3113 /*
3114 * Update RXCFG for full-duplex or loopback.
3115 */
3116 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3117 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3118 sc->sc_rxcfg |= RXCFG_ATX;
3119 else
3120 sc->sc_rxcfg &= ~RXCFG_ATX;
3121
3122 /*
3123 * Update IMR for use of 802.3x flow control.
3124 */
3125 if ((sc->sc_mii.mii_media_active & IFM_FLOW) != 0) {
3126 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
3127 flowctl = FLOWCTL_FLOWEN;
3128 } else {
3129 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
3130 flowctl = 0;
3131 }
3132
3133 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3134 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3135 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
3136 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
3137 }
3138
3139 /*
3140 * sip_dp83815_mii_readreg: [mii interface function]
3141 *
3142 * Read a PHY register on the MII.
3143 */
3144 int
3145 SIP_DECL(dp83815_mii_readreg)(struct device *self, int phy, int reg)
3146 {
3147 struct sip_softc *sc = (struct sip_softc *) self;
3148 u_int32_t val;
3149
3150 /*
3151 * The DP83815 only has an internal PHY. Only allow
3152 * MII address 0.
3153 */
3154 if (phy != 0)
3155 return (0);
3156
3157 /*
3158 * Apparently, after a reset, the DP83815 can take a while
3159 * to respond. During this recovery period, the BMSR returns
3160 * a value of 0. Catch this -- it's not supposed to happen
3161 * (the BMSR has some hardcoded-to-1 bits), and wait for the
3162 * PHY to come back to life.
3163 *
3164 * This works out because the BMSR is the first register
3165 * read during the PHY probe process.
3166 */
3167 do {
3168 val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg));
3169 } while (reg == MII_BMSR && val == 0);
3170
3171 return (val & 0xffff);
3172 }
3173
3174 /*
3175 * sip_dp83815_mii_writereg: [mii interface function]
3176 *
3177 * Write a PHY register to the MII.
3178 */
3179 void
3180 SIP_DECL(dp83815_mii_writereg)(struct device *self, int phy, int reg, int val)
3181 {
3182 struct sip_softc *sc = (struct sip_softc *) self;
3183
3184 /*
3185 * The DP83815 only has an internal PHY. Only allow
3186 * MII address 0.
3187 */
3188 if (phy != 0)
3189 return;
3190
3191 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val);
3192 }
3193
3194 /*
3195 * sip_dp83815_mii_statchg: [mii interface function]
3196 *
3197 * Callback from MII layer when media changes.
3198 */
3199 void
3200 SIP_DECL(dp83815_mii_statchg)(struct device *self)
3201 {
3202 struct sip_softc *sc = (struct sip_softc *) self;
3203
3204 /*
3205 * Update TXCFG for full-duplex operation.
3206 */
3207 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3208 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3209 else
3210 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3211
3212 /*
3213 * Update RXCFG for full-duplex or loopback.
3214 */
3215 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3216 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3217 sc->sc_rxcfg |= RXCFG_ATX;
3218 else
3219 sc->sc_rxcfg &= ~RXCFG_ATX;
3220
3221 /*
3222 * XXX 802.3x flow control.
3223 */
3224
3225 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3226 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3227 }
3228 #endif /* DP83820 */
3229
3230 #if defined(DP83820)
3231 void
3232 SIP_DECL(dp83820_read_macaddr)(struct sip_softc *sc,
3233 const struct pci_attach_args *pa, u_int8_t *enaddr)
3234 {
3235 u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2];
3236 u_int8_t cksum, *e, match;
3237 int i;
3238
3239 /*
3240 * EEPROM data format for the DP83820 can be found in
3241 * the DP83820 manual, section 4.2.4.
3242 */
3243
3244 SIP_DECL(read_eeprom)(sc, 0,
3245 sizeof(eeprom_data) / sizeof(eeprom_data[0]), eeprom_data);
3246
3247 match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8;
3248 match = ~(match - 1);
3249
3250 cksum = 0x55;
3251 e = (u_int8_t *) eeprom_data;
3252 for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++)
3253 cksum += *e++;
3254
3255 if (cksum != match)
3256 		printf("%s: Checksum (%x) mismatch (%x)\n",
3257 sc->sc_dev.dv_xname, cksum, match);
3258
3259 enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff;
3260 enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8;
3261 enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff;
3262 enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8;
3263 enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff;
3264 enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8;
3265 }
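
/*
 * Illustrative sketch (not compiled into the driver): the checksum
 * convention used above.  ~(match - 1) is the two's complement negation of
 * match, so "cksum != match" rejects the EEPROM unless the 0x55 seed, the
 * bytes preceding the checksum, and the stored checksum byte sum to zero
 * modulo 256.  The helper name is hypothetical.
 */
#if 0
static int
sip_example_eeprom_cksum_ok(const u_int8_t *e, int len, u_int8_t stored)
{
	u_int8_t sum = 0x55;
	int i;

	for (i = 0; i < len; i++)
		sum += e[i];
	return ((u_int8_t)(sum + stored) == 0);
}
#endif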
3266 #else /* ! DP83820 */
3267 void
3268 SIP_DECL(sis900_read_macaddr)(struct sip_softc *sc,
3269 const struct pci_attach_args *pa, u_int8_t *enaddr)
3270 {
3271 u_int16_t myea[ETHER_ADDR_LEN / 2];
3272
3273 switch (sc->sc_rev) {
3274 case SIS_REV_630S:
3275 case SIS_REV_630E:
3276 case SIS_REV_630EA1:
3277 case SIS_REV_630ET:
3278 case SIS_REV_635:
3279 /*
3280 * The MAC address for the on-board Ethernet of
3281 * the SiS 630 chipset is in the NVRAM. Kick
3282 * the chip into re-loading it from NVRAM, and
3283 * read the MAC address out of the filter registers.
3284 */
3285 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD);
3286
3287 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3288 RFCR_RFADDR_NODE0);
3289 myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3290 0xffff;
3291
3292 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3293 RFCR_RFADDR_NODE2);
3294 myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3295 0xffff;
3296
3297 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3298 RFCR_RFADDR_NODE4);
3299 myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3300 0xffff;
3301 break;
3302
3303 default:
3304 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
3305 sizeof(myea) / sizeof(myea[0]), myea);
3306 }
3307
3308 enaddr[0] = myea[0] & 0xff;
3309 enaddr[1] = myea[0] >> 8;
3310 enaddr[2] = myea[1] & 0xff;
3311 enaddr[3] = myea[1] >> 8;
3312 enaddr[4] = myea[2] & 0xff;
3313 enaddr[5] = myea[2] >> 8;
3314 }
3315
3316 /* Table and macro to bit-reverse an octet. */
3317 static const u_int8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
3318 #define bbr(v) ((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf])
3319
3320 void
3321 SIP_DECL(dp83815_read_macaddr)(struct sip_softc *sc,
3322 const struct pci_attach_args *pa, u_int8_t *enaddr)
3323 {
3324 u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea;
3325 u_int8_t cksum, *e, match;
3326 int i;
3327
3328 SIP_DECL(read_eeprom)(sc, 0, sizeof(eeprom_data) /
3329 sizeof(eeprom_data[0]), eeprom_data);
3330
3331 match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8;
3332 match = ~(match - 1);
3333
3334 cksum = 0x55;
3335 e = (u_int8_t *) eeprom_data;
3336 for (i=0 ; i<SIP_DP83815_EEPROM_CHECKSUM ; i++) {
3337 cksum += *e++;
3338 }
3339 if (cksum != match) {
3340 		printf("%s: Checksum (%x) mismatch (%x)\n",
3341 sc->sc_dev.dv_xname, cksum, match);
3342 }
3343
3344 /*
3345 * Unrolled because it makes slightly more sense this way.
3346 * The DP83815 stores the MAC address in bit 0 of word 6
3347 * through bit 15 of word 8.
3348 */
3349 ea = &eeprom_data[6];
3350 enaddr[0] = ((*ea & 0x1) << 7);
3351 ea++;
3352 enaddr[0] |= ((*ea & 0xFE00) >> 9);
3353 enaddr[1] = ((*ea & 0x1FE) >> 1);
3354 enaddr[2] = ((*ea & 0x1) << 7);
3355 ea++;
3356 enaddr[2] |= ((*ea & 0xFE00) >> 9);
3357 enaddr[3] = ((*ea & 0x1FE) >> 1);
3358 enaddr[4] = ((*ea & 0x1) << 7);
3359 ea++;
3360 enaddr[4] |= ((*ea & 0xFE00) >> 9);
3361 enaddr[5] = ((*ea & 0x1FE) >> 1);
3362
3363 /*
3364 * In case that's not weird enough, we also need to reverse
3365 * the bits in each byte. This all actually makes more sense
3366 * if you think about the EEPROM storage as an array of bits
3367 * being shifted into bytes, but that's not how we're looking
3368 * at it here...
3369 */
3370 for (i = 0; i < 6 ;i++)
3371 enaddr[i] = bbr(enaddr[i]);
3372 }
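
/*
 * Illustrative sketch (not compiled into the driver): a spot check of the
 * nibble-table bit reversal used above.  bbr() reverses the eight bits of
 * an octet, e.g. 0x80 (1000 0000) becomes 0x01 and 0xa0 (1010 0000)
 * becomes 0x05.
 */
#if 0
static void
sip_example_bbr_check(void)
{

	KASSERT(bbr(0x80) == 0x01);
	KASSERT(bbr(0xa0) == 0x05);
	KASSERT(bbr(0x01) == 0x80);
}
#endif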
3373 #endif /* DP83820 */
3374
3375 /*
3376 * sip_mediastatus: [ifmedia interface function]
3377 *
3378 * Get the current interface media status.
3379 */
3380 void
3381 SIP_DECL(mediastatus)(struct ifnet *ifp, struct ifmediareq *ifmr)
3382 {
3383 struct sip_softc *sc = ifp->if_softc;
3384
3385 mii_pollstat(&sc->sc_mii);
3386 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3387 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3388 }
3389
3390 /*
3391 * sip_mediachange: [ifmedia interface function]
3392 *
3393 * Set hardware to newly-selected media.
3394 */
3395 int
3396 SIP_DECL(mediachange)(struct ifnet *ifp)
3397 {
3398 struct sip_softc *sc = ifp->if_softc;
3399
3400 if (ifp->if_flags & IFF_UP)
3401 mii_mediachg(&sc->sc_mii);
3402 return (0);
3403 }
3404