1 /* $NetBSD: if_sip.c,v 1.76 2003/01/17 08:11:50 itojun Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999 Network Computer, Inc.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of Network Computer, Inc. nor the names of its
52 * contributors may be used to endorse or promote products derived
53 * from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
56 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
66 */
67
68 /*
69 * Device driver for the Silicon Integrated Systems SiS 900,
70 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and
71 * National Semiconductor DP83820 10/100/1000 PCI Ethernet
72 * controllers.
73 *
74 * Originally written to support the SiS 900 by Jason R. Thorpe for
75 * Network Computer, Inc.
76 *
77 * TODO:
78 *
79 * - Reduce the Rx interrupt load.
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.76 2003/01/17 08:11:50 itojun Exp $");
84
85 #include "bpfilter.h"
86 #include "rnd.h"
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/callout.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/socket.h>
95 #include <sys/ioctl.h>
96 #include <sys/errno.h>
97 #include <sys/device.h>
98 #include <sys/queue.h>
99
100 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
101
102 #if NRND > 0
103 #include <sys/rnd.h>
104 #endif
105
106 #include <net/if.h>
107 #include <net/if_dl.h>
108 #include <net/if_media.h>
109 #include <net/if_ether.h>
110
111 #if NBPFILTER > 0
112 #include <net/bpf.h>
113 #endif
114
115 #include <machine/bus.h>
116 #include <machine/intr.h>
117 #include <machine/endian.h>
118
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #ifdef DP83820
122 #include <dev/mii/mii_bitbang.h>
123 #endif /* DP83820 */
124
125 #include <dev/pci/pcireg.h>
126 #include <dev/pci/pcivar.h>
127 #include <dev/pci/pcidevs.h>
128
129 #include <dev/pci/if_sipreg.h>
130
131 #ifdef DP83820 /* DP83820 Gigabit Ethernet */
132 #define SIP_DECL(x) __CONCAT(gsip_,x)
133 #else /* SiS900 and DP83815 */
134 #define SIP_DECL(x) __CONCAT(sip_,x)
135 #endif
136
137 #define SIP_STR(x) __STRING(SIP_DECL(x))
138
139 /*
140  * Transmit descriptor list size.  This is arbitrary, but allocate
141  * enough descriptors for 256 pending transmissions, with 8 descriptors
142  * budgeted per packet.  This MUST work out to a power of 2.
143  */
144 #define SIP_NTXSEGS 16
145 #define SIP_NTXSEGS_ALLOC 8
146
147 #define SIP_TXQUEUELEN 256
148 #define SIP_NTXDESC (SIP_TXQUEUELEN * SIP_NTXSEGS_ALLOC)
149 #define SIP_NTXDESC_MASK (SIP_NTXDESC - 1)
150 #define SIP_NEXTTX(x) (((x) + 1) & SIP_NTXDESC_MASK)
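/*
 * Ring arithmetic note: SIP_NTXDESC works out to 256 * 8 = 2048, so
 * SIP_NTXDESC_MASK is 0x7ff and SIP_NEXTTX(2047) wraps back to 0; the
 * power-of-2 requirement exists so this mask-based wrap is valid.  An
 * individual packet may still use up to SIP_NTXSEGS (16) descriptors,
 * borrowing from the overall pool beyond its 8-descriptor budget.
 */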
151
152 #if defined(DP83820)
153 #define TX_DMAMAP_SIZE ETHER_MAX_LEN_JUMBO
154 #else
155 #define TX_DMAMAP_SIZE MCLBYTES
156 #endif
157
158 /*
159 * Receive descriptor list size. We have one Rx buffer per incoming
160 * packet, so this logic is a little simpler.
161 *
162 * Actually, on the DP83820, we allow the packet to consume more than
163 * one buffer, in order to support jumbo Ethernet frames. In that
164 * case, a packet may consume up to 5 buffers (assuming a 2048 byte
165 * mbuf cluster). 256 receive buffers is only 51 maximum size packets,
166 * so we'd better be quick about handling receive interrupts.
167 */
168 #if defined(DP83820)
169 #define SIP_NRXDESC 256
170 #else
171 #define SIP_NRXDESC 128
172 #endif /* DP83820 */
173 #define SIP_NRXDESC_MASK (SIP_NRXDESC - 1)
174 #define SIP_NEXTRX(x) (((x) + 1) & SIP_NRXDESC_MASK)
175
176 /*
177 * Control structures are DMA'd to the SiS900 chip. We allocate them in
178 * a single clump that maps to a single DMA segment to make several things
179 * easier.
180 */
181 struct sip_control_data {
182 /*
183 * The transmit descriptors.
184 */
185 struct sip_desc scd_txdescs[SIP_NTXDESC];
186
187 /*
188 * The receive descriptors.
189 */
190 struct sip_desc scd_rxdescs[SIP_NRXDESC];
191 };
192
193 #define SIP_CDOFF(x) offsetof(struct sip_control_data, x)
194 #define SIP_CDTXOFF(x) SIP_CDOFF(scd_txdescs[(x)])
195 #define SIP_CDRXOFF(x) SIP_CDOFF(scd_rxdescs[(x)])
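/*
 * For example, SIP_CDTXOFF(3) is the byte offset of scd_txdescs[3]
 * within the control-data clump; SIP_CDTXADDR() below adds that offset
 * to the clump's DMA address to obtain the descriptor's bus address.
 */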
196
197 /*
198 * Software state for transmit jobs.
199 */
200 struct sip_txsoft {
201 struct mbuf *txs_mbuf; /* head of our mbuf chain */
202 bus_dmamap_t txs_dmamap; /* our DMA map */
203 int txs_firstdesc; /* first descriptor in packet */
204 int txs_lastdesc; /* last descriptor in packet */
205 SIMPLEQ_ENTRY(sip_txsoft) txs_q;
206 };
207
208 SIMPLEQ_HEAD(sip_txsq, sip_txsoft);
209
210 /*
211 * Software state for receive jobs.
212 */
213 struct sip_rxsoft {
214 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
215 bus_dmamap_t rxs_dmamap; /* our DMA map */
216 };
217
218 /*
219 * Software state per device.
220 */
221 struct sip_softc {
222 struct device sc_dev; /* generic device information */
223 bus_space_tag_t sc_st; /* bus space tag */
224 bus_space_handle_t sc_sh; /* bus space handle */
225 bus_dma_tag_t sc_dmat; /* bus DMA tag */
226 struct ethercom sc_ethercom; /* ethernet common data */
227 void *sc_sdhook; /* shutdown hook */
228
229 const struct sip_product *sc_model; /* which model are we? */
230 int sc_rev; /* chip revision */
231
232 void *sc_ih; /* interrupt cookie */
233
234 struct mii_data sc_mii; /* MII/media information */
235
236 struct callout sc_tick_ch; /* tick callout */
237
238 bus_dmamap_t sc_cddmamap; /* control data DMA map */
239 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
240
241 /*
242 * Software state for transmit and receive descriptors.
243 */
244 struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
245 struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];
246
247 /*
248 * Control data structures.
249 */
250 struct sip_control_data *sc_control_data;
251 #define sc_txdescs sc_control_data->scd_txdescs
252 #define sc_rxdescs sc_control_data->scd_rxdescs
253
254 #ifdef SIP_EVENT_COUNTERS
255 /*
256 * Event counters.
257 */
258 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
259 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
260 struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
261 struct evcnt sc_ev_txdintr; /* Tx descriptor interrupts */
262 struct evcnt sc_ev_txiintr; /* Tx idle interrupts */
263 struct evcnt sc_ev_rxintr; /* Rx interrupts */
264 struct evcnt sc_ev_hiberr; /* HIBERR interrupts */
265 #ifdef DP83820
266 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
267 struct evcnt sc_ev_rxtcpsum; /* TCP checksums checked in-bound */
268 struct evcnt sc_ev_rxudpsum; /* UDP checksums checked in-bound */
269 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
270 struct evcnt sc_ev_txtcpsum; /* TCP checksums comp. out-bound */
271 struct evcnt sc_ev_txudpsum; /* UDP checksums comp. out-bound */
272 #endif /* DP83820 */
273 #endif /* SIP_EVENT_COUNTERS */
274
275 u_int32_t sc_txcfg; /* prototype TXCFG register */
276 u_int32_t sc_rxcfg; /* prototype RXCFG register */
277 u_int32_t sc_imr; /* prototype IMR register */
278 u_int32_t sc_rfcr; /* prototype RFCR register */
279
280 u_int32_t sc_cfg; /* prototype CFG register */
281
282 #ifdef DP83820
283 u_int32_t sc_gpior; /* prototype GPIOR register */
284 #endif /* DP83820 */
285
286 u_int32_t sc_tx_fill_thresh; /* transmit fill threshold */
287 u_int32_t sc_tx_drain_thresh; /* transmit drain threshold */
288
289 u_int32_t sc_rx_drain_thresh; /* receive drain threshold */
290
291 int sc_flags; /* misc. flags; see below */
292
293 int sc_txfree; /* number of free Tx descriptors */
294 int sc_txnext; /* next ready Tx descriptor */
295 int sc_txwin; /* Tx descriptors since last intr */
296
297 struct sip_txsq sc_txfreeq; /* free Tx descsofts */
298 struct sip_txsq sc_txdirtyq; /* dirty Tx descsofts */
299
300 int sc_rxptr; /* next ready Rx descriptor/descsoft */
301 #if defined(DP83820)
302 int sc_rxdiscard;
303 int sc_rxlen;
304 struct mbuf *sc_rxhead;
305 struct mbuf *sc_rxtail;
306 struct mbuf **sc_rxtailp;
307 #endif /* DP83820 */
308
309 #if NRND > 0
310 rndsource_element_t rnd_source; /* random source */
311 #endif
312 };
313
314 /* sc_flags */
315 #define SIPF_PAUSED 0x00000001 /* paused (802.3x flow control) */
316
317 #ifdef DP83820
318 #define SIP_RXCHAIN_RESET(sc) \
319 do { \
320 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
321 *(sc)->sc_rxtailp = NULL; \
322 (sc)->sc_rxlen = 0; \
323 } while (/*CONSTCOND*/0)
324
325 #define SIP_RXCHAIN_LINK(sc, m) \
326 do { \
327 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
328 (sc)->sc_rxtailp = &(m)->m_next; \
329 } while (/*CONSTCOND*/0)
330 #endif /* DP83820 */
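/*
 * The macros above maintain the partially-assembled mbuf chain used
 * when a jumbo frame spans several Rx buffers: sc_rxhead is the first
 * mbuf, sc_rxtail the most recent one, and sc_rxtailp always points at
 * the pointer to patch next (initially &sc_rxhead, thereafter the
 * previous mbuf's m_next), so each link is a constant-time operation.
 */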
331
332 #ifdef SIP_EVENT_COUNTERS
333 #define SIP_EVCNT_INCR(ev) (ev)->ev_count++
334 #else
335 #define SIP_EVCNT_INCR(ev) /* nothing */
336 #endif
337
338 #define SIP_CDTXADDR(sc, x) ((sc)->sc_cddma + SIP_CDTXOFF((x)))
339 #define SIP_CDRXADDR(sc, x) ((sc)->sc_cddma + SIP_CDRXOFF((x)))
340
341 #define SIP_CDTXSYNC(sc, x, n, ops) \
342 do { \
343 int __x, __n; \
344 \
345 __x = (x); \
346 __n = (n); \
347 \
348 /* If it will wrap around, sync to the end of the ring. */ \
349 if ((__x + __n) > SIP_NTXDESC) { \
350 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
351 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * \
352 (SIP_NTXDESC - __x), (ops)); \
353 __n -= (SIP_NTXDESC - __x); \
354 __x = 0; \
355 } \
356 \
357 /* Now sync whatever is left. */ \
358 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
359 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops)); \
360 } while (0)
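/*
 * Example of the wrap handling above: with SIP_NTXDESC == 2048,
 * SIP_CDTXSYNC(sc, 2045, 5, ops) syncs descriptors 2045-2047 in the
 * first call and descriptors 0-1 in the second, since a single
 * bus_dmamap_sync() range cannot wrap past the end of the ring.
 */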
361
362 #define SIP_CDRXSYNC(sc, x, ops) \
363 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
364 SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))
365
366 #ifdef DP83820
367 #define SIP_INIT_RXDESC_EXTSTS __sipd->sipd_extsts = 0;
368 #define SIP_RXBUF_LEN (MCLBYTES - 4)
369 #else
370 #define SIP_INIT_RXDESC_EXTSTS /* nothing */
371 #define SIP_RXBUF_LEN (MCLBYTES - 1) /* field width */
372 #endif
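/*
 * The DP83820 Rx buffer length is deliberately shortened so that the
 * Rx interrupt handler has room to shift the received data two bytes
 * forward for payload alignment (see the __NO_STRICT_ALIGNMENT fixup
 * there); on the other chips it is simply the largest value the
 * CMDSTS size field can describe for a cluster-sized buffer.
 */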
373 #define SIP_INIT_RXDESC(sc, x) \
374 do { \
375 struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
376 struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)]; \
377 \
378 __sipd->sipd_link = \
379 htole32(SIP_CDRXADDR((sc), SIP_NEXTRX((x)))); \
380 __sipd->sipd_bufptr = \
381 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr); \
382 __sipd->sipd_cmdsts = htole32(CMDSTS_INTR | \
383 (SIP_RXBUF_LEN & CMDSTS_SIZE_MASK)); \
384 SIP_INIT_RXDESC_EXTSTS \
385 SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
386 } while (0)
387
388 #define SIP_CHIP_VERS(sc, v, p, r) \
389 ((sc)->sc_model->sip_vendor == (v) && \
390 (sc)->sc_model->sip_product == (p) && \
391 (sc)->sc_rev == (r))
392
393 #define SIP_CHIP_MODEL(sc, v, p) \
394 ((sc)->sc_model->sip_vendor == (v) && \
395 (sc)->sc_model->sip_product == (p))
396
397 #if !defined(DP83820)
398 #define SIP_SIS900_REV(sc, rev) \
399 SIP_CHIP_VERS((sc), PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, (rev))
400 #endif
401
402 #define SIP_TIMEOUT 1000
403
404 void SIP_DECL(start)(struct ifnet *);
405 void SIP_DECL(watchdog)(struct ifnet *);
406 int SIP_DECL(ioctl)(struct ifnet *, u_long, caddr_t);
407 int SIP_DECL(init)(struct ifnet *);
408 void SIP_DECL(stop)(struct ifnet *, int);
409
410 void SIP_DECL(shutdown)(void *);
411
412 void SIP_DECL(reset)(struct sip_softc *);
413 void SIP_DECL(rxdrain)(struct sip_softc *);
414 int SIP_DECL(add_rxbuf)(struct sip_softc *, int);
415 void SIP_DECL(read_eeprom)(struct sip_softc *, int, int, u_int16_t *);
416 void SIP_DECL(tick)(void *);
417
418 #if !defined(DP83820)
419 void SIP_DECL(sis900_set_filter)(struct sip_softc *);
420 #endif /* ! DP83820 */
421 void SIP_DECL(dp83815_set_filter)(struct sip_softc *);
422
423 #if defined(DP83820)
424 void SIP_DECL(dp83820_read_macaddr)(struct sip_softc *,
425 const struct pci_attach_args *, u_int8_t *);
426 #else
427 void SIP_DECL(sis900_read_macaddr)(struct sip_softc *,
428 const struct pci_attach_args *, u_int8_t *);
429 void SIP_DECL(dp83815_read_macaddr)(struct sip_softc *,
430 const struct pci_attach_args *, u_int8_t *);
431 #endif /* DP83820 */
432
433 int SIP_DECL(intr)(void *);
434 void SIP_DECL(txintr)(struct sip_softc *);
435 void SIP_DECL(rxintr)(struct sip_softc *);
436
437 #if defined(DP83820)
438 int SIP_DECL(dp83820_mii_readreg)(struct device *, int, int);
439 void SIP_DECL(dp83820_mii_writereg)(struct device *, int, int, int);
440 void SIP_DECL(dp83820_mii_statchg)(struct device *);
441 #else
442 int SIP_DECL(sis900_mii_readreg)(struct device *, int, int);
443 void SIP_DECL(sis900_mii_writereg)(struct device *, int, int, int);
444 void SIP_DECL(sis900_mii_statchg)(struct device *);
445
446 int SIP_DECL(dp83815_mii_readreg)(struct device *, int, int);
447 void SIP_DECL(dp83815_mii_writereg)(struct device *, int, int, int);
448 void SIP_DECL(dp83815_mii_statchg)(struct device *);
449 #endif /* DP83820 */
450
451 int SIP_DECL(mediachange)(struct ifnet *);
452 void SIP_DECL(mediastatus)(struct ifnet *, struct ifmediareq *);
453
454 int SIP_DECL(match)(struct device *, struct cfdata *, void *);
455 void SIP_DECL(attach)(struct device *, struct device *, void *);
456
457 int SIP_DECL(copy_small) = 0;
458
459 #ifdef DP83820
460 CFATTACH_DECL(gsip, sizeof(struct sip_softc),
461 gsip_match, gsip_attach, NULL, NULL);
462 #else
463 CFATTACH_DECL(sip, sizeof(struct sip_softc),
464 sip_match, sip_attach, NULL, NULL);
465 #endif
466
467 /*
468 * Descriptions of the variants of the SiS900.
469 */
470 struct sip_variant {
471 int (*sipv_mii_readreg)(struct device *, int, int);
472 void (*sipv_mii_writereg)(struct device *, int, int, int);
473 void (*sipv_mii_statchg)(struct device *);
474 void (*sipv_set_filter)(struct sip_softc *);
475 void (*sipv_read_macaddr)(struct sip_softc *,
476 const struct pci_attach_args *, u_int8_t *);
477 };
478
479 #if defined(DP83820)
480 u_int32_t SIP_DECL(dp83820_mii_bitbang_read)(struct device *);
481 void SIP_DECL(dp83820_mii_bitbang_write)(struct device *, u_int32_t);
482
483 const struct mii_bitbang_ops SIP_DECL(dp83820_mii_bitbang_ops) = {
484 SIP_DECL(dp83820_mii_bitbang_read),
485 SIP_DECL(dp83820_mii_bitbang_write),
486 {
487 EROMAR_MDIO, /* MII_BIT_MDO */
488 EROMAR_MDIO, /* MII_BIT_MDI */
489 EROMAR_MDC, /* MII_BIT_MDC */
490 EROMAR_MDDIR, /* MII_BIT_DIR_HOST_PHY */
491 0, /* MII_BIT_DIR_PHY_HOST */
492 }
493 };
494 #endif /* DP83820 */
495
496 #if defined(DP83820)
497 const struct sip_variant SIP_DECL(variant_dp83820) = {
498 SIP_DECL(dp83820_mii_readreg),
499 SIP_DECL(dp83820_mii_writereg),
500 SIP_DECL(dp83820_mii_statchg),
501 SIP_DECL(dp83815_set_filter),
502 SIP_DECL(dp83820_read_macaddr),
503 };
504 #else
505 const struct sip_variant SIP_DECL(variant_sis900) = {
506 SIP_DECL(sis900_mii_readreg),
507 SIP_DECL(sis900_mii_writereg),
508 SIP_DECL(sis900_mii_statchg),
509 SIP_DECL(sis900_set_filter),
510 SIP_DECL(sis900_read_macaddr),
511 };
512
513 const struct sip_variant SIP_DECL(variant_dp83815) = {
514 SIP_DECL(dp83815_mii_readreg),
515 SIP_DECL(dp83815_mii_writereg),
516 SIP_DECL(dp83815_mii_statchg),
517 SIP_DECL(dp83815_set_filter),
518 SIP_DECL(dp83815_read_macaddr),
519 };
520 #endif /* DP83820 */
521
522 /*
523 * Devices supported by this driver.
524 */
525 const struct sip_product {
526 pci_vendor_id_t sip_vendor;
527 pci_product_id_t sip_product;
528 const char *sip_name;
529 const struct sip_variant *sip_variant;
530 } SIP_DECL(products)[] = {
531 #if defined(DP83820)
532 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83820,
533 "NatSemi DP83820 Gigabit Ethernet",
534 &SIP_DECL(variant_dp83820) },
535 #else
536 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900,
537 "SiS 900 10/100 Ethernet",
538 &SIP_DECL(variant_sis900) },
539 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016,
540 "SiS 7016 10/100 Ethernet",
541 &SIP_DECL(variant_sis900) },
542
543 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815,
544 "NatSemi DP83815 10/100 Ethernet",
545 &SIP_DECL(variant_dp83815) },
546 #endif /* DP83820 */
547
548 { 0, 0,
549 NULL,
550 NULL },
551 };
552
553 static const struct sip_product *
554 SIP_DECL(lookup)(const struct pci_attach_args *pa)
555 {
556 const struct sip_product *sip;
557
558 for (sip = SIP_DECL(products); sip->sip_name != NULL; sip++) {
559 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
560 PCI_PRODUCT(pa->pa_id) == sip->sip_product)
561 return (sip);
562 }
563 return (NULL);
564 }
565
566 #ifdef DP83820
567 /*
568 * I really hate stupid hardware vendors. There's a bit in the EEPROM
569 * which indicates if the card can do 64-bit data transfers. Unfortunately,
570 * several vendors of 32-bit cards fail to clear this bit in the EEPROM,
571 * which means we try to use 64-bit data transfers on those cards if we
572 * happen to be plugged into a 32-bit slot.
573 *
574 * What we do is use this table of cards known to be 64-bit cards. If
575 * you have a 64-bit card whose subsystem ID is not listed in this table,
576 * send the output of "pcictl dump ..." of the device to me so that your
577 * card will use the 64-bit data path when plugged into a 64-bit slot.
578 *
579 * -- Jason R. Thorpe <thorpej (at) netbsd.org>
580 * June 30, 2002
581 */
582 static int
583 SIP_DECL(check_64bit)(const struct pci_attach_args *pa)
584 {
585 static const struct {
586 pci_vendor_id_t c64_vendor;
587 pci_product_id_t c64_product;
588 } card64[] = {
589 /* Asante GigaNIX */
590 { 0x128a, 0x0002 },
591
592 /* Accton EN1407-T, Planex GN-1000TE */
593 { 0x1113, 0x1407 },
594
595 /* Netgear GA-621 */
596 { 0x1385, 0x621a },
597
598 { 0, 0}
599 };
600 pcireg_t subsys;
601 int i;
602
603 subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
604
605 for (i = 0; card64[i].c64_vendor != 0; i++) {
606 if (PCI_VENDOR(subsys) == card64[i].c64_vendor &&
607 PCI_PRODUCT(subsys) == card64[i].c64_product)
608 return (1);
609 }
610
611 return (0);
612 }
613 #endif /* DP83820 */
614
615 int
616 SIP_DECL(match)(struct device *parent, struct cfdata *cf, void *aux)
617 {
618 struct pci_attach_args *pa = aux;
619
620 if (SIP_DECL(lookup)(pa) != NULL)
621 return (1);
622
623 return (0);
624 }
625
626 void
627 SIP_DECL(attach)(struct device *parent, struct device *self, void *aux)
628 {
629 struct sip_softc *sc = (struct sip_softc *) self;
630 struct pci_attach_args *pa = aux;
631 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
632 pci_chipset_tag_t pc = pa->pa_pc;
633 pci_intr_handle_t ih;
634 const char *intrstr = NULL;
635 bus_space_tag_t iot, memt;
636 bus_space_handle_t ioh, memh;
637 bus_dma_segment_t seg;
638 int ioh_valid, memh_valid;
639 int i, rseg, error;
640 const struct sip_product *sip;
641 pcireg_t pmode;
642 u_int8_t enaddr[ETHER_ADDR_LEN];
643 int pmreg;
644 #ifdef DP83820
645 pcireg_t memtype;
646 u_int32_t reg;
647 #endif /* DP83820 */
648
649 callout_init(&sc->sc_tick_ch);
650
651 sip = SIP_DECL(lookup)(pa);
652 if (sip == NULL) {
653 printf("\n");
654 panic(SIP_STR(attach) ": impossible");
655 }
656 sc->sc_rev = PCI_REVISION(pa->pa_class);
657
658 printf(": %s, rev %#02x\n", sip->sip_name, sc->sc_rev);
659
660 sc->sc_model = sip;
661
662 /*
663 * XXX Work-around broken PXE firmware on some boards.
664 *
665 * The DP83815 shares an address decoder with the MEM BAR
666 * and the ROM BAR. Make sure the ROM BAR is disabled,
667 * so that memory mapped access works.
668 */
669 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM,
670 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM) &
671 ~PCI_MAPREG_ROM_ENABLE);
672
673 /*
674 * Map the device.
675 */
676 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
677 PCI_MAPREG_TYPE_IO, 0,
678 &iot, &ioh, NULL, NULL) == 0);
679 #ifdef DP83820
680 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA);
681 switch (memtype) {
682 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
683 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
684 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
685 memtype, 0, &memt, &memh, NULL, NULL) == 0);
686 break;
687 default:
688 memh_valid = 0;
689 }
690 #else
691 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
692 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
693 &memt, &memh, NULL, NULL) == 0);
694 #endif /* DP83820 */
695
696 if (memh_valid) {
697 sc->sc_st = memt;
698 sc->sc_sh = memh;
699 } else if (ioh_valid) {
700 sc->sc_st = iot;
701 sc->sc_sh = ioh;
702 } else {
703 printf("%s: unable to map device registers\n",
704 sc->sc_dev.dv_xname);
705 return;
706 }
707
708 sc->sc_dmat = pa->pa_dmat;
709
710 /*
711 * Make sure bus mastering is enabled. Also make sure
712 * Write/Invalidate is enabled if we're allowed to use it.
713 */
714 pmreg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
715 if (pa->pa_flags & PCI_FLAGS_MWI_OKAY)
716 pmreg |= PCI_COMMAND_INVALIDATE_ENABLE;
717 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
718 pmreg | PCI_COMMAND_MASTER_ENABLE);
719
720 /* Get it out of power save mode if needed. */
721 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
722 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
723 PCI_PMCSR_STATE_MASK;
724 if (pmode == PCI_PMCSR_STATE_D3) {
725 /*
726 * The card has lost all configuration data in
727 * this state, so punt.
728 */
729 printf("%s: unable to wake up from power state D3\n",
730 sc->sc_dev.dv_xname);
731 return;
732 }
733 if (pmode != PCI_PMCSR_STATE_D0) {
734 printf("%s: waking up from power state D%d\n",
735 sc->sc_dev.dv_xname, pmode);
736 pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
737 PCI_PMCSR_STATE_D0);
738 }
739 }
740
741 /*
742 * Map and establish our interrupt.
743 */
744 if (pci_intr_map(pa, &ih)) {
745 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
746 return;
747 }
748 intrstr = pci_intr_string(pc, ih);
749 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, SIP_DECL(intr), sc);
750 if (sc->sc_ih == NULL) {
751 printf("%s: unable to establish interrupt",
752 sc->sc_dev.dv_xname);
753 if (intrstr != NULL)
754 printf(" at %s", intrstr);
755 printf("\n");
756 return;
757 }
758 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
759
760 SIMPLEQ_INIT(&sc->sc_txfreeq);
761 SIMPLEQ_INIT(&sc->sc_txdirtyq);
762
763 /*
764 * Allocate the control data structures, and create and load the
765 * DMA map for it.
766 */
767 if ((error = bus_dmamem_alloc(sc->sc_dmat,
768 sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
769 0)) != 0) {
770 printf("%s: unable to allocate control data, error = %d\n",
771 sc->sc_dev.dv_xname, error);
772 goto fail_0;
773 }
774
775 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
776 sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data,
777 BUS_DMA_COHERENT)) != 0) {
778 printf("%s: unable to map control data, error = %d\n",
779 sc->sc_dev.dv_xname, error);
780 goto fail_1;
781 }
782
783 if ((error = bus_dmamap_create(sc->sc_dmat,
784 sizeof(struct sip_control_data), 1,
785 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
786 printf("%s: unable to create control data DMA map, "
787 "error = %d\n", sc->sc_dev.dv_xname, error);
788 goto fail_2;
789 }
790
791 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
792 sc->sc_control_data, sizeof(struct sip_control_data), NULL,
793 0)) != 0) {
794 printf("%s: unable to load control data DMA map, error = %d\n",
795 sc->sc_dev.dv_xname, error);
796 goto fail_3;
797 }
798
799 /*
800 * Create the transmit buffer DMA maps.
801 */
802 for (i = 0; i < SIP_TXQUEUELEN; i++) {
803 if ((error = bus_dmamap_create(sc->sc_dmat, TX_DMAMAP_SIZE,
804 SIP_NTXSEGS, MCLBYTES, 0, 0,
805 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
806 printf("%s: unable to create tx DMA map %d, "
807 "error = %d\n", sc->sc_dev.dv_xname, i, error);
808 goto fail_4;
809 }
810 }
811
812 /*
813 * Create the receive buffer DMA maps.
814 */
815 for (i = 0; i < SIP_NRXDESC; i++) {
816 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
817 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
818 printf("%s: unable to create rx DMA map %d, "
819 "error = %d\n", sc->sc_dev.dv_xname, i, error);
820 goto fail_5;
821 }
822 sc->sc_rxsoft[i].rxs_mbuf = NULL;
823 }
824
825 /*
826 * Reset the chip to a known state.
827 */
828 SIP_DECL(reset)(sc);
829
830 /*
831 * Read the Ethernet address from the EEPROM. This might
832 * also fetch other stuff from the EEPROM and stash it
833 * in the softc.
834 */
835 sc->sc_cfg = 0;
836 #if !defined(DP83820)
837 if (SIP_SIS900_REV(sc,SIS_REV_635) ||
838 SIP_SIS900_REV(sc,SIS_REV_900B))
839 sc->sc_cfg |= (CFG_PESEL | CFG_RNDCNT);
840 #endif
841
842 (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr);
843
844 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
845 ether_sprintf(enaddr));
846
847 /*
848 * Initialize the configuration register: aggressive PCI
849 * bus request algorithm, default backoff, default OW timer,
850 * default parity error detection.
851 *
852 * NOTE: "Big endian mode" is useless on the SiS900 and
853 * friends -- it affects packet data, not descriptors.
854 */
855 #ifdef DP83820
856 /*
857 * Cause the chip to load configuration data from the EEPROM.
858 */
859 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_PTSCR, PTSCR_EELOAD_EN);
860 for (i = 0; i < 10000; i++) {
861 delay(10);
862 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
863 PTSCR_EELOAD_EN) == 0)
864 break;
865 }
866 if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
867 PTSCR_EELOAD_EN) {
868 printf("%s: timeout loading configuration from EEPROM\n",
869 sc->sc_dev.dv_xname);
870 return;
871 }
872
873 sc->sc_gpior = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_GPIOR);
874
875 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
876 if (reg & CFG_PCI64_DET) {
877 printf("%s: 64-bit PCI slot detected", sc->sc_dev.dv_xname);
878 /*
879 * Check to see if this card is 64-bit. If so, enable 64-bit
880 * data transfers.
881 *
882 * We can't use the DATA64_EN bit in the EEPROM, because
883 * vendors of 32-bit cards fail to clear that bit in many
884 * cases (yet the card still detects that it's in a 64-bit
885 * slot; go figure).
886 */
887 if (SIP_DECL(check_64bit)(pa)) {
888 sc->sc_cfg |= CFG_DATA64_EN;
889 printf(", using 64-bit data transfers");
890 }
891 printf("\n");
892 }
893
894 /*
895 * XXX Need some PCI flags indicating support for
896 * XXX 64-bit addressing.
897 */
898 #if 0
899 if (reg & CFG_M64ADDR)
900 sc->sc_cfg |= CFG_M64ADDR;
901 if (reg & CFG_T64ADDR)
902 sc->sc_cfg |= CFG_T64ADDR;
903 #endif
904
905 if (reg & (CFG_TBI_EN|CFG_EXT_125)) {
906 const char *sep = "";
907 printf("%s: using ", sc->sc_dev.dv_xname);
908 if (reg & CFG_EXT_125) {
909 sc->sc_cfg |= CFG_EXT_125;
910 printf("%s125MHz clock", sep);
911 sep = ", ";
912 }
913 if (reg & CFG_TBI_EN) {
914 sc->sc_cfg |= CFG_TBI_EN;
915 printf("%sten-bit interface", sep);
916 sep = ", ";
917 }
918 printf("\n");
919 }
920 if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0 ||
921 (reg & CFG_MRM_DIS) != 0)
922 sc->sc_cfg |= CFG_MRM_DIS;
923 if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0 ||
924 (reg & CFG_MWI_DIS) != 0)
925 sc->sc_cfg |= CFG_MWI_DIS;
926
927 /*
928 * Use the extended descriptor format on the DP83820. This
929 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
930 * checksumming.
931 */
932 sc->sc_cfg |= CFG_EXTSTS_EN;
933 #endif /* DP83820 */
934
935 /*
936 * Initialize our media structures and probe the MII.
937 */
938 sc->sc_mii.mii_ifp = ifp;
939 sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg;
940 sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg;
941 sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg;
942 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, SIP_DECL(mediachange),
943 SIP_DECL(mediastatus));
944
945 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
946 MII_OFFSET_ANY, 0);
947 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
948 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
949 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
950 } else
951 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
952
953 ifp = &sc->sc_ethercom.ec_if;
954 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
955 ifp->if_softc = sc;
956 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
957 ifp->if_ioctl = SIP_DECL(ioctl);
958 ifp->if_start = SIP_DECL(start);
959 ifp->if_watchdog = SIP_DECL(watchdog);
960 ifp->if_init = SIP_DECL(init);
961 ifp->if_stop = SIP_DECL(stop);
962 IFQ_SET_READY(&ifp->if_snd);
963
964 /*
965 * We can support 802.1Q VLAN-sized frames.
966 */
967 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
968
969 #ifdef DP83820
970 /*
971 * And the DP83820 can do VLAN tagging in hardware, and
972 * support the jumbo Ethernet MTU.
973 */
974 sc->sc_ethercom.ec_capabilities |=
975 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
976
977 /*
978 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums
979 * in hardware.
980 */
981 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
982 IFCAP_CSUM_UDPv4;
983 #endif /* DP83820 */
984
985 /*
986 * Attach the interface.
987 */
988 if_attach(ifp);
989 ether_ifattach(ifp, enaddr);
990 #if NRND > 0
991 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
992 RND_TYPE_NET, 0);
993 #endif
994
995 /*
996 * The number of bytes that must be available in
997 * the Tx FIFO before the bus master can DMA more
998 * data into the FIFO.
999 */
1000 sc->sc_tx_fill_thresh = 64 / 32;
1001
1002 /*
1003 * Start at a drain threshold of 512 bytes. We will
1004 * increase it if a DMA underrun occurs.
1005 *
1006 * XXX The minimum value of this variable should be
1007 * tuned. We may be able to improve performance
1008 * by starting with a lower value. That, however,
1009 * may trash the first few outgoing packets if the
1010 * PCI bus is saturated.
1011 */
1012 sc->sc_tx_drain_thresh = 1504 / 32;
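
	/*
	 * Both Tx thresholds above are in units of 32 bytes: the fill
	 * threshold of 64 / 32 == 2 units (64 bytes) and the drain
	 * threshold of 1504 / 32 == 47 units (1504 bytes, roughly one
	 * maximum-sized Ethernet frame).  The interrupt handler raises
	 * the drain threshold one unit at a time after Tx FIFO underruns.
	 */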
1013
1014 /*
1015 * Initialize the Rx FIFO drain threshold.
1016 *
1017 * This is in units of 8 bytes.
1018 *
1019 * We should never set this value lower than 2; 14 bytes are
1020 * required to filter the packet.
1021 */
1022 sc->sc_rx_drain_thresh = 128 / 8;
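
	/*
	 * Likewise, 128 / 8 == 16 units here, i.e. the chip waits for
	 * 128 bytes in the Rx FIFO before draining it to memory; the
	 * minimum of 2 units (16 bytes) mentioned above covers the
	 * 14-byte Ethernet header needed for receive filtering.
	 */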
1023
1024 #ifdef SIP_EVENT_COUNTERS
1025 /*
1026 * Attach event counters.
1027 */
1028 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1029 NULL, sc->sc_dev.dv_xname, "txsstall");
1030 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1031 NULL, sc->sc_dev.dv_xname, "txdstall");
1032 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_INTR,
1033 NULL, sc->sc_dev.dv_xname, "txforceintr");
1034 evcnt_attach_dynamic(&sc->sc_ev_txdintr, EVCNT_TYPE_INTR,
1035 NULL, sc->sc_dev.dv_xname, "txdintr");
1036 evcnt_attach_dynamic(&sc->sc_ev_txiintr, EVCNT_TYPE_INTR,
1037 NULL, sc->sc_dev.dv_xname, "txiintr");
1038 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1039 NULL, sc->sc_dev.dv_xname, "rxintr");
1040 evcnt_attach_dynamic(&sc->sc_ev_hiberr, EVCNT_TYPE_INTR,
1041 NULL, sc->sc_dev.dv_xname, "hiberr");
1042 #ifdef DP83820
1043 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1044 NULL, sc->sc_dev.dv_xname, "rxipsum");
1045 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC,
1046 NULL, sc->sc_dev.dv_xname, "rxtcpsum");
1047 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC,
1048 NULL, sc->sc_dev.dv_xname, "rxudpsum");
1049 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1050 NULL, sc->sc_dev.dv_xname, "txipsum");
1051 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC,
1052 NULL, sc->sc_dev.dv_xname, "txtcpsum");
1053 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC,
1054 NULL, sc->sc_dev.dv_xname, "txudpsum");
1055 #endif /* DP83820 */
1056 #endif /* SIP_EVENT_COUNTERS */
1057
1058 /*
1059 * Make sure the interface is shutdown during reboot.
1060 */
1061 sc->sc_sdhook = shutdownhook_establish(SIP_DECL(shutdown), sc);
1062 if (sc->sc_sdhook == NULL)
1063 printf("%s: WARNING: unable to establish shutdown hook\n",
1064 sc->sc_dev.dv_xname);
1065 return;
1066
1067 /*
1068 * Free any resources we've allocated during the failed attach
1069 * attempt. Do this in reverse order and fall through.
1070 */
1071 fail_5:
1072 for (i = 0; i < SIP_NRXDESC; i++) {
1073 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1074 bus_dmamap_destroy(sc->sc_dmat,
1075 sc->sc_rxsoft[i].rxs_dmamap);
1076 }
1077 fail_4:
1078 for (i = 0; i < SIP_TXQUEUELEN; i++) {
1079 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1080 bus_dmamap_destroy(sc->sc_dmat,
1081 sc->sc_txsoft[i].txs_dmamap);
1082 }
1083 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1084 fail_3:
1085 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1086 fail_2:
1087 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
1088 sizeof(struct sip_control_data));
1089 fail_1:
1090 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1091 fail_0:
1092 return;
1093 }
1094
1095 /*
1096 * sip_shutdown:
1097 *
1098 * Make sure the interface is stopped at reboot time.
1099 */
1100 void
1101 SIP_DECL(shutdown)(void *arg)
1102 {
1103 struct sip_softc *sc = arg;
1104
1105 SIP_DECL(stop)(&sc->sc_ethercom.ec_if, 1);
1106 }
1107
1108 /*
1109 * sip_start: [ifnet interface function]
1110 *
1111 * Start packet transmission on the interface.
1112 */
1113 void
1114 SIP_DECL(start)(struct ifnet *ifp)
1115 {
1116 struct sip_softc *sc = ifp->if_softc;
1117 struct mbuf *m0, *m;
1118 struct sip_txsoft *txs;
1119 bus_dmamap_t dmamap;
1120 int error, nexttx, lasttx, seg;
1121 int ofree = sc->sc_txfree;
1122 #if 0
1123 int firsttx = sc->sc_txnext;
1124 #endif
1125 #ifdef DP83820
1126 struct m_tag *mtag;
1127 u_int32_t extsts;
1128 #endif
1129
1130 /*
1131 * If we've been told to pause, don't transmit any more packets.
1132 */
1133 if (sc->sc_flags & SIPF_PAUSED)
1134 ifp->if_flags |= IFF_OACTIVE;
1135
1136 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1137 return;
1138
1139 /*
1140 * Loop through the send queue, setting up transmit descriptors
1141 * until we drain the queue, or use up all available transmit
1142 * descriptors.
1143 */
1144 for (;;) {
1145 /* Get a work queue entry. */
1146 if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1147 SIP_EVCNT_INCR(&sc->sc_ev_txsstall);
1148 break;
1149 }
1150
1151 /*
1152 * Grab a packet off the queue.
1153 */
1154 IFQ_POLL(&ifp->if_snd, m0);
1155 if (m0 == NULL)
1156 break;
1157 #ifndef DP83820
1158 m = NULL;
1159 #endif
1160
1161 dmamap = txs->txs_dmamap;
1162
1163 #ifdef DP83820
1164 /*
1165 * Load the DMA map. If this fails, the packet either
1166 * didn't fit in the allotted number of segments, or we
1167 * were short on resources. For the too-many-segments
1168 * case, we simply report an error and drop the packet,
1169 * since we can't sanely copy a jumbo packet to a single
1170 * buffer.
1171 */
1172 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1173 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1174 if (error) {
1175 if (error == EFBIG) {
1176 printf("%s: Tx packet consumes too many "
1177 "DMA segments, dropping...\n",
1178 sc->sc_dev.dv_xname);
1179 IFQ_DEQUEUE(&ifp->if_snd, m0);
1180 m_freem(m0);
1181 continue;
1182 }
1183 /*
1184 * Short on resources, just stop for now.
1185 */
1186 break;
1187 }
1188 #else /* DP83820 */
1189 /*
1190 * Load the DMA map. If this fails, the packet either
1191 * didn't fit in the allotted number of segments, or we
1192 * were short on resources. In this case, we'll copy
1193 * and try again.
1194 */
1195 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1196 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1197 MGETHDR(m, M_DONTWAIT, MT_DATA);
1198 if (m == NULL) {
1199 printf("%s: unable to allocate Tx mbuf\n",
1200 sc->sc_dev.dv_xname);
1201 break;
1202 }
1203 if (m0->m_pkthdr.len > MHLEN) {
1204 MCLGET(m, M_DONTWAIT);
1205 if ((m->m_flags & M_EXT) == 0) {
1206 printf("%s: unable to allocate Tx "
1207 "cluster\n", sc->sc_dev.dv_xname);
1208 m_freem(m);
1209 break;
1210 }
1211 }
1212 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
1213 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1214 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
1215 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1216 if (error) {
1217 printf("%s: unable to load Tx buffer, "
1218 "error = %d\n", sc->sc_dev.dv_xname, error);
1219 break;
1220 }
1221 }
1222 #endif /* DP83820 */
1223
1224 /*
1225 * Ensure we have enough descriptors free to describe
1226 * the packet. Note, we always reserve one descriptor
1227 * at the end of the ring as a termination point, to
1228 * prevent wrap-around.
1229 */
1230 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
1231 /*
1232 * Not enough free descriptors to transmit this
1233 * packet. We haven't committed anything yet,
1234 * so just unload the DMA map, put the packet
1235 * back on the queue, and punt. Notify the upper
1236 * layer that there are no more slots left.
1237 *
1238 * XXX We could allocate an mbuf and copy, but
1239 * XXX is it worth it?
1240 */
1241 ifp->if_flags |= IFF_OACTIVE;
1242 bus_dmamap_unload(sc->sc_dmat, dmamap);
1243 #ifndef DP83820
1244 if (m != NULL)
1245 m_freem(m);
1246 #endif
1247 SIP_EVCNT_INCR(&sc->sc_ev_txdstall);
1248 break;
1249 }
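		/*
		 * Example of the check above: with sc_txfree == 4, a
		 * 4-segment packet is refused even though 4 descriptors
		 * are free; the descriptor held back is the termination
		 * point that keeps the active chain from wrapping onto
		 * itself.
		 */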
1250
1251 IFQ_DEQUEUE(&ifp->if_snd, m0);
1252 #ifndef DP83820
1253 if (m != NULL) {
1254 m_freem(m0);
1255 m0 = m;
1256 }
1257 #endif
1258
1259 /*
1260 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1261 */
1262
1263 /* Sync the DMA map. */
1264 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1265 BUS_DMASYNC_PREWRITE);
1266
1267 /*
1268 * Initialize the transmit descriptors.
1269 */
1270 for (nexttx = lasttx = sc->sc_txnext, seg = 0;
1271 seg < dmamap->dm_nsegs;
1272 seg++, nexttx = SIP_NEXTTX(nexttx)) {
1273 /*
1274 * If this is the first descriptor we're
1275 * enqueueing, don't set the OWN bit just
1276 * yet. That could cause a race condition.
1277 * We'll do it below.
1278 */
1279 sc->sc_txdescs[nexttx].sipd_bufptr =
1280 htole32(dmamap->dm_segs[seg].ds_addr);
1281 sc->sc_txdescs[nexttx].sipd_cmdsts =
1282 htole32((nexttx == sc->sc_txnext ? 0 : CMDSTS_OWN) |
1283 CMDSTS_MORE | dmamap->dm_segs[seg].ds_len);
1284 #ifdef DP83820
1285 sc->sc_txdescs[nexttx].sipd_extsts = 0;
1286 #endif /* DP83820 */
1287 lasttx = nexttx;
1288 }
1289
1290 /* Clear the MORE bit on the last segment. */
1291 sc->sc_txdescs[lasttx].sipd_cmdsts &= htole32(~CMDSTS_MORE);
1292
1293 /*
1294 * If we're in the interrupt delay window, delay the
1295 * interrupt.
1296 */
1297 if (++sc->sc_txwin >= (SIP_TXQUEUELEN * 2 / 3)) {
1298 SIP_EVCNT_INCR(&sc->sc_ev_txforceintr);
1299 sc->sc_txdescs[lasttx].sipd_cmdsts |=
1300 htole32(CMDSTS_INTR);
1301 sc->sc_txwin = 0;
1302 }
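
		/*
		 * With SIP_TXQUEUELEN == 256 the window above forces a Tx
		 * interrupt at least once every 170 packets even when no
		 * individual packet requested one, presumably so completed
		 * descriptors are reclaimed before the free list runs dry.
		 */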
1303
1304 #ifdef DP83820
1305 /*
1306 * If VLANs are enabled and the packet has a VLAN tag, set
1307 * up the descriptor to encapsulate the packet for us.
1308 *
1309 * This apparently has to be on the last descriptor of
1310 * the packet.
1311 */
1312 if (sc->sc_ethercom.ec_nvlans != 0 &&
1313 (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
1314 sc->sc_txdescs[lasttx].sipd_extsts |=
1315 htole32(EXTSTS_VPKT |
1316 htons(*(u_int *)(mtag + 1) & EXTSTS_VTCI));
1317 }
1318
1319 /*
1320 * If the upper-layer has requested IPv4/TCPv4/UDPv4
1321 * checksumming, set up the descriptor to do this work
1322 * for us.
1323 *
1324 * This apparently has to be on the first descriptor of
1325 * the packet.
1326 *
1327 * Byte-swap constants so the compiler can optimize.
1328 */
1329 extsts = 0;
1330 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1331 KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4);
1332 SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
1333 extsts |= htole32(EXTSTS_IPPKT);
1334 }
1335 if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
1336 KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4);
1337 SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
1338 extsts |= htole32(EXTSTS_TCPPKT);
1339 } else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
1340 KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4);
1341 SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
1342 extsts |= htole32(EXTSTS_UDPPKT);
1343 }
1344 sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts;
1345 #endif /* DP83820 */
1346
1347 /* Sync the descriptors we're using. */
1348 SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1349 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1350
1351 /*
1352 * The entire packet is set up. Give the first descriptor
1353 * to the chip now.
1354 */
1355 sc->sc_txdescs[sc->sc_txnext].sipd_cmdsts |=
1356 htole32(CMDSTS_OWN);
1357 SIP_CDTXSYNC(sc, sc->sc_txnext, 1,
1358 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1359
1360 /*
1361 * Store a pointer to the packet so we can free it later,
1362 * and remember what txdirty will be once the packet is
1363 * done.
1364 */
1365 txs->txs_mbuf = m0;
1366 txs->txs_firstdesc = sc->sc_txnext;
1367 txs->txs_lastdesc = lasttx;
1368
1369 /* Advance the tx pointer. */
1370 sc->sc_txfree -= dmamap->dm_nsegs;
1371 sc->sc_txnext = nexttx;
1372
1373 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1374 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1375
1376 #if NBPFILTER > 0
1377 /*
1378 * Pass the packet to any BPF listeners.
1379 */
1380 if (ifp->if_bpf)
1381 bpf_mtap(ifp->if_bpf, m0);
1382 #endif /* NBPFILTER > 0 */
1383 }
1384
1385 if (txs == NULL || sc->sc_txfree == 0) {
1386 /* No more slots left; notify upper layer. */
1387 ifp->if_flags |= IFF_OACTIVE;
1388 }
1389
1390 if (sc->sc_txfree != ofree) {
1391 /*
1392 * Start the transmit process. Note, the manual says
1393 * that if there are no pending transmissions in the
1394 * chip's internal queue (indicated by TXE being clear),
1395 * then the driver software must set the TXDP to the
1396 * first descriptor to be transmitted. However, if we
1397 * do this, it causes serious performance degradation on
1398 * the DP83820 under load; not setting TXDP doesn't seem
1399 * to adversely affect the SiS 900 or DP83815.
1400 *
1401 * Well, I guess it wouldn't be the first time a manual
1402 * has lied -- and they could be speaking of the NULL-
1403 * terminated descriptor list case, rather than OWN-
1404 * terminated rings.
1405 */
1406 #if 0
1407 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
1408 CR_TXE) == 0) {
1409 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
1410 SIP_CDTXADDR(sc, firsttx));
1411 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1412 }
1413 #else
1414 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1415 #endif
1416
1417 /* Set a watchdog timer in case the chip flakes out. */
1418 ifp->if_timer = 5;
1419 }
1420 }
1421
1422 /*
1423 * sip_watchdog: [ifnet interface function]
1424 *
1425 * Watchdog timer handler.
1426 */
1427 void
1428 SIP_DECL(watchdog)(struct ifnet *ifp)
1429 {
1430 struct sip_softc *sc = ifp->if_softc;
1431
1432 /*
1433 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
1434 * If we get a timeout, try to sweep up transmit descriptors.
1435 * If we manage to sweep them all up, ignore the lack of
1436 * interrupt.
1437 */
1438 SIP_DECL(txintr)(sc);
1439
1440 if (sc->sc_txfree != SIP_NTXDESC) {
1441 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1442 ifp->if_oerrors++;
1443
1444 /* Reset the interface. */
1445 (void) SIP_DECL(init)(ifp);
1446 } else if (ifp->if_flags & IFF_DEBUG)
1447 printf("%s: recovered from device timeout\n",
1448 sc->sc_dev.dv_xname);
1449
1450 /* Try to get more packets going. */
1451 SIP_DECL(start)(ifp);
1452 }
1453
1454 /*
1455 * sip_ioctl: [ifnet interface function]
1456 *
1457 * Handle control requests from the operator.
1458 */
1459 int
1460 SIP_DECL(ioctl)(struct ifnet *ifp, u_long cmd, caddr_t data)
1461 {
1462 struct sip_softc *sc = ifp->if_softc;
1463 struct ifreq *ifr = (struct ifreq *)data;
1464 int s, error;
1465
1466 s = splnet();
1467
1468 switch (cmd) {
1469 case SIOCSIFMEDIA:
1470 case SIOCGIFMEDIA:
1471 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1472 break;
1473
1474 default:
1475 error = ether_ioctl(ifp, cmd, data);
1476 if (error == ENETRESET) {
1477 /*
1478 * Multicast list has changed; set the hardware filter
1479 * accordingly.
1480 */
1481 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
1482 error = 0;
1483 }
1484 break;
1485 }
1486
1487 /* Try to get more packets going. */
1488 SIP_DECL(start)(ifp);
1489
1490 splx(s);
1491 return (error);
1492 }
1493
1494 /*
1495 * sip_intr:
1496 *
1497 * Interrupt service routine.
1498 */
1499 int
1500 SIP_DECL(intr)(void *arg)
1501 {
1502 struct sip_softc *sc = arg;
1503 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1504 u_int32_t isr;
1505 int handled = 0;
1506
1507 for (;;) {
1508 /* Reading clears interrupt. */
1509 isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
1510 if ((isr & sc->sc_imr) == 0)
1511 break;
1512
1513 #if NRND > 0
1514 if (RND_ENABLED(&sc->rnd_source))
1515 rnd_add_uint32(&sc->rnd_source, isr);
1516 #endif
1517
1518 handled = 1;
1519
1520 if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
1521 SIP_EVCNT_INCR(&sc->sc_ev_rxintr);
1522
1523 /* Grab any new packets. */
1524 SIP_DECL(rxintr)(sc);
1525
1526 if (isr & ISR_RXORN) {
1527 printf("%s: receive FIFO overrun\n",
1528 sc->sc_dev.dv_xname);
1529
1530 /* XXX adjust rx_drain_thresh? */
1531 }
1532
1533 if (isr & ISR_RXIDLE) {
1534 printf("%s: receive ring overrun\n",
1535 sc->sc_dev.dv_xname);
1536
1537 /* Get the receive process going again. */
1538 bus_space_write_4(sc->sc_st, sc->sc_sh,
1539 SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
1540 bus_space_write_4(sc->sc_st, sc->sc_sh,
1541 SIP_CR, CR_RXE);
1542 }
1543 }
1544
1545 if (isr & (ISR_TXURN|ISR_TXDESC|ISR_TXIDLE)) {
1546 #ifdef SIP_EVENT_COUNTERS
1547 if (isr & ISR_TXDESC)
1548 SIP_EVCNT_INCR(&sc->sc_ev_txdintr);
1549 else if (isr & ISR_TXIDLE)
1550 SIP_EVCNT_INCR(&sc->sc_ev_txiintr);
1551 #endif
1552
1553 /* Sweep up transmit descriptors. */
1554 SIP_DECL(txintr)(sc);
1555
1556 if (isr & ISR_TXURN) {
1557 u_int32_t thresh;
1558
1559 printf("%s: transmit FIFO underrun",
1560 sc->sc_dev.dv_xname);
1561
1562 thresh = sc->sc_tx_drain_thresh + 1;
1563 if (thresh <= TXCFG_DRTH &&
1564 (thresh * 32) <= (SIP_TXFIFO_SIZE -
1565 (sc->sc_tx_fill_thresh * 32))) {
1566 printf("; increasing Tx drain "
1567 "threshold to %u bytes\n",
1568 thresh * 32);
1569 sc->sc_tx_drain_thresh = thresh;
1570 (void) SIP_DECL(init)(ifp);
1571 } else {
1572 (void) SIP_DECL(init)(ifp);
1573 printf("\n");
1574 }
1575 }
1576 }
1577
1578 #if !defined(DP83820)
1579 if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
1580 if (isr & ISR_PAUSE_ST) {
1581 sc->sc_flags |= SIPF_PAUSED;
1582 ifp->if_flags |= IFF_OACTIVE;
1583 }
1584 if (isr & ISR_PAUSE_END) {
1585 sc->sc_flags &= ~SIPF_PAUSED;
1586 ifp->if_flags &= ~IFF_OACTIVE;
1587 }
1588 }
1589 #endif /* ! DP83820 */
1590
1591 if (isr & ISR_HIBERR) {
1592 int want_init = 0;
1593
1594 SIP_EVCNT_INCR(&sc->sc_ev_hiberr);
1595
1596 #define PRINTERR(bit, str) \
1597 do { \
1598 if ((isr & (bit)) != 0) { \
1599 if ((ifp->if_flags & IFF_DEBUG) != 0) \
1600 printf("%s: %s\n", \
1601 sc->sc_dev.dv_xname, str); \
1602 want_init = 1; \
1603 } \
1604 } while (/*CONSTCOND*/0)
1605
1606 PRINTERR(ISR_DPERR, "parity error");
1607 PRINTERR(ISR_SSERR, "system error");
1608 PRINTERR(ISR_RMABT, "master abort");
1609 PRINTERR(ISR_RTABT, "target abort");
1610 PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
1611 /*
1612 * Ignore:
1613 * Tx reset complete
1614 * Rx reset complete
1615 */
1616 if (want_init)
1617 (void) SIP_DECL(init)(ifp);
1618 #undef PRINTERR
1619 }
1620 }
1621
1622 /* Try to get more packets going. */
1623 SIP_DECL(start)(ifp);
1624
1625 return (handled);
1626 }
1627
1628 /*
1629 * sip_txintr:
1630 *
1631 * Helper; handle transmit interrupts.
1632 */
1633 void
1634 SIP_DECL(txintr)(struct sip_softc *sc)
1635 {
1636 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1637 struct sip_txsoft *txs;
1638 u_int32_t cmdsts;
1639
1640 if ((sc->sc_flags & SIPF_PAUSED) == 0)
1641 ifp->if_flags &= ~IFF_OACTIVE;
1642
1643 /*
1644 * Go through our Tx list and free mbufs for those
1645 * frames which have been transmitted.
1646 */
1647 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1648 SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1649 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1650
1651 cmdsts = le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
1652 if (cmdsts & CMDSTS_OWN)
1653 break;
1654
1655 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1656
1657 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1658
1659 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1660 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1661 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1662 m_freem(txs->txs_mbuf);
1663 txs->txs_mbuf = NULL;
1664
1665 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1666
1667 /*
1668 * Check for errors and collisions.
1669 */
1670 if (cmdsts &
1671 (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
1672 ifp->if_oerrors++;
1673 if (cmdsts & CMDSTS_Tx_EC)
1674 ifp->if_collisions += 16;
1675 if (ifp->if_flags & IFF_DEBUG) {
1676 if (cmdsts & CMDSTS_Tx_ED)
1677 printf("%s: excessive deferral\n",
1678 sc->sc_dev.dv_xname);
1679 if (cmdsts & CMDSTS_Tx_EC)
1680 printf("%s: excessive collisions\n",
1681 sc->sc_dev.dv_xname);
1682 }
1683 } else {
1684 /* Packet was transmitted successfully. */
1685 ifp->if_opackets++;
1686 ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
1687 }
1688 }
1689
1690 /*
1691 * If there are no more pending transmissions, cancel the watchdog
1692 * timer.
1693 */
1694 if (txs == NULL) {
1695 ifp->if_timer = 0;
1696 sc->sc_txwin = 0;
1697 }
1698 }
1699
1700 #if defined(DP83820)
1701 /*
1702 * sip_rxintr:
1703 *
1704 * Helper; handle receive interrupts.
1705 */
1706 void
1707 SIP_DECL(rxintr)(struct sip_softc *sc)
1708 {
1709 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1710 struct sip_rxsoft *rxs;
1711 struct mbuf *m, *tailm;
1712 u_int32_t cmdsts, extsts;
1713 int i, len;
1714
1715 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1716 rxs = &sc->sc_rxsoft[i];
1717
1718 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1719
1720 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1721 extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts);
1722
1723 /*
1724 * NOTE: OWN is set if owned by _consumer_. We're the
1725 * consumer of the receive ring, so if the bit is clear,
1726 * we have processed all of the packets.
1727 */
1728 if ((cmdsts & CMDSTS_OWN) == 0) {
1729 /*
1730 * We have processed all of the receive buffers.
1731 */
1732 break;
1733 }
1734
1735 if (__predict_false(sc->sc_rxdiscard)) {
1736 SIP_INIT_RXDESC(sc, i);
1737 if ((cmdsts & CMDSTS_MORE) == 0) {
1738 /* Reset our state. */
1739 sc->sc_rxdiscard = 0;
1740 }
1741 continue;
1742 }
1743
1744 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1745 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1746
1747 m = rxs->rxs_mbuf;
1748
1749 /*
1750 * Add a new receive buffer to the ring.
1751 */
1752 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1753 /*
1754 * Failed, throw away what we've done so
1755 * far, and discard the rest of the packet.
1756 */
1757 ifp->if_ierrors++;
1758 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1759 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1760 SIP_INIT_RXDESC(sc, i);
1761 if (cmdsts & CMDSTS_MORE)
1762 sc->sc_rxdiscard = 1;
1763 if (sc->sc_rxhead != NULL)
1764 m_freem(sc->sc_rxhead);
1765 SIP_RXCHAIN_RESET(sc);
1766 continue;
1767 }
1768
1769 SIP_RXCHAIN_LINK(sc, m);
1770
1771 /*
1772 * If this is not the end of the packet, keep
1773 * looking.
1774 */
1775 if (cmdsts & CMDSTS_MORE) {
1776 sc->sc_rxlen += m->m_len;
1777 continue;
1778 }
1779
1780 /*
1781 * Okay, we have the entire packet now...
1782 */
1783 *sc->sc_rxtailp = NULL;
1784 m = sc->sc_rxhead;
1785 tailm = sc->sc_rxtail;
1786
1787 SIP_RXCHAIN_RESET(sc);
1788
1789 /*
1790 * If an error occurred, update stats and drop the packet.
1791 */
1792 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1793 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1794 ifp->if_ierrors++;
1795 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1796 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1797 /* Receive overrun handled elsewhere. */
1798 printf("%s: receive descriptor error\n",
1799 sc->sc_dev.dv_xname);
1800 }
1801 #define PRINTERR(bit, str) \
1802 if ((ifp->if_flags & IFF_DEBUG) != 0 && \
1803 (cmdsts & (bit)) != 0) \
1804 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1805 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1806 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1807 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1808 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1809 #undef PRINTERR
1810 m_freem(m);
1811 continue;
1812 }
1813
1814 /*
1815 * No errors.
1816 *
1817 * Note, the DP83820 includes the CRC with
1818 * every packet.
1819 */
1820 len = CMDSTS_SIZE(cmdsts);
1821 tailm->m_len = len - sc->sc_rxlen;
1822
1823 /*
1824 * If the packet is small enough to fit in a
1825 * single header mbuf, allocate one and copy
1826 * the data into it. This greatly reduces
1827 * memory consumption when we receive lots
1828 * of small packets.
1829 */
1830 if (SIP_DECL(copy_small) != 0 && len <= (MHLEN - 2)) {
1831 struct mbuf *nm;
1832 MGETHDR(nm, M_DONTWAIT, MT_DATA);
1833 if (nm == NULL) {
1834 ifp->if_ierrors++;
1835 m_freem(m);
1836 continue;
1837 }
1838 nm->m_data += 2;
1839 nm->m_pkthdr.len = nm->m_len = len;
1840 m_copydata(m, 0, len, mtod(nm, caddr_t));
1841 m_freem(m);
1842 m = nm;
1843 }
1844 #ifndef __NO_STRICT_ALIGNMENT
1845 else {
1846 /*
1847 * The DP83820's receive buffers must be 4-byte
1848 * aligned. But this means that the data after
1849 * the Ethernet header is misaligned. To compensate,
1850 * we have artificially shortened the buffer size
1851 * in the descriptor, and we do an overlapping copy
1852 * of the data two bytes further in (in the first
1853 * buffer of the chain only).
1854 */
1855 memmove(mtod(m, caddr_t) + 2, mtod(m, caddr_t),
1856 m->m_len);
1857 m->m_data += 2;
1858 }
1859 #endif /* ! __NO_STRICT_ALIGNMENT */
1860
1861 /*
1862 * If VLANs are enabled, VLAN packets have been unwrapped
1863 * for us. Associate the tag with the packet.
1864 */
1865 if (sc->sc_ethercom.ec_nvlans != 0 &&
1866 (extsts & EXTSTS_VPKT) != 0) {
1867 struct m_tag *vtag;
1868
1869 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
1870 M_NOWAIT);
1871 if (vtag == NULL) {
1872 ifp->if_ierrors++;
1873 printf("%s: unable to allocate VLAN tag\n",
1874 sc->sc_dev.dv_xname);
1875 m_freem(m);
1876 continue;
1877 }
1878
1879 *(u_int *)(vtag + 1) = ntohs(extsts & EXTSTS_VTCI);
1880 }
1881
1882 /*
1883 * Set the incoming checksum information for the
1884 * packet.
1885 */
1886 if ((extsts & EXTSTS_IPPKT) != 0) {
1887 SIP_EVCNT_INCR(&sc->sc_ev_rxipsum);
1888 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1889 if (extsts & EXTSTS_Rx_IPERR)
1890 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1891 if (extsts & EXTSTS_TCPPKT) {
1892 SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
1893 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1894 if (extsts & EXTSTS_Rx_TCPERR)
1895 m->m_pkthdr.csum_flags |=
1896 M_CSUM_TCP_UDP_BAD;
1897 } else if (extsts & EXTSTS_UDPPKT) {
1898 SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum);
1899 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1900 if (extsts & EXTSTS_Rx_UDPERR)
1901 m->m_pkthdr.csum_flags |=
1902 M_CSUM_TCP_UDP_BAD;
1903 }
1904 }
1905
1906 ifp->if_ipackets++;
1907 m->m_flags |= M_HASFCS;
1908 m->m_pkthdr.rcvif = ifp;
1909 m->m_pkthdr.len = len;
1910
1911 #if NBPFILTER > 0
1912 /*
1913 * Pass this up to any BPF listeners, but only
1914 		 * pass it up the stack if it's for us.
1915 */
1916 if (ifp->if_bpf)
1917 bpf_mtap(ifp->if_bpf, m);
1918 #endif /* NBPFILTER > 0 */
1919
1920 /* Pass it on. */
1921 (*ifp->if_input)(ifp, m);
1922 }
1923
1924 /* Update the receive pointer. */
1925 sc->sc_rxptr = i;
1926 }
1927 #else /* ! DP83820 */
1928 /*
1929 * sip_rxintr:
1930 *
1931 * Helper; handle receive interrupts.
1932 */
1933 void
1934 SIP_DECL(rxintr)(struct sip_softc *sc)
1935 {
1936 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1937 struct sip_rxsoft *rxs;
1938 struct mbuf *m;
1939 u_int32_t cmdsts;
1940 int i, len;
1941
1942 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1943 rxs = &sc->sc_rxsoft[i];
1944
1945 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1946
1947 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1948
1949 /*
1950 * NOTE: OWN is set if owned by _consumer_. We're the
1951 * consumer of the receive ring, so if the bit is clear,
1952 * we have processed all of the packets.
1953 */
1954 if ((cmdsts & CMDSTS_OWN) == 0) {
1955 /*
1956 * We have processed all of the receive buffers.
1957 */
1958 break;
1959 }
1960
1961 /*
1962 * If any collisions were seen on the wire, count one.
1963 */
1964 if (cmdsts & CMDSTS_Rx_COL)
1965 ifp->if_collisions++;
1966
1967 /*
1968 * If an error occurred, update stats, clear the status
1969 * word, and leave the packet buffer in place. It will
1970 * simply be reused the next time the ring comes around.
1971 */
1972 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1973 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1974 ifp->if_ierrors++;
1975 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1976 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1977 /* Receive overrun handled elsewhere. */
1978 printf("%s: receive descriptor error\n",
1979 sc->sc_dev.dv_xname);
1980 }
1981 #define PRINTERR(bit, str) \
1982 if ((ifp->if_flags & IFF_DEBUG) != 0 && \
1983 (cmdsts & (bit)) != 0) \
1984 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1985 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1986 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1987 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1988 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1989 #undef PRINTERR
1990 SIP_INIT_RXDESC(sc, i);
1991 continue;
1992 }
1993
1994 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1995 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1996
1997 /*
1998 * No errors; receive the packet. Note, the SiS 900
1999 * includes the CRC with every packet.
2000 */
2001 len = CMDSTS_SIZE(cmdsts);
2002
2003 #ifdef __NO_STRICT_ALIGNMENT
2004 /*
2005 * If the packet is small enough to fit in a
2006 * single header mbuf, allocate one and copy
2007 * the data into it. This greatly reduces
2008 * memory consumption when we receive lots
2009 * of small packets.
2010 *
2011 * Otherwise, we add a new buffer to the receive
2012 * chain. If this fails, we drop the packet and
2013 * recycle the old buffer.
2014 */
2015 if (SIP_DECL(copy_small) != 0 && len <= MHLEN) {
2016 MGETHDR(m, M_DONTWAIT, MT_DATA);
2017 if (m == NULL)
2018 goto dropit;
2019 memcpy(mtod(m, caddr_t),
2020 mtod(rxs->rxs_mbuf, caddr_t), len);
2021 SIP_INIT_RXDESC(sc, i);
2022 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2023 rxs->rxs_dmamap->dm_mapsize,
2024 BUS_DMASYNC_PREREAD);
2025 } else {
2026 m = rxs->rxs_mbuf;
2027 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
2028 dropit:
2029 ifp->if_ierrors++;
2030 SIP_INIT_RXDESC(sc, i);
2031 bus_dmamap_sync(sc->sc_dmat,
2032 rxs->rxs_dmamap, 0,
2033 rxs->rxs_dmamap->dm_mapsize,
2034 BUS_DMASYNC_PREREAD);
2035 continue;
2036 }
2037 }
2038 #else
2039 /*
2040 * The SiS 900's receive buffers must be 4-byte aligned.
2041 * But this means that the data after the Ethernet header
2042 * is misaligned. We must allocate a new buffer and
2043 * copy the data, shifted forward 2 bytes.
2044 */
2045 MGETHDR(m, M_DONTWAIT, MT_DATA);
2046 if (m == NULL) {
2047 dropit:
2048 ifp->if_ierrors++;
2049 SIP_INIT_RXDESC(sc, i);
2050 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2051 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2052 continue;
2053 }
2054 if (len > (MHLEN - 2)) {
2055 MCLGET(m, M_DONTWAIT);
2056 if ((m->m_flags & M_EXT) == 0) {
2057 m_freem(m);
2058 goto dropit;
2059 }
2060 }
2061 m->m_data += 2;
2062
2063 /*
2064 * Note that we use clusters for incoming frames, so the
2065 * buffer is virtually contiguous.
2066 */
2067 memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
2068
2069 /* Allow the receive descriptor to continue using its mbuf. */
2070 SIP_INIT_RXDESC(sc, i);
2071 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2072 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2073 #endif /* __NO_STRICT_ALIGNMENT */
2074
2075 ifp->if_ipackets++;
2076 m->m_flags |= M_HASFCS;
2077 m->m_pkthdr.rcvif = ifp;
2078 m->m_pkthdr.len = m->m_len = len;
2079
2080 #if NBPFILTER > 0
2081 /*
2082 * Pass this up to any BPF listeners, but only
2083 		 * pass it up the stack if it's for us.
2084 */
2085 if (ifp->if_bpf)
2086 bpf_mtap(ifp->if_bpf, m);
2087 #endif /* NBPFILTER > 0 */
2088
2089 /* Pass it on. */
2090 (*ifp->if_input)(ifp, m);
2091 }
2092
2093 /* Update the receive pointer. */
2094 sc->sc_rxptr = i;
2095 }
2096 #endif /* DP83820 */
2097
2098 /*
2099 * sip_tick:
2100 *
2101 * One second timer, used to tick the MII.
2102 */
2103 void
2104 SIP_DECL(tick)(void *arg)
2105 {
2106 struct sip_softc *sc = arg;
2107 int s;
2108
2109 s = splnet();
2110 mii_tick(&sc->sc_mii);
2111 splx(s);
2112
2113 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2114 }
2115
2116 /*
2117 * sip_reset:
2118 *
2119 * Perform a soft reset on the SiS 900.
2120 */
2121 void
2122 SIP_DECL(reset)(struct sip_softc *sc)
2123 {
2124 bus_space_tag_t st = sc->sc_st;
2125 bus_space_handle_t sh = sc->sc_sh;
2126 int i;
2127
2128 bus_space_write_4(st, sh, SIP_IER, 0);
2129 bus_space_write_4(st, sh, SIP_IMR, 0);
2130 bus_space_write_4(st, sh, SIP_RFCR, 0);
2131 bus_space_write_4(st, sh, SIP_CR, CR_RST);
2132
2133 for (i = 0; i < SIP_TIMEOUT; i++) {
2134 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0)
2135 break;
2136 delay(2);
2137 }
2138
2139 if (i == SIP_TIMEOUT)
2140 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
2141
2142 delay(1000);
2143
2144 #ifdef DP83820
2145 /*
2146 * Set the general purpose I/O bits. Do it here in case we
2147 * need to have GPIO set up to talk to the media interface.
2148 */
2149 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior);
2150 delay(1000);
2151 #endif /* DP83820 */
2152 }
2153
2154 /*
2155 * sip_init: [ ifnet interface function ]
2156 *
2157 * Initialize the interface. Must be called at splnet().
2158 */
2159 int
2160 SIP_DECL(init)(struct ifnet *ifp)
2161 {
2162 struct sip_softc *sc = ifp->if_softc;
2163 bus_space_tag_t st = sc->sc_st;
2164 bus_space_handle_t sh = sc->sc_sh;
2165 struct sip_txsoft *txs;
2166 struct sip_rxsoft *rxs;
2167 struct sip_desc *sipd;
2168 u_int32_t reg;
2169 int i, error = 0;
2170
2171 /*
2172 * Cancel any pending I/O.
2173 */
2174 SIP_DECL(stop)(ifp, 0);
2175
2176 /*
2177 * Reset the chip to a known state.
2178 */
2179 SIP_DECL(reset)(sc);
2180
2181 #if !defined(DP83820)
2182 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) {
2183 /*
2184 * DP83815 manual, page 78:
2185 * 4.4 Recommended Registers Configuration
2186 * For optimum performance of the DP83815, version noted
2187 * as DP83815CVNG (SRR = 203h), the listed register
2188 * modifications must be followed in sequence...
2189 *
2190 * It's not clear if this should be 302h or 203h because that
2191 * chip name is listed as SRR 302h in the description of the
2192 * SRR register. However, my revision 302h DP83815 on the
2193 * Netgear FA311 purchased in 02/2001 needs these settings
2194 * to avoid tons of errors in AcceptPerfectMatch (non-
2195 * IFF_PROMISC) mode. I do not know if other revisions need
2196 * this set or not. [briggs -- 09 March 2001]
2197 *
2198 * Note that only the low-order 12 bits of 0xe4 are documented
2199 * and that this sets reserved bits in that register.
2200 */
2201 reg = bus_space_read_4(st, sh, SIP_NS_SRR);
2202 if (reg == 0x302) {
2203 bus_space_write_4(st, sh, 0x00cc, 0x0001);
2204 bus_space_write_4(st, sh, 0x00e4, 0x189C);
2205 bus_space_write_4(st, sh, 0x00fc, 0x0000);
2206 bus_space_write_4(st, sh, 0x00f4, 0x5040);
2207 bus_space_write_4(st, sh, 0x00f8, 0x008c);
2208 }
2209 }
2210 #endif /* ! DP83820 */
2211
2212 /*
2213 * Initialize the transmit descriptor ring.
2214 */
2215 for (i = 0; i < SIP_NTXDESC; i++) {
2216 sipd = &sc->sc_txdescs[i];
2217 memset(sipd, 0, sizeof(struct sip_desc));
2218 sipd->sipd_link = htole32(SIP_CDTXADDR(sc, SIP_NEXTTX(i)));
2219 }
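	/*
	 * The sipd_link field of each descriptor points at the next one,
	 * and SIP_NEXTTX() wraps at SIP_NTXDESC, so the descriptors form
	 * a circular ring the chip can follow on its own.
	 */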
2220 SIP_CDTXSYNC(sc, 0, SIP_NTXDESC,
2221 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2222 sc->sc_txfree = SIP_NTXDESC;
2223 sc->sc_txnext = 0;
2224 sc->sc_txwin = 0;
2225
2226 /*
2227 * Initialize the transmit job descriptors.
2228 */
2229 SIMPLEQ_INIT(&sc->sc_txfreeq);
2230 SIMPLEQ_INIT(&sc->sc_txdirtyq);
2231 for (i = 0; i < SIP_TXQUEUELEN; i++) {
2232 txs = &sc->sc_txsoft[i];
2233 txs->txs_mbuf = NULL;
2234 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2235 }
2236
2237 /*
2238 * Initialize the receive descriptor and receive job
2239 * descriptor rings.
2240 */
2241 for (i = 0; i < SIP_NRXDESC; i++) {
2242 rxs = &sc->sc_rxsoft[i];
2243 if (rxs->rxs_mbuf == NULL) {
2244 if ((error = SIP_DECL(add_rxbuf)(sc, i)) != 0) {
2245 printf("%s: unable to allocate or map rx "
2246 "buffer %d, error = %d\n",
2247 sc->sc_dev.dv_xname, i, error);
2248 /*
2249 * XXX Should attempt to run with fewer receive
2250 * XXX buffers instead of just failing.
2251 */
2252 SIP_DECL(rxdrain)(sc);
2253 goto out;
2254 }
2255 } else
2256 SIP_INIT_RXDESC(sc, i);
2257 }
2258 sc->sc_rxptr = 0;
2259 #ifdef DP83820
2260 sc->sc_rxdiscard = 0;
2261 SIP_RXCHAIN_RESET(sc);
2262 #endif /* DP83820 */
2263
2264 /*
2265 * Set the configuration register; it's already initialized
2266 * in sip_attach().
2267 */
2268 bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg);
2269
2270 /*
2271 * Initialize the prototype TXCFG register.
2272 */
2273 #if defined(DP83820)
2274 sc->sc_txcfg = TXCFG_MXDMA_512;
2275 sc->sc_rxcfg = RXCFG_MXDMA_512;
2276 #else
2277 if ((SIP_SIS900_REV(sc, SIS_REV_635) ||
2278 SIP_SIS900_REV(sc, SIS_REV_900B)) &&
2279 (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG) & CFG_EDBMASTEN)) {
2280 sc->sc_txcfg = TXCFG_MXDMA_64;
2281 sc->sc_rxcfg = RXCFG_MXDMA_64;
2282 } else {
2283 sc->sc_txcfg = TXCFG_MXDMA_512;
2284 sc->sc_rxcfg = RXCFG_MXDMA_512;
2285 }
2286 #endif /* DP83820 */
2287
2288 sc->sc_txcfg |= TXCFG_ATP |
2289 (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) |
2290 sc->sc_tx_drain_thresh;
2291 bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg);
2292
2293 /*
2294 * Initialize the receive drain threshold if we have never
2295 * done so.
2296 */
2297 if (sc->sc_rx_drain_thresh == 0) {
2298 /*
2299 * XXX This value should be tuned. This is set to the
2300 * maximum of 248 bytes, and we may be able to improve
2301 * performance by decreasing it (although we should never
2302 * set this value lower than 2; 14 bytes are required to
2303 * filter the packet).
2304 */
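		/*
		 * (The DRTH field counts in 8-byte units; the maximum
		 * field value of 31 is where the 248-byte figure above
		 * comes from.)
		 */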
2305 sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT;
2306 }
2307
2308 /*
2309 * Initialize the prototype RXCFG register.
2310 */
2311 sc->sc_rxcfg |= (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT);
2312 bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg);
2313
2314 #ifdef DP83820
2315 /*
2316 * Initialize the VLAN/IP receive control register.
2317 * We enable checksum computation on all incoming
2318 * packets, and do not reject packets w/ bad checksums.
2319 */
2320 reg = 0;
2321 if (ifp->if_capenable &
2322 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2323 reg |= VRCR_IPEN;
2324 if (sc->sc_ethercom.ec_nvlans != 0)
2325 reg |= VRCR_VTDEN|VRCR_VTREN;
2326 bus_space_write_4(st, sh, SIP_VRCR, reg);
2327
2328 /*
2329 * Initialize the VLAN/IP transmit control register.
2330 * We enable outgoing checksum computation on a
2331 * per-packet basis.
2332 */
2333 reg = 0;
2334 if (ifp->if_capenable &
2335 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2336 reg |= VTCR_PPCHK;
2337 if (sc->sc_ethercom.ec_nvlans != 0)
2338 reg |= VTCR_VPPTI;
2339 bus_space_write_4(st, sh, SIP_VTCR, reg);
2340
2341 /*
2342 * If we're using VLANs, initialize the VLAN data register.
2343 * To understand why we bswap the VLAN Ethertype, see section
2344 * 4.2.36 of the DP83820 manual.
2345 */
2346 if (sc->sc_ethercom.ec_nvlans != 0)
2347 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN));
2348 #endif /* DP83820 */
2349
2350 /*
2351 * Give the transmit and receive rings to the chip.
2352 */
2353 bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
2354 bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
2355
2356 /*
2357 * Initialize the interrupt mask.
2358 */
2359 sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR|
2360 ISR_TXURN|ISR_TXDESC|ISR_TXIDLE|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
2361 bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);
2362
2363 /* Set up the receive filter. */
2364 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
2365
2366 /*
2367 * Set the current media. Do this after initializing the prototype
2368 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
2369 * control.
2370 */
2371 mii_mediachg(&sc->sc_mii);
2372
2373 /*
2374 * Enable interrupts.
2375 */
2376 bus_space_write_4(st, sh, SIP_IER, IER_IE);
2377
2378 /*
2379 * Start the transmit and receive processes.
2380 */
2381 bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);
2382
2383 /*
2384 * Start the one second MII clock.
2385 */
2386 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2387
2388 /*
2389 * ...all done!
2390 */
2391 ifp->if_flags |= IFF_RUNNING;
2392 ifp->if_flags &= ~IFF_OACTIVE;
2393
2394 out:
2395 if (error)
2396 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2397 return (error);
2398 }
2399
2400 /*
2401  * sip_rxdrain:
2402 *
2403 * Drain the receive queue.
2404 */
2405 void
2406 SIP_DECL(rxdrain)(struct sip_softc *sc)
2407 {
2408 struct sip_rxsoft *rxs;
2409 int i;
2410
2411 for (i = 0; i < SIP_NRXDESC; i++) {
2412 rxs = &sc->sc_rxsoft[i];
2413 if (rxs->rxs_mbuf != NULL) {
2414 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2415 m_freem(rxs->rxs_mbuf);
2416 rxs->rxs_mbuf = NULL;
2417 }
2418 }
2419 }
2420
2421 /*
2422 * sip_stop: [ ifnet interface function ]
2423 *
2424 * Stop transmission on the interface.
2425 */
2426 void
2427 SIP_DECL(stop)(struct ifnet *ifp, int disable)
2428 {
2429 struct sip_softc *sc = ifp->if_softc;
2430 bus_space_tag_t st = sc->sc_st;
2431 bus_space_handle_t sh = sc->sc_sh;
2432 struct sip_txsoft *txs;
2433 u_int32_t cmdsts = 0; /* DEBUG */
2434
2435 /*
2436 * Stop the one second clock.
2437 */
2438 callout_stop(&sc->sc_tick_ch);
2439
2440 /* Down the MII. */
2441 mii_down(&sc->sc_mii);
2442
2443 /*
2444 * Disable interrupts.
2445 */
2446 bus_space_write_4(st, sh, SIP_IER, 0);
2447
2448 /*
2449 * Stop receiver and transmitter.
2450 */
2451 bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);
2452
2453 /*
2454 * Release any queued transmit buffers.
2455 */
2456 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
2457 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2458 SIMPLEQ_NEXT(txs, txs_q) == NULL &&
2459 (le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts) &
2460 CMDSTS_INTR) == 0)
2461 printf("%s: sip_stop: last descriptor does not "
2462 "have INTR bit set\n", sc->sc_dev.dv_xname);
2463 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
2464 #ifdef DIAGNOSTIC
2465 if (txs->txs_mbuf == NULL) {
2466 printf("%s: dirty txsoft with no mbuf chain\n",
2467 sc->sc_dev.dv_xname);
2468 panic("sip_stop");
2469 }
2470 #endif
2471 cmdsts |= /* DEBUG */
2472 le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
2473 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2474 m_freem(txs->txs_mbuf);
2475 txs->txs_mbuf = NULL;
2476 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2477 }
2478
2479 if (disable)
2480 SIP_DECL(rxdrain)(sc);
2481
2482 /*
2483 * Mark the interface down and cancel the watchdog timer.
2484 */
2485 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2486 ifp->if_timer = 0;
2487
2488 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2489 (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC)
2490 printf("%s: sip_stop: no INTR bits set in dirty tx "
2491 "descriptors\n", sc->sc_dev.dv_xname);
2492 }
2493
2494 /*
2495 * sip_read_eeprom:
2496 *
2497 * Read data from the serial EEPROM.
2498 */
2499 void
2500 SIP_DECL(read_eeprom)(struct sip_softc *sc, int word, int wordcnt,
2501 u_int16_t *data)
2502 {
2503 bus_space_tag_t st = sc->sc_st;
2504 bus_space_handle_t sh = sc->sc_sh;
2505 u_int16_t reg;
2506 int i, x;
2507
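	/*
	 * The EEPROM is read one word at a time over a bit-banged serial
	 * interface: assert chip select, clock out the 3-bit READ opcode
	 * and the 6-bit word address MSB-first on EEDI, then clock in the
	 * 16 data bits MSB-first from EEDO, toggling EESK around each bit.
	 */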
2508 for (i = 0; i < wordcnt; i++) {
2509 /* Send CHIP SELECT. */
2510 reg = EROMAR_EECS;
2511 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2512
2513 /* Shift in the READ opcode. */
2514 for (x = 3; x > 0; x--) {
2515 if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
2516 reg |= EROMAR_EEDI;
2517 else
2518 reg &= ~EROMAR_EEDI;
2519 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2520 bus_space_write_4(st, sh, SIP_EROMAR,
2521 reg | EROMAR_EESK);
2522 delay(4);
2523 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2524 delay(4);
2525 }
2526
2527 /* Shift in address. */
2528 for (x = 6; x > 0; x--) {
2529 if ((word + i) & (1 << (x - 1)))
2530 reg |= EROMAR_EEDI;
2531 else
2532 reg &= ~EROMAR_EEDI;
2533 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2534 bus_space_write_4(st, sh, SIP_EROMAR,
2535 reg | EROMAR_EESK);
2536 delay(4);
2537 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2538 delay(4);
2539 }
2540
2541 /* Shift out data. */
2542 reg = EROMAR_EECS;
2543 data[i] = 0;
2544 for (x = 16; x > 0; x--) {
2545 bus_space_write_4(st, sh, SIP_EROMAR,
2546 reg | EROMAR_EESK);
2547 delay(4);
2548 if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
2549 data[i] |= (1 << (x - 1));
2550 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2551 delay(4);
2552 }
2553
2554 /* Clear CHIP SELECT. */
2555 bus_space_write_4(st, sh, SIP_EROMAR, 0);
2556 delay(4);
2557 }
2558 }
2559
2560 /*
2561 * sip_add_rxbuf:
2562 *
2563 * Add a receive buffer to the indicated descriptor.
2564 */
2565 int
2566 SIP_DECL(add_rxbuf)(struct sip_softc *sc, int idx)
2567 {
2568 struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
2569 struct mbuf *m;
2570 int error;
2571
2572 MGETHDR(m, M_DONTWAIT, MT_DATA);
2573 if (m == NULL)
2574 return (ENOBUFS);
2575
2576 MCLGET(m, M_DONTWAIT);
2577 if ((m->m_flags & M_EXT) == 0) {
2578 m_freem(m);
2579 return (ENOBUFS);
2580 }
2581
2582 #if defined(DP83820)
2583 m->m_len = SIP_RXBUF_LEN;
2584 #endif /* DP83820 */
2585
2586 if (rxs->rxs_mbuf != NULL)
2587 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2588
2589 rxs->rxs_mbuf = m;
2590
2591 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2592 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2593 BUS_DMA_READ|BUS_DMA_NOWAIT);
2594 if (error) {
2595 printf("%s: can't load rx DMA map %d, error = %d\n",
2596 sc->sc_dev.dv_xname, idx, error);
2597 panic("sip_add_rxbuf"); /* XXX */
2598 }
2599
2600 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2601 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2602
2603 SIP_INIT_RXDESC(sc, idx);
2604
2605 return (0);
2606 }
2607
2608 #if !defined(DP83820)
2609 /*
2610 * sip_sis900_set_filter:
2611 *
2612 * Set up the receive filter.
2613 */
2614 void
2615 SIP_DECL(sis900_set_filter)(struct sip_softc *sc)
2616 {
2617 bus_space_tag_t st = sc->sc_st;
2618 bus_space_handle_t sh = sc->sc_sh;
2619 struct ethercom *ec = &sc->sc_ethercom;
2620 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2621 struct ether_multi *enm;
2622 u_int8_t *cp;
2623 struct ether_multistep step;
2624 u_int32_t crc, mchash[16];
2625
2626 /*
2627 * Initialize the prototype RFCR.
2628 */
2629 sc->sc_rfcr = RFCR_RFEN;
2630 if (ifp->if_flags & IFF_BROADCAST)
2631 sc->sc_rfcr |= RFCR_AAB;
2632 if (ifp->if_flags & IFF_PROMISC) {
2633 sc->sc_rfcr |= RFCR_AAP;
2634 goto allmulti;
2635 }
2636
2637 /*
2638 * Set up the multicast address filter by passing all multicast
2639 	 * addresses through a CRC generator, and then using the 7 (8 on
2640 	 * the 635 and 900B revisions) high-order bits of the CRC as an
2641 	 * index into the 128-bit (256-bit) multicast hash table (only the
2642 	 * lower 16 bits of each 32-bit multicast hash register are valid).
2643 	 * The high-order bits select the register; the rest select the bit.
2644 */
2645
2646 memset(mchash, 0, sizeof(mchash));
2647
2648 ETHER_FIRST_MULTI(step, ec, enm);
2649 while (enm != NULL) {
2650 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2651 /*
2652 * We must listen to a range of multicast addresses.
2653 * For now, just accept all multicasts, rather than
2654 * trying to set only those filter bits needed to match
2655 * the range. (At this time, the only use of address
2656 * ranges is for IP multicast routing, for which the
2657 * range is big enough to require all bits set.)
2658 */
2659 goto allmulti;
2660 }
2661
2662 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2663
2664 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2665 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2666 /* Just want the 8 most significant bits. */
2667 crc >>= 24;
2668 } else {
2669 /* Just want the 7 most significant bits. */
2670 crc >>= 25;
2671 }
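		/*
		 * The upper bits of the hash now select one of the 16-bit
		 * filter words, and the low 4 bits select the bit within
		 * that word; e.g. an 8-bit hash of 0x9c lands in bit 0xc
		 * of mchash[9].
		 */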
2672
2673 /* Set the corresponding bit in the hash table. */
2674 mchash[crc >> 4] |= 1 << (crc & 0xf);
2675
2676 ETHER_NEXT_MULTI(step, enm);
2677 }
2678
2679 ifp->if_flags &= ~IFF_ALLMULTI;
2680 goto setit;
2681
2682 allmulti:
2683 ifp->if_flags |= IFF_ALLMULTI;
2684 sc->sc_rfcr |= RFCR_AAM;
2685
2686 setit:
2687 #define FILTER_EMIT(addr, data) \
2688 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2689 delay(1); \
2690 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2691 delay(1)
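	/*
	 * Filter memory is accessed indirectly: the RFCR write selects the
	 * filter-RAM address and the RFDR write supplies the 16-bit data.
	 */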
2692
2693 /*
2694 * Disable receive filter, and program the node address.
2695 */
2696 cp = LLADDR(ifp->if_sadl);
2697 FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
2698 FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
2699 FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);
2700
2701 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2702 /*
2703 * Program the multicast hash table.
2704 */
2705 FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
2706 FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
2707 FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
2708 FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
2709 FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
2710 FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
2711 FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
2712 FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
2713 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2714 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2715 FILTER_EMIT(RFCR_RFADDR_MC8, mchash[8]);
2716 FILTER_EMIT(RFCR_RFADDR_MC9, mchash[9]);
2717 FILTER_EMIT(RFCR_RFADDR_MC10, mchash[10]);
2718 FILTER_EMIT(RFCR_RFADDR_MC11, mchash[11]);
2719 FILTER_EMIT(RFCR_RFADDR_MC12, mchash[12]);
2720 FILTER_EMIT(RFCR_RFADDR_MC13, mchash[13]);
2721 FILTER_EMIT(RFCR_RFADDR_MC14, mchash[14]);
2722 FILTER_EMIT(RFCR_RFADDR_MC15, mchash[15]);
2723 }
2724 }
2725 #undef FILTER_EMIT
2726
2727 /*
2728 * Re-enable the receiver filter.
2729 */
2730 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2731 }
2732 #endif /* ! DP83820 */
2733
2734 /*
2735 * sip_dp83815_set_filter:
2736 *
2737 * Set up the receive filter.
2738 */
2739 void
2740 SIP_DECL(dp83815_set_filter)(struct sip_softc *sc)
2741 {
2742 bus_space_tag_t st = sc->sc_st;
2743 bus_space_handle_t sh = sc->sc_sh;
2744 struct ethercom *ec = &sc->sc_ethercom;
2745 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2746 struct ether_multi *enm;
2747 u_int8_t *cp;
2748 struct ether_multistep step;
2749 u_int32_t crc, hash, slot, bit;
2750 #ifdef DP83820
2751 #define MCHASH_NWORDS 128
2752 #else
2753 #define MCHASH_NWORDS 32
2754 #endif /* DP83820 */
2755 u_int16_t mchash[MCHASH_NWORDS];
2756 int i;
2757
2758 /*
2759 * Initialize the prototype RFCR.
2760 * Enable the receive filter, and accept on
2761 * Perfect (destination address) Match
2762 * If IFF_BROADCAST, also accept all broadcast packets.
2763 * If IFF_PROMISC, accept all unicast packets (and later, set
2764 * IFF_ALLMULTI and accept all multicast, too).
2765 */
2766 sc->sc_rfcr = RFCR_RFEN | RFCR_APM;
2767 if (ifp->if_flags & IFF_BROADCAST)
2768 sc->sc_rfcr |= RFCR_AAB;
2769 if (ifp->if_flags & IFF_PROMISC) {
2770 sc->sc_rfcr |= RFCR_AAP;
2771 goto allmulti;
2772 }
2773
2774 #ifdef DP83820
2775 /*
2776 * Set up the DP83820 multicast address filter by passing all multicast
2777 * addresses through a CRC generator, and then using the high-order
2778 * 11 bits as an index into the 2048 bit multicast hash table. The
2779 * high-order 7 bits select the slot, while the low-order 4 bits
2780 * select the bit within the slot. Note that only the low 16-bits
2781 * of each filter word are used, and there are 128 filter words.
2782 */
2783 #else
2784 /*
2785 * Set up the DP83815 multicast address filter by passing all multicast
2786 * addresses through a CRC generator, and then using the high-order
2787 * 9 bits as an index into the 512 bit multicast hash table. The
2788 * high-order 5 bits select the slot, while the low-order 4 bits
2789 * select the bit within the slot. Note that only the low 16-bits
2790 * of each filter word are used, and there are 32 filter words.
2791 */
2792 #endif /* DP83820 */
2793
2794 memset(mchash, 0, sizeof(mchash));
2795
2796 ifp->if_flags &= ~IFF_ALLMULTI;
2797 ETHER_FIRST_MULTI(step, ec, enm);
2798 if (enm == NULL)
2799 goto setit;
2800 while (enm != NULL) {
2801 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2802 /*
2803 * We must listen to a range of multicast addresses.
2804 * For now, just accept all multicasts, rather than
2805 * trying to set only those filter bits needed to match
2806 * the range. (At this time, the only use of address
2807 * ranges is for IP multicast routing, for which the
2808 * range is big enough to require all bits set.)
2809 */
2810 goto allmulti;
2811 }
2812
2813 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2814
2815 #ifdef DP83820
2816 /* Just want the 11 most significant bits. */
2817 hash = crc >> 21;
2818 #else
2819 /* Just want the 9 most significant bits. */
2820 hash = crc >> 23;
2821 #endif /* DP83820 */
2822
2823 slot = hash >> 4;
2824 bit = hash & 0xf;
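		/* e.g. a hash of 0x1a7 selects filter word 0x1a, bit 7. */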
2825
2826 /* Set the corresponding bit in the hash table. */
2827 mchash[slot] |= 1 << bit;
2828
2829 ETHER_NEXT_MULTI(step, enm);
2830 }
2831 sc->sc_rfcr |= RFCR_MHEN;
2832 goto setit;
2833
2834 allmulti:
2835 ifp->if_flags |= IFF_ALLMULTI;
2836 sc->sc_rfcr |= RFCR_AAM;
2837
2838 setit:
2839 #define FILTER_EMIT(addr, data) \
2840 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2841 delay(1); \
2842 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2843 delay(1)
2844
2845 /*
2846 * Disable receive filter, and program the node address.
2847 */
2848 cp = LLADDR(ifp->if_sadl);
2849 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]);
2850 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]);
2851 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]);
2852
2853 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2854 /*
2855 * Program the multicast hash table.
2856 */
2857 for (i = 0; i < MCHASH_NWORDS; i++) {
2858 FILTER_EMIT(RFCR_NS_RFADDR_FILTMEM + (i * 2),
2859 mchash[i]);
2860 }
2861 }
2862 #undef FILTER_EMIT
2863 #undef MCHASH_NWORDS
2864
2865 /*
2866 * Re-enable the receiver filter.
2867 */
2868 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2869 }
2870
2871 #if defined(DP83820)
2872 /*
2873 * sip_dp83820_mii_readreg: [mii interface function]
2874 *
2875 * Read a PHY register on the MII of the DP83820.
2876 */
2877 int
2878 SIP_DECL(dp83820_mii_readreg)(struct device *self, int phy, int reg)
2879 {
2880 struct sip_softc *sc = (void *) self;
2881
2882 if (sc->sc_cfg & CFG_TBI_EN) {
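		/*
		 * In TBI mode the standard MII registers are emulated by
		 * the chip's memory-mapped TBI registers, so translate the
		 * register number and patch up the quirks noted below.
		 */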
2883 bus_addr_t tbireg;
2884 int rv;
2885
2886 if (phy != 0)
2887 return (0);
2888
2889 switch (reg) {
2890 case MII_BMCR: tbireg = SIP_TBICR; break;
2891 case MII_BMSR: tbireg = SIP_TBISR; break;
2892 case MII_ANAR: tbireg = SIP_TANAR; break;
2893 case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
2894 case MII_ANER: tbireg = SIP_TANER; break;
2895 case MII_EXTSR:
2896 /*
2897 * Don't even bother reading the TESR register.
2898 * The manual documents that the device has
2899 * 1000baseX full/half capability, but the
2900 			 * register itself seems to read back 0 on some
2901 * boards. Just hard-code the result.
2902 */
2903 return (EXTSR_1000XFDX|EXTSR_1000XHDX);
2904
2905 default:
2906 return (0);
2907 }
2908
2909 rv = bus_space_read_4(sc->sc_st, sc->sc_sh, tbireg) & 0xffff;
2910 if (tbireg == SIP_TBISR) {
2911 /* LINK and ACOMP are switched! */
2912 int val = rv;
2913
2914 rv = 0;
2915 if (val & TBISR_MR_LINK_STATUS)
2916 rv |= BMSR_LINK;
2917 if (val & TBISR_MR_AN_COMPLETE)
2918 rv |= BMSR_ACOMP;
2919
2920 /*
2921 * The manual claims this register reads back 0
2922 * on hard and soft reset. But we want to let
2923 * the gentbi driver know that we support auto-
2924 * negotiation, so hard-code this bit in the
2925 * result.
2926 */
2927 rv |= BMSR_ANEG | BMSR_EXTSTAT;
2928 }
2929
2930 return (rv);
2931 }
2932
2933 return (mii_bitbang_readreg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2934 phy, reg));
2935 }
2936
2937 /*
2938 * sip_dp83820_mii_writereg: [mii interface function]
2939 *
2940 * Write a PHY register on the MII of the DP83820.
2941 */
2942 void
2943 SIP_DECL(dp83820_mii_writereg)(struct device *self, int phy, int reg, int val)
2944 {
2945 struct sip_softc *sc = (void *) self;
2946
2947 if (sc->sc_cfg & CFG_TBI_EN) {
2948 bus_addr_t tbireg;
2949
2950 if (phy != 0)
2951 return;
2952
2953 switch (reg) {
2954 case MII_BMCR: tbireg = SIP_TBICR; break;
2955 case MII_ANAR: tbireg = SIP_TANAR; break;
2956 case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
2957 default:
2958 return;
2959 }
2960
2961 bus_space_write_4(sc->sc_st, sc->sc_sh, tbireg, val);
2962 return;
2963 }
2964
2965 mii_bitbang_writereg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2966 phy, reg, val);
2967 }
2968
2969 /*
2970  * sip_dp83820_mii_statchg:	[mii interface function]
2971 *
2972 * Callback from MII layer when media changes.
2973 */
2974 void
2975 SIP_DECL(dp83820_mii_statchg)(struct device *self)
2976 {
2977 struct sip_softc *sc = (struct sip_softc *) self;
2978 u_int32_t cfg;
2979
2980 /*
2981 * Update TXCFG for full-duplex operation.
2982 */
2983 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2984 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2985 else
2986 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2987
2988 /*
2989 * Update RXCFG for full-duplex or loopback.
2990 */
2991 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2992 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2993 sc->sc_rxcfg |= RXCFG_ATX;
2994 else
2995 sc->sc_rxcfg &= ~RXCFG_ATX;
2996
2997 /*
2998 * Update CFG for MII/GMII.
2999 */
3000 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
3001 cfg = sc->sc_cfg | CFG_MODE_1000;
3002 else
3003 cfg = sc->sc_cfg;
3004
3005 /*
3006 * XXX 802.3x flow control.
3007 */
3008
3009 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
3010 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3011 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3012 }
3013
3014 /*
3015 * sip_dp83820_mii_bitbang_read: [mii bit-bang interface function]
3016 *
3017 * Read the MII serial port for the MII bit-bang module.
3018 */
3019 u_int32_t
3020 SIP_DECL(dp83820_mii_bitbang_read)(struct device *self)
3021 {
3022 struct sip_softc *sc = (void *) self;
3023
3024 return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR));
3025 }
3026
3027 /*
3028  * sip_dp83820_mii_bitbang_write: [mii bit-bang interface function]
3029 *
3030 * Write the MII serial port for the MII bit-bang module.
3031 */
3032 void
3033 SIP_DECL(dp83820_mii_bitbang_write)(struct device *self, u_int32_t val)
3034 {
3035 struct sip_softc *sc = (void *) self;
3036
3037 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val);
3038 }
3039 #else /* ! DP83820 */
3040 /*
3041 * sip_sis900_mii_readreg: [mii interface function]
3042 *
3043 * Read a PHY register on the MII.
3044 */
3045 int
3046 SIP_DECL(sis900_mii_readreg)(struct device *self, int phy, int reg)
3047 {
3048 struct sip_softc *sc = (struct sip_softc *) self;
3049 u_int32_t enphy;
3050
3051 /*
3052 * The SiS 900 has only an internal PHY on the MII. Only allow
3053 * MII address 0.
3054 */
3055 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 &&
3056 sc->sc_rev < SIS_REV_635 && phy != 0)
3057 return (0);
3058
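	/*
	 * The MII management cycle is done in hardware through the ENPHY
	 * register: write the PHY and register addresses with the command
	 * and ENPHY_ACCESS set, spin until ENPHY_ACCESS clears, then pull
	 * the 16-bit result out of the data field.
	 */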
3059 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
3060 (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
3061 ENPHY_RWCMD | ENPHY_ACCESS);
3062 do {
3063 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
3064 } while (enphy & ENPHY_ACCESS);
3065 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
3066 }
3067
3068 /*
3069 * sip_sis900_mii_writereg: [mii interface function]
3070 *
3071 * Write a PHY register on the MII.
3072 */
3073 void
3074 SIP_DECL(sis900_mii_writereg)(struct device *self, int phy, int reg, int val)
3075 {
3076 struct sip_softc *sc = (struct sip_softc *) self;
3077 u_int32_t enphy;
3078
3079 /*
3080 * The SiS 900 has only an internal PHY on the MII. Only allow
3081 * MII address 0.
3082 */
3083 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 &&
3084 sc->sc_rev < SIS_REV_635 && phy != 0)
3085 return;
3086
3087 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
3088 (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
3089 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
3090 do {
3091 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
3092 } while (enphy & ENPHY_ACCESS);
3093 }
3094
3095 /*
3096 * sip_sis900_mii_statchg: [mii interface function]
3097 *
3098 * Callback from MII layer when media changes.
3099 */
3100 void
3101 SIP_DECL(sis900_mii_statchg)(struct device *self)
3102 {
3103 struct sip_softc *sc = (struct sip_softc *) self;
3104 u_int32_t flowctl;
3105
3106 /*
3107 * Update TXCFG for full-duplex operation.
3108 */
3109 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3110 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3111 else
3112 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3113
3114 /*
3115 * Update RXCFG for full-duplex or loopback.
3116 */
3117 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3118 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3119 sc->sc_rxcfg |= RXCFG_ATX;
3120 else
3121 sc->sc_rxcfg &= ~RXCFG_ATX;
3122
3123 /*
3124 * Update IMR for use of 802.3x flow control.
3125 */
3126 if ((sc->sc_mii.mii_media_active & IFM_FLOW) != 0) {
3127 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
3128 flowctl = FLOWCTL_FLOWEN;
3129 } else {
3130 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
3131 flowctl = 0;
3132 }
3133
3134 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3135 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3136 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
3137 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
3138 }
3139
3140 /*
3141 * sip_dp83815_mii_readreg: [mii interface function]
3142 *
3143 * Read a PHY register on the MII.
3144 */
3145 int
3146 SIP_DECL(dp83815_mii_readreg)(struct device *self, int phy, int reg)
3147 {
3148 struct sip_softc *sc = (struct sip_softc *) self;
3149 u_int32_t val;
3150
3151 /*
3152 * The DP83815 only has an internal PHY. Only allow
3153 * MII address 0.
3154 */
3155 if (phy != 0)
3156 return (0);
3157
3158 /*
3159 * Apparently, after a reset, the DP83815 can take a while
3160 * to respond. During this recovery period, the BMSR returns
3161 * a value of 0. Catch this -- it's not supposed to happen
3162 * (the BMSR has some hardcoded-to-1 bits), and wait for the
3163 * PHY to come back to life.
3164 *
3165 * This works out because the BMSR is the first register
3166 * read during the PHY probe process.
3167 */
3168 do {
3169 val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg));
3170 } while (reg == MII_BMSR && val == 0);
3171
3172 return (val & 0xffff);
3173 }
3174
3175 /*
3176 * sip_dp83815_mii_writereg: [mii interface function]
3177 *
3178 * Write a PHY register to the MII.
3179 */
3180 void
3181 SIP_DECL(dp83815_mii_writereg)(struct device *self, int phy, int reg, int val)
3182 {
3183 struct sip_softc *sc = (struct sip_softc *) self;
3184
3185 /*
3186 * The DP83815 only has an internal PHY. Only allow
3187 * MII address 0.
3188 */
3189 if (phy != 0)
3190 return;
3191
3192 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val);
3193 }
3194
3195 /*
3196 * sip_dp83815_mii_statchg: [mii interface function]
3197 *
3198 * Callback from MII layer when media changes.
3199 */
3200 void
3201 SIP_DECL(dp83815_mii_statchg)(struct device *self)
3202 {
3203 struct sip_softc *sc = (struct sip_softc *) self;
3204
3205 /*
3206 * Update TXCFG for full-duplex operation.
3207 */
3208 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3209 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3210 else
3211 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3212
3213 /*
3214 * Update RXCFG for full-duplex or loopback.
3215 */
3216 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3217 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3218 sc->sc_rxcfg |= RXCFG_ATX;
3219 else
3220 sc->sc_rxcfg &= ~RXCFG_ATX;
3221
3222 /*
3223 * XXX 802.3x flow control.
3224 */
3225
3226 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3227 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3228 }
3229 #endif /* DP83820 */
3230
3231 #if defined(DP83820)
3232 void
3233 SIP_DECL(dp83820_read_macaddr)(struct sip_softc *sc,
3234 const struct pci_attach_args *pa, u_int8_t *enaddr)
3235 {
3236 u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2];
3237 u_int8_t cksum, *e, match;
3238 int i;
3239
3240 /*
3241 * EEPROM data format for the DP83820 can be found in
3242 * the DP83820 manual, section 4.2.4.
3243 */
3244
3245 SIP_DECL(read_eeprom)(sc, 0,
3246 sizeof(eeprom_data) / sizeof(eeprom_data[0]), eeprom_data);
3247
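	/*
	 * ~(match - 1) is the two's-complement negation of the stored
	 * checksum byte, so the test below verifies that 0x55 plus the
	 * sum of all preceding EEPROM bytes plus the checksum byte is
	 * zero modulo 256.
	 */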
3248 match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8;
3249 match = ~(match - 1);
3250
3251 cksum = 0x55;
3252 e = (u_int8_t *) eeprom_data;
3253 for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++)
3254 cksum += *e++;
3255
3256 if (cksum != match)
3257 		printf("%s: Checksum (%x) mismatch (%x)\n",
3258 sc->sc_dev.dv_xname, cksum, match);
3259
3260 enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff;
3261 enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8;
3262 enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff;
3263 enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8;
3264 enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff;
3265 enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8;
3266 }
3267 #else /* ! DP83820 */
3268 void
3269 SIP_DECL(sis900_read_macaddr)(struct sip_softc *sc,
3270 const struct pci_attach_args *pa, u_int8_t *enaddr)
3271 {
3272 u_int16_t myea[ETHER_ADDR_LEN / 2];
3273
3274 switch (sc->sc_rev) {
3275 case SIS_REV_630S:
3276 case SIS_REV_630E:
3277 case SIS_REV_630EA1:
3278 case SIS_REV_630ET:
3279 case SIS_REV_635:
3280 /*
3281 * The MAC address for the on-board Ethernet of
3282 * the SiS 630 chipset is in the NVRAM. Kick
3283 * the chip into re-loading it from NVRAM, and
3284 * read the MAC address out of the filter registers.
3285 */
3286 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD);
3287
3288 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3289 RFCR_RFADDR_NODE0);
3290 myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3291 0xffff;
3292
3293 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3294 RFCR_RFADDR_NODE2);
3295 myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3296 0xffff;
3297
3298 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3299 RFCR_RFADDR_NODE4);
3300 myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3301 0xffff;
3302 break;
3303
3304 default:
3305 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
3306 sizeof(myea) / sizeof(myea[0]), myea);
3307 }
3308
3309 enaddr[0] = myea[0] & 0xff;
3310 enaddr[1] = myea[0] >> 8;
3311 enaddr[2] = myea[1] & 0xff;
3312 enaddr[3] = myea[1] >> 8;
3313 enaddr[4] = myea[2] & 0xff;
3314 enaddr[5] = myea[2] >> 8;
3315 }
3316
3317 /* Table and macro to bit-reverse an octet. */
3318 static const u_int8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
3319 #define bbr(v) ((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf])
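/*
 * e.g. bbr(0x01) == 0x80, and bbr(0xa5) == 0xa5 (a bit-palindrome).
 */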
3320
3321 void
3322 SIP_DECL(dp83815_read_macaddr)(struct sip_softc *sc,
3323 const struct pci_attach_args *pa, u_int8_t *enaddr)
3324 {
3325 u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea;
3326 u_int8_t cksum, *e, match;
3327 int i;
3328
3329 SIP_DECL(read_eeprom)(sc, 0, sizeof(eeprom_data) /
3330 sizeof(eeprom_data[0]), eeprom_data);
3331
3332 match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8;
3333 match = ~(match - 1);
3334
3335 cksum = 0x55;
3336 e = (u_int8_t *) eeprom_data;
3337 	for (i = 0; i < SIP_DP83815_EEPROM_CHECKSUM; i++) {
3338 cksum += *e++;
3339 }
3340 if (cksum != match) {
3341 		printf("%s: Checksum (%x) mismatch (%x)\n",
3342 sc->sc_dev.dv_xname, cksum, match);
3343 }
3344
3345 /*
3346 * Unrolled because it makes slightly more sense this way.
3347 * The DP83815 stores the MAC address in bit 0 of word 6
3348 * through bit 15 of word 8.
3349 */
3350 ea = &eeprom_data[6];
3351 enaddr[0] = ((*ea & 0x1) << 7);
3352 ea++;
3353 enaddr[0] |= ((*ea & 0xFE00) >> 9);
3354 enaddr[1] = ((*ea & 0x1FE) >> 1);
3355 enaddr[2] = ((*ea & 0x1) << 7);
3356 ea++;
3357 enaddr[2] |= ((*ea & 0xFE00) >> 9);
3358 enaddr[3] = ((*ea & 0x1FE) >> 1);
3359 enaddr[4] = ((*ea & 0x1) << 7);
3360 ea++;
3361 enaddr[4] |= ((*ea & 0xFE00) >> 9);
3362 enaddr[5] = ((*ea & 0x1FE) >> 1);
3363
3364 /*
3365 * In case that's not weird enough, we also need to reverse
3366 * the bits in each byte. This all actually makes more sense
3367 * if you think about the EEPROM storage as an array of bits
3368 * being shifted into bytes, but that's not how we're looking
3369 * at it here...
3370 */
3371 	for (i = 0; i < 6; i++)
3372 enaddr[i] = bbr(enaddr[i]);
3373 }
3374 #endif /* DP83820 */
3375
3376 /*
3377 * sip_mediastatus: [ifmedia interface function]
3378 *
3379 * Get the current interface media status.
3380 */
3381 void
3382 SIP_DECL(mediastatus)(struct ifnet *ifp, struct ifmediareq *ifmr)
3383 {
3384 struct sip_softc *sc = ifp->if_softc;
3385
3386 mii_pollstat(&sc->sc_mii);
3387 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3388 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3389 }
3390
3391 /*
3392 * sip_mediachange: [ifmedia interface function]
3393 *
3394 * Set hardware to newly-selected media.
3395 */
3396 int
3397 SIP_DECL(mediachange)(struct ifnet *ifp)
3398 {
3399 struct sip_softc *sc = ifp->if_softc;
3400
3401 if (ifp->if_flags & IFF_UP)
3402 mii_mediachg(&sc->sc_mii);
3403 return (0);
3404 }
3405