/*	$NetBSD: if_sip.c,v 1.55 2002/06/30 18:04:12 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999 Network Computer, Inc.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of Network Computer, Inc. nor the names of its
52 * contributors may be used to endorse or promote products derived
53 * from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
56 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
66 */
67
68 /*
69 * Device driver for the Silicon Integrated Systems SiS 900,
70 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and
71 * National Semiconductor DP83820 10/100/1000 PCI Ethernet
72 * controllers.
73 *
74 * Originally written to support the SiS 900 by Jason R. Thorpe for
75 * Network Computer, Inc.
76 *
77 * TODO:
78 *
79 * - Support the 10-bit interface on the DP83820 (for fiber).
80 *
81 * - Reduce the interrupt load.
82 */
83
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.55 2002/06/30 18:04:12 thorpej Exp $");
86
87 #include "bpfilter.h"
88
89 #include <sys/param.h>
90 #include <sys/systm.h>
91 #include <sys/callout.h>
92 #include <sys/mbuf.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/socket.h>
96 #include <sys/ioctl.h>
97 #include <sys/errno.h>
98 #include <sys/device.h>
99 #include <sys/queue.h>
100
101 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
102
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111
112 #include <machine/bus.h>
113 #include <machine/intr.h>
114 #include <machine/endian.h>
115
116 #include <dev/mii/mii.h>
117 #include <dev/mii/miivar.h>
118 #ifdef DP83820
119 #include <dev/mii/mii_bitbang.h>
120 #endif /* DP83820 */
121
122 #include <dev/pci/pcireg.h>
123 #include <dev/pci/pcivar.h>
124 #include <dev/pci/pcidevs.h>
125
126 #include <dev/pci/if_sipreg.h>
127
128 #ifdef DP83820 /* DP83820 Gigabit Ethernet */
129 #define SIP_DECL(x) __CONCAT(gsip_,x)
130 #else /* SiS900 and DP83815 */
131 #define SIP_DECL(x) __CONCAT(sip_,x)
132 #endif
133
134 #define SIP_STR(x) __STRING(SIP_DECL(x))
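
/*
 * Note: every global symbol in this file is wrapped in SIP_DECL() so the
 * same source can be built both with DP83820 defined (gsip_* symbols, for
 * the DP83820 gigabit part) and without it (sip_* symbols, for the
 * SiS 900/7016 and DP83815).
 */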
135
136 /*
137 * Transmit descriptor list size. This is arbitrary, but allocate
 * enough descriptors for 256 pending transmissions, and 8 segments
139 * per packet. This MUST work out to a power of 2.
140 */
141 #define SIP_NTXSEGS 16
142 #define SIP_NTXSEGS_ALLOC 8
143
144 #define SIP_TXQUEUELEN 256
145 #define SIP_NTXDESC (SIP_TXQUEUELEN * SIP_NTXSEGS_ALLOC)
146 #define SIP_NTXDESC_MASK (SIP_NTXDESC - 1)
147 #define SIP_NEXTTX(x) (((x) + 1) & SIP_NTXDESC_MASK)
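
/*
 * With SIP_TXQUEUELEN at 256 and SIP_NTXSEGS_ALLOC at 8, SIP_NTXDESC works
 * out to 2048 descriptors -- a power of 2, which is what lets SIP_NEXTTX()
 * advance around the ring with a simple mask instead of a modulo.
 */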
148
#if defined(DP83820)
150 #define TX_DMAMAP_SIZE ETHER_MAX_LEN_JUMBO
151 #else
152 #define TX_DMAMAP_SIZE MCLBYTES
153 #endif
154
155 /*
156 * Receive descriptor list size. We have one Rx buffer per incoming
157 * packet, so this logic is a little simpler.
158 *
159 * Actually, on the DP83820, we allow the packet to consume more than
160 * one buffer, in order to support jumbo Ethernet frames. In that
161 * case, a packet may consume up to 5 buffers (assuming a 2048 byte
162 * mbuf cluster). 256 receive buffers is only 51 maximum size packets,
163 * so we'd better be quick about handling receive interrupts.
164 */
165 #if defined(DP83820)
166 #define SIP_NRXDESC 256
167 #else
168 #define SIP_NRXDESC 128
169 #endif /* DP83820 */
170 #define SIP_NRXDESC_MASK (SIP_NRXDESC - 1)
171 #define SIP_NEXTRX(x) (((x) + 1) & SIP_NRXDESC_MASK)
172
173 /*
174 * Control structures are DMA'd to the SiS900 chip. We allocate them in
175 * a single clump that maps to a single DMA segment to make several things
176 * easier.
177 */
178 struct sip_control_data {
179 /*
180 * The transmit descriptors.
181 */
182 struct sip_desc scd_txdescs[SIP_NTXDESC];
183
184 /*
185 * The receive descriptors.
186 */
187 struct sip_desc scd_rxdescs[SIP_NRXDESC];
188 };
189
190 #define SIP_CDOFF(x) offsetof(struct sip_control_data, x)
191 #define SIP_CDTXOFF(x) SIP_CDOFF(scd_txdescs[(x)])
192 #define SIP_CDRXOFF(x) SIP_CDOFF(scd_rxdescs[(x)])
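
/*
 * SIP_CDTXOFF()/SIP_CDRXOFF() give the byte offset of a descriptor within
 * the control-data clump; adding that offset to the DMA address of the
 * clump (see SIP_CDTXADDR()/SIP_CDRXADDR() below) yields the bus address
 * the chip should use for that descriptor.
 */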
193
194 /*
195 * Software state for transmit jobs.
196 */
197 struct sip_txsoft {
198 struct mbuf *txs_mbuf; /* head of our mbuf chain */
199 bus_dmamap_t txs_dmamap; /* our DMA map */
200 int txs_firstdesc; /* first descriptor in packet */
201 int txs_lastdesc; /* last descriptor in packet */
202 SIMPLEQ_ENTRY(sip_txsoft) txs_q;
203 };
204
205 SIMPLEQ_HEAD(sip_txsq, sip_txsoft);
206
207 /*
208 * Software state for receive jobs.
209 */
210 struct sip_rxsoft {
211 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
212 bus_dmamap_t rxs_dmamap; /* our DMA map */
213 };
214
215 /*
216 * Software state per device.
217 */
218 struct sip_softc {
219 struct device sc_dev; /* generic device information */
220 bus_space_tag_t sc_st; /* bus space tag */
221 bus_space_handle_t sc_sh; /* bus space handle */
222 bus_dma_tag_t sc_dmat; /* bus DMA tag */
223 struct ethercom sc_ethercom; /* ethernet common data */
224 void *sc_sdhook; /* shutdown hook */
225
226 const struct sip_product *sc_model; /* which model are we? */
227 int sc_rev; /* chip revision */
228
229 void *sc_ih; /* interrupt cookie */
230
231 struct mii_data sc_mii; /* MII/media information */
232
233 struct callout sc_tick_ch; /* tick callout */
234
235 bus_dmamap_t sc_cddmamap; /* control data DMA map */
236 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
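/*
 * sc_cddma is shorthand for the bus (DMA) address of the start of the
 * control data; SIP_CDTXADDR()/SIP_CDRXADDR() below add per-descriptor
 * offsets to it.
 */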
237
238 /*
239 * Software state for transmit and receive descriptors.
240 */
241 struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
242 struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];
243
244 /*
245 * Control data structures.
246 */
247 struct sip_control_data *sc_control_data;
248 #define sc_txdescs sc_control_data->scd_txdescs
249 #define sc_rxdescs sc_control_data->scd_rxdescs
250
251 #ifdef SIP_EVENT_COUNTERS
252 /*
253 * Event counters.
254 */
255 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
256 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
257 struct evcnt sc_ev_txintr; /* Tx interrupts */
258 struct evcnt sc_ev_rxintr; /* Rx interrupts */
259 #ifdef DP83820
260 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
261 struct evcnt sc_ev_rxtcpsum; /* TCP checksums checked in-bound */
	struct evcnt sc_ev_rxudpsum;	/* UDP checksums checked in-bound */
263 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
264 struct evcnt sc_ev_txtcpsum; /* TCP checksums comp. out-bound */
265 struct evcnt sc_ev_txudpsum; /* UDP checksums comp. out-bound */
266 #endif /* DP83820 */
267 #endif /* SIP_EVENT_COUNTERS */
268
269 u_int32_t sc_txcfg; /* prototype TXCFG register */
270 u_int32_t sc_rxcfg; /* prototype RXCFG register */
271 u_int32_t sc_imr; /* prototype IMR register */
272 u_int32_t sc_rfcr; /* prototype RFCR register */
273
274 u_int32_t sc_cfg; /* prototype CFG register */
275
276 #ifdef DP83820
277 u_int32_t sc_gpior; /* prototype GPIOR register */
278 #endif /* DP83820 */
279
280 u_int32_t sc_tx_fill_thresh; /* transmit fill threshold */
281 u_int32_t sc_tx_drain_thresh; /* transmit drain threshold */
282
283 u_int32_t sc_rx_drain_thresh; /* receive drain threshold */
284
285 int sc_flags; /* misc. flags; see below */
286
287 int sc_txfree; /* number of free Tx descriptors */
288 int sc_txnext; /* next ready Tx descriptor */
289
290 struct sip_txsq sc_txfreeq; /* free Tx descsofts */
291 struct sip_txsq sc_txdirtyq; /* dirty Tx descsofts */
292
293 int sc_rxptr; /* next ready Rx descriptor/descsoft */
294 #if defined(DP83820)
295 int sc_rxdiscard;
296 int sc_rxlen;
297 struct mbuf *sc_rxhead;
298 struct mbuf *sc_rxtail;
299 struct mbuf **sc_rxtailp;
300 #endif /* DP83820 */
301 };
302
303 /* sc_flags */
304 #define SIPF_PAUSED 0x00000001 /* paused (802.3x flow control) */
305
306 #ifdef DP83820
307 #define SIP_RXCHAIN_RESET(sc) \
308 do { \
309 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
310 *(sc)->sc_rxtailp = NULL; \
311 (sc)->sc_rxlen = 0; \
312 } while (/*CONSTCOND*/0)
313
314 #define SIP_RXCHAIN_LINK(sc, m) \
315 do { \
316 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
317 (sc)->sc_rxtailp = &(m)->m_next; \
318 } while (/*CONSTCOND*/0)
319 #endif /* DP83820 */
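
/*
 * The Rx chain is kept as a singly linked mbuf list: sc_rxtailp always
 * points at the m_next field of the last mbuf (or at sc_rxhead when the
 * chain is empty), so SIP_RXCHAIN_LINK() appends in constant time and
 * SIP_RXCHAIN_RESET() simply points the tail pointer back at the head.
 */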
320
321 #ifdef SIP_EVENT_COUNTERS
322 #define SIP_EVCNT_INCR(ev) (ev)->ev_count++
323 #else
324 #define SIP_EVCNT_INCR(ev) /* nothing */
325 #endif
326
327 #define SIP_CDTXADDR(sc, x) ((sc)->sc_cddma + SIP_CDTXOFF((x)))
328 #define SIP_CDRXADDR(sc, x) ((sc)->sc_cddma + SIP_CDRXOFF((x)))
329
330 #define SIP_CDTXSYNC(sc, x, n, ops) \
331 do { \
332 int __x, __n; \
333 \
334 __x = (x); \
335 __n = (n); \
336 \
337 /* If it will wrap around, sync to the end of the ring. */ \
338 if ((__x + __n) > SIP_NTXDESC) { \
339 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
340 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * \
341 (SIP_NTXDESC - __x), (ops)); \
342 __n -= (SIP_NTXDESC - __x); \
343 __x = 0; \
344 } \
345 \
346 /* Now sync whatever is left. */ \
347 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
348 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops)); \
349 } while (0)
350
351 #define SIP_CDRXSYNC(sc, x, ops) \
352 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
353 SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))
354
355 #ifdef DP83820
356 #define SIP_INIT_RXDESC_EXTSTS __sipd->sipd_extsts = 0;
357 #define SIP_RXBUF_LEN (MCLBYTES - 4)
358 #else
359 #define SIP_INIT_RXDESC_EXTSTS /* nothing */
360 #define SIP_RXBUF_LEN (MCLBYTES - 1) /* field width */
361 #endif
362 #define SIP_INIT_RXDESC(sc, x) \
363 do { \
364 struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
365 struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)]; \
366 \
367 __sipd->sipd_link = \
368 htole32(SIP_CDRXADDR((sc), SIP_NEXTRX((x)))); \
369 __sipd->sipd_bufptr = \
370 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr); \
371 __sipd->sipd_cmdsts = htole32(CMDSTS_INTR | \
372 (SIP_RXBUF_LEN & CMDSTS_SIZE_MASK)); \
373 SIP_INIT_RXDESC_EXTSTS \
374 SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
375 } while (0)
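
/*
 * SIP_INIT_RXDESC() (re)arms receive descriptor x: it links the descriptor
 * to the next one in the ring, points it at its mbuf's data buffer, writes
 * a fresh cmdsts (interrupt-on-completion plus the buffer length, with
 * CMDSTS_OWN clear so the chip may fill it), clears the extended status
 * word on the DP83820, and syncs the descriptor for the chip to see.
 */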
376
377 #define SIP_CHIP_VERS(sc, v, p, r) \
378 ((sc)->sc_model->sip_vendor == (v) && \
379 (sc)->sc_model->sip_product == (p) && \
380 (sc)->sc_rev == (r))
381
382 #define SIP_CHIP_MODEL(sc, v, p) \
383 ((sc)->sc_model->sip_vendor == (v) && \
384 (sc)->sc_model->sip_product == (p))
385
386 #if !defined(DP83820)
387 #define SIP_SIS900_REV(sc, rev) \
388 SIP_CHIP_VERS((sc), PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, (rev))
389 #endif
390
391 #define SIP_TIMEOUT 1000
392
393 void SIP_DECL(start)(struct ifnet *);
394 void SIP_DECL(watchdog)(struct ifnet *);
395 int SIP_DECL(ioctl)(struct ifnet *, u_long, caddr_t);
396 int SIP_DECL(init)(struct ifnet *);
397 void SIP_DECL(stop)(struct ifnet *, int);
398
399 void SIP_DECL(shutdown)(void *);
400
401 void SIP_DECL(reset)(struct sip_softc *);
402 void SIP_DECL(rxdrain)(struct sip_softc *);
403 int SIP_DECL(add_rxbuf)(struct sip_softc *, int);
404 void SIP_DECL(read_eeprom)(struct sip_softc *, int, int, u_int16_t *);
405 void SIP_DECL(tick)(void *);
406
407 #if !defined(DP83820)
408 void SIP_DECL(sis900_set_filter)(struct sip_softc *);
409 #endif /* ! DP83820 */
410 void SIP_DECL(dp83815_set_filter)(struct sip_softc *);
411
412 #if defined(DP83820)
413 void SIP_DECL(dp83820_read_macaddr)(struct sip_softc *,
414 const struct pci_attach_args *, u_int8_t *);
415 #else
416 void SIP_DECL(sis900_read_macaddr)(struct sip_softc *,
417 const struct pci_attach_args *, u_int8_t *);
418 void SIP_DECL(dp83815_read_macaddr)(struct sip_softc *,
419 const struct pci_attach_args *, u_int8_t *);
420 #endif /* DP83820 */
421
422 int SIP_DECL(intr)(void *);
423 void SIP_DECL(txintr)(struct sip_softc *);
424 void SIP_DECL(rxintr)(struct sip_softc *);
425
426 #if defined(DP83820)
427 int SIP_DECL(dp83820_mii_readreg)(struct device *, int, int);
428 void SIP_DECL(dp83820_mii_writereg)(struct device *, int, int, int);
429 void SIP_DECL(dp83820_mii_statchg)(struct device *);
430 #else
431 int SIP_DECL(sis900_mii_readreg)(struct device *, int, int);
432 void SIP_DECL(sis900_mii_writereg)(struct device *, int, int, int);
433 void SIP_DECL(sis900_mii_statchg)(struct device *);
434
435 int SIP_DECL(dp83815_mii_readreg)(struct device *, int, int);
436 void SIP_DECL(dp83815_mii_writereg)(struct device *, int, int, int);
437 void SIP_DECL(dp83815_mii_statchg)(struct device *);
438 #endif /* DP83820 */
439
440 int SIP_DECL(mediachange)(struct ifnet *);
441 void SIP_DECL(mediastatus)(struct ifnet *, struct ifmediareq *);
442
443 int SIP_DECL(match)(struct device *, struct cfdata *, void *);
444 void SIP_DECL(attach)(struct device *, struct device *, void *);
445
446 int SIP_DECL(copy_small) = 0;
447
448 struct cfattach SIP_DECL(ca) = {
449 sizeof(struct sip_softc), SIP_DECL(match), SIP_DECL(attach),
450 };
451
452 /*
453 * Descriptions of the variants of the SiS900.
454 */
455 struct sip_variant {
456 int (*sipv_mii_readreg)(struct device *, int, int);
457 void (*sipv_mii_writereg)(struct device *, int, int, int);
458 void (*sipv_mii_statchg)(struct device *);
459 void (*sipv_set_filter)(struct sip_softc *);
460 void (*sipv_read_macaddr)(struct sip_softc *,
461 const struct pci_attach_args *, u_int8_t *);
462 };
463
464 #if defined(DP83820)
465 u_int32_t SIP_DECL(dp83820_mii_bitbang_read)(struct device *);
466 void SIP_DECL(dp83820_mii_bitbang_write)(struct device *, u_int32_t);
467
468 const struct mii_bitbang_ops SIP_DECL(dp83820_mii_bitbang_ops) = {
469 SIP_DECL(dp83820_mii_bitbang_read),
470 SIP_DECL(dp83820_mii_bitbang_write),
471 {
472 EROMAR_MDIO, /* MII_BIT_MDO */
473 EROMAR_MDIO, /* MII_BIT_MDI */
474 EROMAR_MDC, /* MII_BIT_MDC */
475 EROMAR_MDDIR, /* MII_BIT_DIR_HOST_PHY */
476 0, /* MII_BIT_DIR_PHY_HOST */
477 }
478 };
479 #endif /* DP83820 */
480
481 #if defined(DP83820)
482 const struct sip_variant SIP_DECL(variant_dp83820) = {
483 SIP_DECL(dp83820_mii_readreg),
484 SIP_DECL(dp83820_mii_writereg),
485 SIP_DECL(dp83820_mii_statchg),
486 SIP_DECL(dp83815_set_filter),
487 SIP_DECL(dp83820_read_macaddr),
488 };
489 #else
490 const struct sip_variant SIP_DECL(variant_sis900) = {
491 SIP_DECL(sis900_mii_readreg),
492 SIP_DECL(sis900_mii_writereg),
493 SIP_DECL(sis900_mii_statchg),
494 SIP_DECL(sis900_set_filter),
495 SIP_DECL(sis900_read_macaddr),
496 };
497
498 const struct sip_variant SIP_DECL(variant_dp83815) = {
499 SIP_DECL(dp83815_mii_readreg),
500 SIP_DECL(dp83815_mii_writereg),
501 SIP_DECL(dp83815_mii_statchg),
502 SIP_DECL(dp83815_set_filter),
503 SIP_DECL(dp83815_read_macaddr),
504 };
505 #endif /* DP83820 */
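
/*
 * Each sip_variant bundles the model-specific MII access, link-status,
 * receive-filter, and MAC-address-reading routines; the products[] table
 * below selects the right one for the chip that attached.
 */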
506
507 /*
508 * Devices supported by this driver.
509 */
510 const struct sip_product {
511 pci_vendor_id_t sip_vendor;
512 pci_product_id_t sip_product;
513 const char *sip_name;
514 const struct sip_variant *sip_variant;
515 } SIP_DECL(products)[] = {
516 #if defined(DP83820)
517 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83820,
518 "NatSemi DP83820 Gigabit Ethernet",
519 &SIP_DECL(variant_dp83820) },
520 #else
521 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900,
522 "SiS 900 10/100 Ethernet",
523 &SIP_DECL(variant_sis900) },
524 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016,
525 "SiS 7016 10/100 Ethernet",
526 &SIP_DECL(variant_sis900) },
527
528 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815,
529 "NatSemi DP83815 10/100 Ethernet",
530 &SIP_DECL(variant_dp83815) },
531 #endif /* DP83820 */
532
533 { 0, 0,
534 NULL,
535 NULL },
536 };
537
538 static const struct sip_product *
539 SIP_DECL(lookup)(const struct pci_attach_args *pa)
540 {
541 const struct sip_product *sip;
542
543 for (sip = SIP_DECL(products); sip->sip_name != NULL; sip++) {
544 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
545 PCI_PRODUCT(pa->pa_id) == sip->sip_product)
546 return (sip);
547 }
548 return (NULL);
549 }
550
551 int
552 SIP_DECL(match)(struct device *parent, struct cfdata *cf, void *aux)
553 {
554 struct pci_attach_args *pa = aux;
555
556 if (SIP_DECL(lookup)(pa) != NULL)
557 return (1);
558
559 return (0);
560 }
561
562 void
563 SIP_DECL(attach)(struct device *parent, struct device *self, void *aux)
564 {
565 struct sip_softc *sc = (struct sip_softc *) self;
566 struct pci_attach_args *pa = aux;
567 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
568 pci_chipset_tag_t pc = pa->pa_pc;
569 pci_intr_handle_t ih;
570 const char *intrstr = NULL;
571 bus_space_tag_t iot, memt;
572 bus_space_handle_t ioh, memh;
573 bus_dma_segment_t seg;
574 int ioh_valid, memh_valid;
575 int i, rseg, error;
576 const struct sip_product *sip;
577 pcireg_t pmode;
578 u_int8_t enaddr[ETHER_ADDR_LEN];
579 int pmreg;
580 #ifdef DP83820
581 pcireg_t memtype;
582 u_int32_t reg;
583 #endif /* DP83820 */
584
585 callout_init(&sc->sc_tick_ch);
586
587 sip = SIP_DECL(lookup)(pa);
588 if (sip == NULL) {
589 printf("\n");
590 panic(SIP_STR(attach) ": impossible");
591 }
592 sc->sc_rev = PCI_REVISION(pa->pa_class);
593
594 printf(": %s, rev %#02x\n", sip->sip_name, sc->sc_rev);
595
596 sc->sc_model = sip;
597
598 /*
599 * XXX Work-around broken PXE firmware on some boards.
600 *
601 * The DP83815 shares an address decoder with the MEM BAR
602 * and the ROM BAR. Make sure the ROM BAR is disabled,
603 * so that memory mapped access works.
604 */
605 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM,
606 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM) &
607 ~PCI_MAPREG_ROM_ENABLE);
608
609 /*
610 * Map the device.
611 */
612 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
613 PCI_MAPREG_TYPE_IO, 0,
614 &iot, &ioh, NULL, NULL) == 0);
615 #ifdef DP83820
616 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA);
617 switch (memtype) {
618 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
619 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
620 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
621 memtype, 0, &memt, &memh, NULL, NULL) == 0);
622 break;
623 default:
624 memh_valid = 0;
625 }
626 #else
627 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
628 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
629 &memt, &memh, NULL, NULL) == 0);
630 #endif /* DP83820 */
631
632 if (memh_valid) {
633 sc->sc_st = memt;
634 sc->sc_sh = memh;
635 } else if (ioh_valid) {
636 sc->sc_st = iot;
637 sc->sc_sh = ioh;
638 } else {
639 printf("%s: unable to map device registers\n",
640 sc->sc_dev.dv_xname);
641 return;
642 }
643
644 sc->sc_dmat = pa->pa_dmat;
645
646 /*
647 * Make sure bus mastering is enabled. Also make sure
648 * Write/Invalidate is enabled if we're allowed to use it.
649 */
650 pmreg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
651 if (pa->pa_flags & PCI_FLAGS_MWI_OKAY)
652 pmreg |= PCI_COMMAND_INVALIDATE_ENABLE;
653 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
654 pmreg | PCI_COMMAND_MASTER_ENABLE);
655
656 /* Get it out of power save mode if needed. */
657 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
658 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
659 if (pmode == 3) {
660 /*
661 * The card has lost all configuration data in
662 * this state, so punt.
663 */
664 printf("%s: unable to wake up from power state D3\n",
665 sc->sc_dev.dv_xname);
666 return;
667 }
668 if (pmode != 0) {
669 printf("%s: waking up from power state D%d\n",
670 sc->sc_dev.dv_xname, pmode);
671 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
672 }
673 }
674
675 /*
676 * Map and establish our interrupt.
677 */
678 if (pci_intr_map(pa, &ih)) {
679 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
680 return;
681 }
682 intrstr = pci_intr_string(pc, ih);
683 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, SIP_DECL(intr), sc);
684 if (sc->sc_ih == NULL) {
685 printf("%s: unable to establish interrupt",
686 sc->sc_dev.dv_xname);
687 if (intrstr != NULL)
688 printf(" at %s", intrstr);
689 printf("\n");
690 return;
691 }
692 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
693
694 SIMPLEQ_INIT(&sc->sc_txfreeq);
695 SIMPLEQ_INIT(&sc->sc_txdirtyq);
696
697 /*
698 * Allocate the control data structures, and create and load the
699 * DMA map for it.
700 */
701 if ((error = bus_dmamem_alloc(sc->sc_dmat,
702 sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
703 0)) != 0) {
704 printf("%s: unable to allocate control data, error = %d\n",
705 sc->sc_dev.dv_xname, error);
706 goto fail_0;
707 }
708
709 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
710 sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data,
711 BUS_DMA_COHERENT)) != 0) {
712 printf("%s: unable to map control data, error = %d\n",
713 sc->sc_dev.dv_xname, error);
714 goto fail_1;
715 }
716
717 if ((error = bus_dmamap_create(sc->sc_dmat,
718 sizeof(struct sip_control_data), 1,
719 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
720 printf("%s: unable to create control data DMA map, "
721 "error = %d\n", sc->sc_dev.dv_xname, error);
722 goto fail_2;
723 }
724
725 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
726 sc->sc_control_data, sizeof(struct sip_control_data), NULL,
727 0)) != 0) {
728 printf("%s: unable to load control data DMA map, error = %d\n",
729 sc->sc_dev.dv_xname, error);
730 goto fail_3;
731 }
732
733 /*
734 * Create the transmit buffer DMA maps.
735 */
736 for (i = 0; i < SIP_TXQUEUELEN; i++) {
737 if ((error = bus_dmamap_create(sc->sc_dmat, TX_DMAMAP_SIZE,
738 SIP_NTXSEGS, MCLBYTES, 0, 0,
739 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
740 printf("%s: unable to create tx DMA map %d, "
741 "error = %d\n", sc->sc_dev.dv_xname, i, error);
742 goto fail_4;
743 }
744 }
745
746 /*
747 * Create the receive buffer DMA maps.
748 */
749 for (i = 0; i < SIP_NRXDESC; i++) {
750 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
751 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
752 printf("%s: unable to create rx DMA map %d, "
753 "error = %d\n", sc->sc_dev.dv_xname, i, error);
754 goto fail_5;
755 }
756 sc->sc_rxsoft[i].rxs_mbuf = NULL;
757 }
758
759 /*
760 * Reset the chip to a known state.
761 */
762 SIP_DECL(reset)(sc);
763
764 /*
765 * Read the Ethernet address from the EEPROM. This might
766 * also fetch other stuff from the EEPROM and stash it
767 * in the softc.
768 */
769 sc->sc_cfg = 0;
770 #if !defined(DP83820)
771 if (SIP_SIS900_REV(sc,SIS_REV_635) ||
772 SIP_SIS900_REV(sc,SIS_REV_900B))
773 sc->sc_cfg |= (CFG_PESEL | CFG_RNDCNT);
774 #endif
775
776 (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr);
777
778 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
779 ether_sprintf(enaddr));
780
781 /*
782 * Initialize the configuration register: aggressive PCI
783 * bus request algorithm, default backoff, default OW timer,
784 * default parity error detection.
785 *
786 * NOTE: "Big endian mode" is useless on the SiS900 and
787 * friends -- it affects packet data, not descriptors.
788 */
789 #ifdef DP83820
790 /*
791 * XXX Need some PCI flags indicating support for
792 * XXX 64-bit addressing.
793 */
794 sc->sc_cfg &= ~(CFG_M64ADDR | CFG_T64ADDR);
795
796 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
797 if (reg & CFG_PCI64_DET) {
798 printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);
799 if ((sc->sc_cfg & CFG_DATA64_EN) == 0)
800 printf("%s: 64-bit data transfers disabled in EEPROM\n",
801 sc->sc_dev.dv_xname);
802 } else
803 sc->sc_cfg &= ~CFG_DATA64_EN;
804
805 if (sc->sc_cfg & (CFG_TBI_EN|CFG_EXT_125)) {
806 const char *sep = "";
807 printf("%s: using ", sc->sc_dev.dv_xname);
808 if (sc->sc_cfg & CFG_EXT_125) {
809 printf("%s125MHz clock", sep);
810 sep = ", ";
811 }
812 if (sc->sc_cfg & CFG_TBI_EN) {
813 printf("%sten-bit interface", sep);
814 sep = ", ";
815 }
816 printf("\n");
817 }
818 if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0)
819 sc->sc_cfg |= CFG_MRM_DIS;
820 if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
821 sc->sc_cfg |= CFG_MWI_DIS;
822
823 /*
824 * Use the extended descriptor format on the DP83820. This
825 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
826 * checksumming.
827 */
828 sc->sc_cfg |= CFG_EXTSTS_EN;
829 #endif /* DP83820 */
830
831 /*
832 * Initialize our media structures and probe the MII.
833 */
834 sc->sc_mii.mii_ifp = ifp;
835 sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg;
836 sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg;
837 sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg;
838 ifmedia_init(&sc->sc_mii.mii_media, 0, SIP_DECL(mediachange),
839 SIP_DECL(mediastatus));
840 #ifdef DP83820
841 if (sc->sc_cfg & CFG_TBI_EN) {
842 /* Using ten-bit interface. */
843 printf("%s: TBI -- FIXME\n", sc->sc_dev.dv_xname);
844 } else {
845 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
846 MII_OFFSET_ANY, 0);
847 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
848 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
849 0, NULL);
850 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
851 } else
852 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
853 }
854 #else
855 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
856 MII_OFFSET_ANY, 0);
857 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
858 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
859 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
860 } else
861 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
862 #endif /* DP83820 */
863
864 ifp = &sc->sc_ethercom.ec_if;
865 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
866 ifp->if_softc = sc;
867 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
868 ifp->if_ioctl = SIP_DECL(ioctl);
869 ifp->if_start = SIP_DECL(start);
870 ifp->if_watchdog = SIP_DECL(watchdog);
871 ifp->if_init = SIP_DECL(init);
872 ifp->if_stop = SIP_DECL(stop);
873 IFQ_SET_READY(&ifp->if_snd);
874
875 /*
876 * We can support 802.1Q VLAN-sized frames.
877 */
878 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
879
880 #ifdef DP83820
881 /*
882 * And the DP83820 can do VLAN tagging in hardware, and
883 * support the jumbo Ethernet MTU.
884 */
885 sc->sc_ethercom.ec_capabilities |=
886 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
887
888 /*
889 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums
890 * in hardware.
891 */
892 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
893 IFCAP_CSUM_UDPv4;
894 #endif /* DP83820 */
895
896 /*
897 * Attach the interface.
898 */
899 if_attach(ifp);
900 ether_ifattach(ifp, enaddr);
901
902 /*
903 * The number of bytes that must be available in
904 * the Tx FIFO before the bus master can DMA more
905 * data into the FIFO.
906 */
907 sc->sc_tx_fill_thresh = 64 / 32;
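	/*
	 * (The Tx fill and drain thresholds are programmed in 32-byte
	 * units -- note the TXCFG_DRTH check and the "* 32" conversions
	 * in the transmit-underrun handler in the interrupt routine --
	 * which is why the values here and below are divided by 32.)
	 */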
908
909 /*
	 * Start at a drain threshold of 1504 bytes.  We will
911 * increase it if a DMA underrun occurs.
912 *
913 * XXX The minimum value of this variable should be
914 * tuned. We may be able to improve performance
915 * by starting with a lower value. That, however,
916 * may trash the first few outgoing packets if the
917 * PCI bus is saturated.
918 */
919 sc->sc_tx_drain_thresh = 1504 / 32;
920
921 /*
922 * Initialize the Rx FIFO drain threshold.
923 *
924 * This is in units of 8 bytes.
925 *
926 * We should never set this value lower than 2; 14 bytes are
927 * required to filter the packet.
928 */
929 sc->sc_rx_drain_thresh = 128 / 8;
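	/*
	 * (128 / 8 = 16 units of 8 bytes, i.e. a 128-byte threshold --
	 * comfortably above the 2-unit minimum noted above.)
	 */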
930
931 #ifdef SIP_EVENT_COUNTERS
932 /*
933 * Attach event counters.
934 */
935 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
936 NULL, sc->sc_dev.dv_xname, "txsstall");
937 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
938 NULL, sc->sc_dev.dv_xname, "txdstall");
939 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
940 NULL, sc->sc_dev.dv_xname, "txintr");
941 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
942 NULL, sc->sc_dev.dv_xname, "rxintr");
943 #ifdef DP83820
944 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
945 NULL, sc->sc_dev.dv_xname, "rxipsum");
946 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC,
947 NULL, sc->sc_dev.dv_xname, "rxtcpsum");
948 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC,
949 NULL, sc->sc_dev.dv_xname, "rxudpsum");
950 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
951 NULL, sc->sc_dev.dv_xname, "txipsum");
952 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC,
953 NULL, sc->sc_dev.dv_xname, "txtcpsum");
954 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC,
955 NULL, sc->sc_dev.dv_xname, "txudpsum");
956 #endif /* DP83820 */
957 #endif /* SIP_EVENT_COUNTERS */
958
959 /*
960 * Make sure the interface is shutdown during reboot.
961 */
962 sc->sc_sdhook = shutdownhook_establish(SIP_DECL(shutdown), sc);
963 if (sc->sc_sdhook == NULL)
964 printf("%s: WARNING: unable to establish shutdown hook\n",
965 sc->sc_dev.dv_xname);
966 return;
967
968 /*
969 * Free any resources we've allocated during the failed attach
970 * attempt. Do this in reverse order and fall through.
971 */
972 fail_5:
973 for (i = 0; i < SIP_NRXDESC; i++) {
974 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
975 bus_dmamap_destroy(sc->sc_dmat,
976 sc->sc_rxsoft[i].rxs_dmamap);
977 }
978 fail_4:
979 for (i = 0; i < SIP_TXQUEUELEN; i++) {
980 if (sc->sc_txsoft[i].txs_dmamap != NULL)
981 bus_dmamap_destroy(sc->sc_dmat,
982 sc->sc_txsoft[i].txs_dmamap);
983 }
984 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
985 fail_3:
986 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
987 fail_2:
988 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
989 sizeof(struct sip_control_data));
990 fail_1:
991 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
992 fail_0:
993 return;
994 }
995
996 /*
997 * sip_shutdown:
998 *
999 * Make sure the interface is stopped at reboot time.
1000 */
1001 void
1002 SIP_DECL(shutdown)(void *arg)
1003 {
1004 struct sip_softc *sc = arg;
1005
1006 SIP_DECL(stop)(&sc->sc_ethercom.ec_if, 1);
1007 }
1008
1009 /*
1010 * sip_start: [ifnet interface function]
1011 *
1012 * Start packet transmission on the interface.
1013 */
1014 void
1015 SIP_DECL(start)(struct ifnet *ifp)
1016 {
1017 struct sip_softc *sc = ifp->if_softc;
1018 struct mbuf *m0, *m;
1019 struct sip_txsoft *txs;
1020 bus_dmamap_t dmamap;
1021 int error, firsttx, nexttx, lasttx, ofree, seg;
1022 #ifdef DP83820
1023 u_int32_t extsts;
1024 #endif
1025
1026 /*
1027 * If we've been told to pause, don't transmit any more packets.
1028 */
1029 if (sc->sc_flags & SIPF_PAUSED)
1030 ifp->if_flags |= IFF_OACTIVE;
1031
1032 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1033 return;
1034
1035 /*
1036 * Remember the previous number of free descriptors and
1037 * the first descriptor we'll use.
1038 */
1039 ofree = sc->sc_txfree;
1040 firsttx = sc->sc_txnext;
1041
1042 /*
1043 * Loop through the send queue, setting up transmit descriptors
1044 * until we drain the queue, or use up all available transmit
1045 * descriptors.
1046 */
1047 for (;;) {
1048 /* Get a work queue entry. */
1049 if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1050 SIP_EVCNT_INCR(&sc->sc_ev_txsstall);
1051 break;
1052 }
1053
1054 /*
1055 * Grab a packet off the queue.
1056 */
1057 IFQ_POLL(&ifp->if_snd, m0);
1058 if (m0 == NULL)
1059 break;
1060 #ifndef DP83820
1061 m = NULL;
1062 #endif
1063
1064 dmamap = txs->txs_dmamap;
1065
1066 #ifdef DP83820
1067 /*
1068 * Load the DMA map. If this fails, the packet either
1069 * didn't fit in the allotted number of segments, or we
1070 * were short on resources. For the too-many-segments
1071 * case, we simply report an error and drop the packet,
1072 * since we can't sanely copy a jumbo packet to a single
1073 * buffer.
1074 */
1075 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1076 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1077 if (error) {
1078 if (error == EFBIG) {
1079 printf("%s: Tx packet consumes too many "
1080 "DMA segments, dropping...\n",
1081 sc->sc_dev.dv_xname);
1082 IFQ_DEQUEUE(&ifp->if_snd, m0);
1083 m_freem(m0);
1084 continue;
1085 }
1086 /*
1087 * Short on resources, just stop for now.
1088 */
1089 break;
1090 }
1091 #else /* DP83820 */
1092 /*
1093 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
1095 * were short on resources. In this case, we'll copy
1096 * and try again.
1097 */
1098 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1099 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1100 MGETHDR(m, M_DONTWAIT, MT_DATA);
1101 if (m == NULL) {
1102 printf("%s: unable to allocate Tx mbuf\n",
1103 sc->sc_dev.dv_xname);
1104 break;
1105 }
1106 if (m0->m_pkthdr.len > MHLEN) {
1107 MCLGET(m, M_DONTWAIT);
1108 if ((m->m_flags & M_EXT) == 0) {
1109 printf("%s: unable to allocate Tx "
1110 "cluster\n", sc->sc_dev.dv_xname);
1111 m_freem(m);
1112 break;
1113 }
1114 }
1115 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
1116 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1117 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
1118 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1119 if (error) {
1120 printf("%s: unable to load Tx buffer, "
1121 "error = %d\n", sc->sc_dev.dv_xname, error);
1122 break;
1123 }
1124 }
1125 #endif /* DP83820 */
1126
1127 /*
1128 * Ensure we have enough descriptors free to describe
1129 * the packet. Note, we always reserve one descriptor
1130 * at the end of the ring as a termination point, to
1131 * prevent wrap-around.
1132 */
1133 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
1134 /*
1135 * Not enough free descriptors to transmit this
1136 * packet. We haven't committed anything yet,
1137 * so just unload the DMA map, put the packet
1138 * back on the queue, and punt. Notify the upper
		 * layer that there are no more slots left.
1140 *
1141 * XXX We could allocate an mbuf and copy, but
1142 * XXX is it worth it?
1143 */
1144 ifp->if_flags |= IFF_OACTIVE;
1145 bus_dmamap_unload(sc->sc_dmat, dmamap);
1146 #ifndef DP83820
1147 if (m != NULL)
1148 m_freem(m);
1149 #endif
1150 SIP_EVCNT_INCR(&sc->sc_ev_txdstall);
1151 break;
1152 }
1153
1154 IFQ_DEQUEUE(&ifp->if_snd, m0);
1155 #ifndef DP83820
1156 if (m != NULL) {
1157 m_freem(m0);
1158 m0 = m;
1159 }
1160 #endif
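
		/*
		 * From here on, m0 is the mbuf chain actually described by
		 * dmamap; in the copy-and-retry case above (non-DP83820),
		 * the original chain has already been freed and m0 points
		 * at the copy.
		 */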
1161
1162 /*
1163 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1164 */
1165
1166 /* Sync the DMA map. */
1167 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1168 BUS_DMASYNC_PREWRITE);
1169
1170 /*
1171 * Initialize the transmit descriptors.
1172 */
1173 for (nexttx = sc->sc_txnext, seg = 0;
1174 seg < dmamap->dm_nsegs;
1175 seg++, nexttx = SIP_NEXTTX(nexttx)) {
1176 /*
1177 * If this is the first descriptor we're
1178 * enqueueing, don't set the OWN bit just
1179 * yet. That could cause a race condition.
1180 * We'll do it below.
1181 */
1182 sc->sc_txdescs[nexttx].sipd_bufptr =
1183 htole32(dmamap->dm_segs[seg].ds_addr);
1184 sc->sc_txdescs[nexttx].sipd_cmdsts =
1185 htole32((nexttx == firsttx ? 0 : CMDSTS_OWN) |
1186 CMDSTS_MORE | dmamap->dm_segs[seg].ds_len);
1187 #ifdef DP83820
1188 sc->sc_txdescs[nexttx].sipd_extsts = 0;
1189 #endif /* DP83820 */
1190 lasttx = nexttx;
1191 }
1192
1193 /* Clear the MORE bit on the last segment. */
1194 sc->sc_txdescs[lasttx].sipd_cmdsts &= htole32(~CMDSTS_MORE);
1195
1196 #ifdef DP83820
1197 /*
1198 * If VLANs are enabled and the packet has a VLAN tag, set
1199 * up the descriptor to encapsulate the packet for us.
1200 *
1201 * This apparently has to be on the last descriptor of
1202 * the packet.
1203 */
1204 if (sc->sc_ethercom.ec_nvlans != 0 &&
1205 (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
1206 sc->sc_txdescs[lasttx].sipd_extsts |=
1207 htole32(EXTSTS_VPKT |
1208 htons(*mtod(m, int *) & EXTSTS_VTCI));
1209 }
1210
1211 /*
1212 * If the upper-layer has requested IPv4/TCPv4/UDPv4
1213 * checksumming, set up the descriptor to do this work
1214 * for us.
1215 *
1216 * This apparently has to be on the first descriptor of
1217 * the packet.
1218 *
1219 * Byte-swap constants so the compiler can optimize.
1220 */
1221 extsts = 0;
1222 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1223 KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4);
1224 SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
1225 extsts |= htole32(EXTSTS_IPPKT);
1226 }
1227 if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
1228 KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4);
1229 SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
1230 extsts |= htole32(EXTSTS_TCPPKT);
1231 } else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
1232 KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4);
1233 SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
1234 extsts |= htole32(EXTSTS_UDPPKT);
1235 }
1236 sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts;
1237 #endif /* DP83820 */
1238
1239 /* Sync the descriptors we're using. */
1240 SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1241 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1242
1243 /*
1244 * Store a pointer to the packet so we can free it later,
1245 * and remember what txdirty will be once the packet is
1246 * done.
1247 */
1248 txs->txs_mbuf = m0;
1249 txs->txs_firstdesc = sc->sc_txnext;
1250 txs->txs_lastdesc = lasttx;
1251
1252 /* Advance the tx pointer. */
1253 sc->sc_txfree -= dmamap->dm_nsegs;
1254 sc->sc_txnext = nexttx;
1255
1256 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1257 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1258
1259 #if NBPFILTER > 0
1260 /*
1261 * Pass the packet to any BPF listeners.
1262 */
1263 if (ifp->if_bpf)
1264 bpf_mtap(ifp->if_bpf, m0);
1265 #endif /* NBPFILTER > 0 */
1266 }
1267
1268 if (txs == NULL || sc->sc_txfree == 0) {
1269 /* No more slots left; notify upper layer. */
1270 ifp->if_flags |= IFF_OACTIVE;
1271 }
1272
1273 if (sc->sc_txfree != ofree) {
1274 /*
1275 * Cause a descriptor interrupt to happen on the
1276 * last packet we enqueued.
1277 */
1278 sc->sc_txdescs[lasttx].sipd_cmdsts |= htole32(CMDSTS_INTR);
1279 SIP_CDTXSYNC(sc, lasttx, 1,
1280 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1281
1282 /*
1283 * The entire packet chain is set up. Give the
		 * first descriptor to the chip now.
1285 */
1286 sc->sc_txdescs[firsttx].sipd_cmdsts |= htole32(CMDSTS_OWN);
1287 SIP_CDTXSYNC(sc, firsttx, 1,
1288 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1289
1290 /*
1291 * Start the transmit process. Note, the manual says
1292 * that if there are no pending transmissions in the
1293 * chip's internal queue (indicated by TXE being clear),
1294 * then the driver software must set the TXDP to the
1295 * first descriptor to be transmitted. However, if we
		 * do this, it causes serious performance degradation on
		 * the DP83820 under load; not setting TXDP doesn't seem
		 * to adversely affect the SiS 900 or DP83815.
1299 *
1300 * Well, I guess it wouldn't be the first time a manual
1301 * has lied -- and they could be speaking of the NULL-
1302 * terminated descriptor list case, rather than OWN-
1303 * terminated rings.
1304 */
1305 #if 0
1306 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
1307 CR_TXE) == 0) {
1308 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
1309 SIP_CDTXADDR(sc, firsttx));
1310 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1311 }
1312 #else
1313 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1314 #endif
1315
1316 /* Set a watchdog timer in case the chip flakes out. */
1317 ifp->if_timer = 5;
1318 }
1319 }
1320
1321 /*
1322 * sip_watchdog: [ifnet interface function]
1323 *
1324 * Watchdog timer handler.
1325 */
1326 void
1327 SIP_DECL(watchdog)(struct ifnet *ifp)
1328 {
1329 struct sip_softc *sc = ifp->if_softc;
1330
1331 /*
1332 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
	 * If we get a timeout, try to sweep up transmit descriptors.
1334 * If we manage to sweep them all up, ignore the lack of
1335 * interrupt.
1336 */
1337 SIP_DECL(txintr)(sc);
1338
1339 if (sc->sc_txfree != SIP_NTXDESC) {
1340 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1341 ifp->if_oerrors++;
1342
1343 /* Reset the interface. */
1344 (void) SIP_DECL(init)(ifp);
1345 } else if (ifp->if_flags & IFF_DEBUG)
1346 printf("%s: recovered from device timeout\n",
1347 sc->sc_dev.dv_xname);
1348
1349 /* Try to get more packets going. */
1350 SIP_DECL(start)(ifp);
1351 }
1352
1353 /*
1354 * sip_ioctl: [ifnet interface function]
1355 *
1356 * Handle control requests from the operator.
1357 */
1358 int
1359 SIP_DECL(ioctl)(struct ifnet *ifp, u_long cmd, caddr_t data)
1360 {
1361 struct sip_softc *sc = ifp->if_softc;
1362 struct ifreq *ifr = (struct ifreq *)data;
1363 int s, error;
1364
1365 s = splnet();
1366
1367 switch (cmd) {
1368 case SIOCSIFMEDIA:
1369 case SIOCGIFMEDIA:
1370 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1371 break;
1372
1373 default:
1374 error = ether_ioctl(ifp, cmd, data);
1375 if (error == ENETRESET) {
1376 /*
1377 * Multicast list has changed; set the hardware filter
1378 * accordingly.
1379 */
1380 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
1381 error = 0;
1382 }
1383 break;
1384 }
1385
1386 /* Try to get more packets going. */
1387 SIP_DECL(start)(ifp);
1388
1389 splx(s);
1390 return (error);
1391 }
1392
1393 /*
1394 * sip_intr:
1395 *
1396 * Interrupt service routine.
1397 */
1398 int
1399 SIP_DECL(intr)(void *arg)
1400 {
1401 struct sip_softc *sc = arg;
1402 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1403 u_int32_t isr;
1404 int handled = 0;
1405
1406 for (;;) {
1407 /* Reading clears interrupt. */
1408 isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
1409 if ((isr & sc->sc_imr) == 0)
1410 break;
1411
1412 handled = 1;
1413
1414 if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
1415 SIP_EVCNT_INCR(&sc->sc_ev_rxintr);
1416
1417 /* Grab any new packets. */
1418 SIP_DECL(rxintr)(sc);
1419
1420 if (isr & ISR_RXORN) {
1421 printf("%s: receive FIFO overrun\n",
1422 sc->sc_dev.dv_xname);
1423
1424 /* XXX adjust rx_drain_thresh? */
1425 }
1426
1427 if (isr & ISR_RXIDLE) {
1428 printf("%s: receive ring overrun\n",
1429 sc->sc_dev.dv_xname);
1430
1431 /* Get the receive process going again. */
1432 bus_space_write_4(sc->sc_st, sc->sc_sh,
1433 SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
1434 bus_space_write_4(sc->sc_st, sc->sc_sh,
1435 SIP_CR, CR_RXE);
1436 }
1437 }
1438
1439 if (isr & (ISR_TXURN|ISR_TXDESC)) {
1440 SIP_EVCNT_INCR(&sc->sc_ev_txintr);
1441
1442 /* Sweep up transmit descriptors. */
1443 SIP_DECL(txintr)(sc);
1444
1445 if (isr & ISR_TXURN) {
1446 u_int32_t thresh;
1447
1448 printf("%s: transmit FIFO underrun",
1449 sc->sc_dev.dv_xname);
1450
1451 thresh = sc->sc_tx_drain_thresh + 1;
1452 if (thresh <= TXCFG_DRTH &&
1453 (thresh * 32) <= (SIP_TXFIFO_SIZE -
1454 (sc->sc_tx_fill_thresh * 32))) {
1455 printf("; increasing Tx drain "
1456 "threshold to %u bytes\n",
1457 thresh * 32);
1458 sc->sc_tx_drain_thresh = thresh;
1459 (void) SIP_DECL(init)(ifp);
1460 } else {
1461 (void) SIP_DECL(init)(ifp);
1462 printf("\n");
1463 }
1464 }
1465 }
1466
1467 #if !defined(DP83820)
1468 if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
1469 if (isr & ISR_PAUSE_ST) {
1470 sc->sc_flags |= SIPF_PAUSED;
1471 ifp->if_flags |= IFF_OACTIVE;
1472 }
1473 if (isr & ISR_PAUSE_END) {
1474 sc->sc_flags &= ~SIPF_PAUSED;
1475 ifp->if_flags &= ~IFF_OACTIVE;
1476 }
1477 }
1478 #endif /* ! DP83820 */
1479
1480 if (isr & ISR_HIBERR) {
1481 #define PRINTERR(bit, str) \
1482 if (isr & (bit)) \
1483 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1484 PRINTERR(ISR_DPERR, "parity error");
1485 PRINTERR(ISR_SSERR, "system error");
1486 PRINTERR(ISR_RMABT, "master abort");
1487 PRINTERR(ISR_RTABT, "target abort");
1488 PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
1489 (void) SIP_DECL(init)(ifp);
1490 #undef PRINTERR
1491 }
1492 }
1493
1494 /* Try to get more packets going. */
1495 SIP_DECL(start)(ifp);
1496
1497 return (handled);
1498 }
1499
1500 /*
1501 * sip_txintr:
1502 *
1503 * Helper; handle transmit interrupts.
1504 */
1505 void
1506 SIP_DECL(txintr)(struct sip_softc *sc)
1507 {
1508 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1509 struct sip_txsoft *txs;
1510 u_int32_t cmdsts;
1511
1512 if ((sc->sc_flags & SIPF_PAUSED) == 0)
1513 ifp->if_flags &= ~IFF_OACTIVE;
1514
1515 /*
1516 * Go through our Tx list and free mbufs for those
1517 * frames which have been transmitted.
1518 */
1519 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1520 SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1521 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1522
1523 cmdsts = le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
1524 if (cmdsts & CMDSTS_OWN)
1525 break;
1526
1527 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1528
1529 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1530
1531 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1532 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1533 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1534 m_freem(txs->txs_mbuf);
1535 txs->txs_mbuf = NULL;
1536
1537 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1538
1539 /*
1540 * Check for errors and collisions.
1541 */
1542 if (cmdsts &
1543 (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
1544 ifp->if_oerrors++;
1545 if (cmdsts & CMDSTS_Tx_EC)
1546 ifp->if_collisions += 16;
1547 if (ifp->if_flags & IFF_DEBUG) {
1548 if (cmdsts & CMDSTS_Tx_ED)
1549 printf("%s: excessive deferral\n",
1550 sc->sc_dev.dv_xname);
1551 if (cmdsts & CMDSTS_Tx_EC)
1552 printf("%s: excessive collisions\n",
1553 sc->sc_dev.dv_xname);
1554 }
1555 } else {
1556 /* Packet was transmitted successfully. */
1557 ifp->if_opackets++;
1558 ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
1559 }
1560 }
1561
1562 /*
1563 * If there are no more pending transmissions, cancel the watchdog
1564 * timer.
1565 */
1566 if (txs == NULL)
1567 ifp->if_timer = 0;
1568 }
1569
1570 #if defined(DP83820)
1571 /*
1572 * sip_rxintr:
1573 *
1574 * Helper; handle receive interrupts.
1575 */
1576 void
1577 SIP_DECL(rxintr)(struct sip_softc *sc)
1578 {
1579 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1580 struct sip_rxsoft *rxs;
1581 struct mbuf *m, *tailm;
1582 u_int32_t cmdsts, extsts;
1583 int i, len;
1584
1585 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1586 rxs = &sc->sc_rxsoft[i];
1587
1588 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1589
1590 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1591 extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts);
1592
1593 /*
1594 * NOTE: OWN is set if owned by _consumer_. We're the
1595 * consumer of the receive ring, so if the bit is clear,
1596 * we have processed all of the packets.
1597 */
1598 if ((cmdsts & CMDSTS_OWN) == 0) {
1599 /*
1600 * We have processed all of the receive buffers.
1601 */
1602 break;
1603 }
1604
1605 if (__predict_false(sc->sc_rxdiscard)) {
1606 SIP_INIT_RXDESC(sc, i);
1607 if ((cmdsts & CMDSTS_MORE) == 0) {
1608 /* Reset our state. */
1609 sc->sc_rxdiscard = 0;
1610 }
1611 continue;
1612 }
1613
1614 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1615 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1616
1617 m = rxs->rxs_mbuf;
1618
1619 /*
1620 * Add a new receive buffer to the ring.
1621 */
1622 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1623 /*
1624 * Failed, throw away what we've done so
1625 * far, and discard the rest of the packet.
1626 */
1627 ifp->if_ierrors++;
1628 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1629 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1630 SIP_INIT_RXDESC(sc, i);
1631 if (cmdsts & CMDSTS_MORE)
1632 sc->sc_rxdiscard = 1;
1633 if (sc->sc_rxhead != NULL)
1634 m_freem(sc->sc_rxhead);
1635 SIP_RXCHAIN_RESET(sc);
1636 continue;
1637 }
1638
1639 SIP_RXCHAIN_LINK(sc, m);
1640
1641 /*
1642 * If this is not the end of the packet, keep
1643 * looking.
1644 */
1645 if (cmdsts & CMDSTS_MORE) {
1646 sc->sc_rxlen += m->m_len;
1647 continue;
1648 }
1649
1650 /*
1651 * Okay, we have the entire packet now...
1652 */
1653 *sc->sc_rxtailp = NULL;
1654 m = sc->sc_rxhead;
1655 tailm = sc->sc_rxtail;
1656
1657 SIP_RXCHAIN_RESET(sc);
1658
1659 /*
1660 * If an error occurred, update stats and drop the packet.
1661 */
1662 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1663 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1664 ifp->if_ierrors++;
1665 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1666 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1667 /* Receive overrun handled elsewhere. */
1668 printf("%s: receive descriptor error\n",
1669 sc->sc_dev.dv_xname);
1670 }
1671 #define PRINTERR(bit, str) \
1672 if (cmdsts & (bit)) \
1673 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1674 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1675 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1676 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1677 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1678 #undef PRINTERR
1679 m_freem(m);
1680 continue;
1681 }
1682
1683 /*
1684 * No errors.
1685 *
1686 * Note, the DP83820 includes the CRC with
1687 * every packet.
1688 */
1689 len = CMDSTS_SIZE(cmdsts);
1690 tailm->m_len = len - sc->sc_rxlen;
1691
1692 /*
1693 * If the packet is small enough to fit in a
1694 * single header mbuf, allocate one and copy
1695 * the data into it. This greatly reduces
1696 * memory consumption when we receive lots
1697 * of small packets.
1698 */
1699 if (SIP_DECL(copy_small) != 0 && len <= (MHLEN - 2)) {
1700 struct mbuf *nm;
1701 MGETHDR(nm, M_DONTWAIT, MT_DATA);
1702 if (nm == NULL) {
1703 ifp->if_ierrors++;
1704 m_freem(m);
1705 continue;
1706 }
1707 nm->m_data += 2;
1708 nm->m_pkthdr.len = nm->m_len = len;
1709 m_copydata(m, 0, len, mtod(nm, caddr_t));
1710 m_freem(m);
1711 m = nm;
1712 }
1713 #ifndef __NO_STRICT_ALIGNMENT
1714 else {
1715 /*
1716 * The DP83820's receive buffers must be 4-byte
1717 * aligned. But this means that the data after
1718 * the Ethernet header is misaligned. To compensate,
1719 * we have artificially shortened the buffer size
1720 * in the descriptor, and we do an overlapping copy
1721 * of the data two bytes further in (in the first
1722 * buffer of the chain only).
1723 */
1724 memmove(mtod(m, caddr_t) + 2, mtod(m, caddr_t),
1725 m->m_len);
1726 m->m_data += 2;
1727 }
1728 #endif /* ! __NO_STRICT_ALIGNMENT */
1729
1730 /*
1731 * If VLANs are enabled, VLAN packets have been unwrapped
1732 * for us. Associate the tag with the packet.
1733 */
1734 if (sc->sc_ethercom.ec_nvlans != 0 &&
1735 (extsts & EXTSTS_VPKT) != 0) {
1736 struct mbuf *vtag;
1737
1738 vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1739 if (vtag == NULL) {
1740 ifp->if_ierrors++;
1741 printf("%s: unable to allocate VLAN tag\n",
1742 sc->sc_dev.dv_xname);
1743 m_freem(m);
1744 continue;
1745 }
1746
1747 *mtod(vtag, int *) = ntohs(extsts & EXTSTS_VTCI);
1748 vtag->m_len = sizeof(int);
1749 }
1750
1751 /*
1752 * Set the incoming checksum information for the
1753 * packet.
1754 */
1755 if ((extsts & EXTSTS_IPPKT) != 0) {
1756 SIP_EVCNT_INCR(&sc->sc_ev_rxipsum);
1757 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1758 if (extsts & EXTSTS_Rx_IPERR)
1759 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1760 if (extsts & EXTSTS_TCPPKT) {
1761 SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
1762 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1763 if (extsts & EXTSTS_Rx_TCPERR)
1764 m->m_pkthdr.csum_flags |=
1765 M_CSUM_TCP_UDP_BAD;
1766 } else if (extsts & EXTSTS_UDPPKT) {
1767 SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum);
1768 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1769 if (extsts & EXTSTS_Rx_UDPERR)
1770 m->m_pkthdr.csum_flags |=
1771 M_CSUM_TCP_UDP_BAD;
1772 }
1773 }
1774
1775 ifp->if_ipackets++;
1776 m->m_flags |= M_HASFCS;
1777 m->m_pkthdr.rcvif = ifp;
1778 m->m_pkthdr.len = len;
1779
1780 #if NBPFILTER > 0
1781 /*
1782 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
1784 */
1785 if (ifp->if_bpf)
1786 bpf_mtap(ifp->if_bpf, m);
1787 #endif /* NBPFILTER > 0 */
1788
1789 /* Pass it on. */
1790 (*ifp->if_input)(ifp, m);
1791 }
1792
1793 /* Update the receive pointer. */
1794 sc->sc_rxptr = i;
1795 }
1796 #else /* ! DP83820 */
1797 /*
1798 * sip_rxintr:
1799 *
1800 * Helper; handle receive interrupts.
1801 */
1802 void
1803 SIP_DECL(rxintr)(struct sip_softc *sc)
1804 {
1805 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1806 struct sip_rxsoft *rxs;
1807 struct mbuf *m;
1808 u_int32_t cmdsts;
1809 int i, len;
1810
1811 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1812 rxs = &sc->sc_rxsoft[i];
1813
1814 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1815
1816 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1817
1818 /*
1819 * NOTE: OWN is set if owned by _consumer_. We're the
1820 * consumer of the receive ring, so if the bit is clear,
1821 * we have processed all of the packets.
1822 */
1823 if ((cmdsts & CMDSTS_OWN) == 0) {
1824 /*
1825 * We have processed all of the receive buffers.
1826 */
1827 break;
1828 }
1829
1830 /*
1831 * If any collisions were seen on the wire, count one.
1832 */
1833 if (cmdsts & CMDSTS_Rx_COL)
1834 ifp->if_collisions++;
1835
1836 /*
1837 * If an error occurred, update stats, clear the status
1838 * word, and leave the packet buffer in place. It will
1839 * simply be reused the next time the ring comes around.
1840 */
1841 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1842 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1843 ifp->if_ierrors++;
1844 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1845 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1846 /* Receive overrun handled elsewhere. */
1847 printf("%s: receive descriptor error\n",
1848 sc->sc_dev.dv_xname);
1849 }
1850 #define PRINTERR(bit, str) \
1851 if (cmdsts & (bit)) \
1852 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1853 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1854 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1855 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1856 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1857 #undef PRINTERR
1858 SIP_INIT_RXDESC(sc, i);
1859 continue;
1860 }
1861
1862 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1863 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1864
1865 /*
1866 * No errors; receive the packet. Note, the SiS 900
1867 * includes the CRC with every packet.
1868 */
1869 len = CMDSTS_SIZE(cmdsts);
1870
1871 #ifdef __NO_STRICT_ALIGNMENT
1872 /*
1873 * If the packet is small enough to fit in a
1874 * single header mbuf, allocate one and copy
1875 * the data into it. This greatly reduces
1876 * memory consumption when we receive lots
1877 * of small packets.
1878 *
1879 * Otherwise, we add a new buffer to the receive
1880 * chain. If this fails, we drop the packet and
1881 * recycle the old buffer.
1882 */
1883 if (SIP_DECL(copy_small) != 0 && len <= MHLEN) {
1884 MGETHDR(m, M_DONTWAIT, MT_DATA);
1885 if (m == NULL)
1886 goto dropit;
1887 memcpy(mtod(m, caddr_t),
1888 mtod(rxs->rxs_mbuf, caddr_t), len);
1889 SIP_INIT_RXDESC(sc, i);
1890 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1891 rxs->rxs_dmamap->dm_mapsize,
1892 BUS_DMASYNC_PREREAD);
1893 } else {
1894 m = rxs->rxs_mbuf;
1895 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1896 dropit:
1897 ifp->if_ierrors++;
1898 SIP_INIT_RXDESC(sc, i);
1899 bus_dmamap_sync(sc->sc_dmat,
1900 rxs->rxs_dmamap, 0,
1901 rxs->rxs_dmamap->dm_mapsize,
1902 BUS_DMASYNC_PREREAD);
1903 continue;
1904 }
1905 }
1906 #else
1907 /*
1908 * The SiS 900's receive buffers must be 4-byte aligned.
1909 * But this means that the data after the Ethernet header
1910 * is misaligned. We must allocate a new buffer and
1911 * copy the data, shifted forward 2 bytes.
1912 */
1913 MGETHDR(m, M_DONTWAIT, MT_DATA);
1914 if (m == NULL) {
1915 dropit:
1916 ifp->if_ierrors++;
1917 SIP_INIT_RXDESC(sc, i);
1918 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1919 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1920 continue;
1921 }
1922 if (len > (MHLEN - 2)) {
1923 MCLGET(m, M_DONTWAIT);
1924 if ((m->m_flags & M_EXT) == 0) {
1925 m_freem(m);
1926 goto dropit;
1927 }
1928 }
1929 m->m_data += 2;
1930
1931 /*
1932 * Note that we use clusters for incoming frames, so the
1933 * buffer is virtually contiguous.
1934 */
1935 memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
1936
1937 /* Allow the receive descriptor to continue using its mbuf. */
1938 SIP_INIT_RXDESC(sc, i);
1939 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1940 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1941 #endif /* __NO_STRICT_ALIGNMENT */
1942
1943 ifp->if_ipackets++;
1944 m->m_flags |= M_HASFCS;
1945 m->m_pkthdr.rcvif = ifp;
1946 m->m_pkthdr.len = m->m_len = len;
1947
1948 #if NBPFILTER > 0
1949 /*
1950 * Pass this up to any BPF listeners, but only
1951 * pass it up the stack if it's for us.
1952 */
1953 if (ifp->if_bpf)
1954 bpf_mtap(ifp->if_bpf, m);
1955 #endif /* NBPFILTER > 0 */
1956
1957 /* Pass it on. */
1958 (*ifp->if_input)(ifp, m);
1959 }
1960
1961 /* Update the receive pointer. */
1962 sc->sc_rxptr = i;
1963 }
1964 #endif /* DP83820 */
1965
1966 /*
1967 * sip_tick:
1968 *
1969 * One second timer, used to tick the MII.
1970 */
1971 void
1972 SIP_DECL(tick)(void *arg)
1973 {
1974 struct sip_softc *sc = arg;
1975 int s;
1976
1977 s = splnet();
1978 mii_tick(&sc->sc_mii);
1979 splx(s);
1980
1981 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
1982 }
1983
1984 /*
1985 * sip_reset:
1986 *
1987 * Perform a soft reset on the SiS 900.
1988 */
1989 void
1990 SIP_DECL(reset)(struct sip_softc *sc)
1991 {
1992 bus_space_tag_t st = sc->sc_st;
1993 bus_space_handle_t sh = sc->sc_sh;
1994 int i;
1995
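/*
 * Mask and disable interrupts, clear the receive filter control
 * register, and then command a soft reset through CR.
 */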
1996 bus_space_write_4(st, sh, SIP_IER, 0);
1997 bus_space_write_4(st, sh, SIP_IMR, 0);
1998 bus_space_write_4(st, sh, SIP_RFCR, 0);
1999 bus_space_write_4(st, sh, SIP_CR, CR_RST);
2000
2001 for (i = 0; i < SIP_TIMEOUT; i++) {
2002 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0)
2003 break;
2004 delay(2);
2005 }
2006
2007 if (i == SIP_TIMEOUT)
2008 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
2009
2010 delay(1000);
2011
2012 #ifdef DP83820
2013 /*
2014 * Set the general purpose I/O bits. Do it here in case we
2015 * need to have GPIO set up to talk to the media interface.
2016 */
2017 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior);
2018 delay(1000);
2019 #endif /* DP83820 */
2020 }
2021
2022 /*
2023 * sip_init: [ ifnet interface function ]
2024 *
2025 * Initialize the interface. Must be called at splnet().
2026 */
2027 int
2028 SIP_DECL(init)(struct ifnet *ifp)
2029 {
2030 struct sip_softc *sc = ifp->if_softc;
2031 bus_space_tag_t st = sc->sc_st;
2032 bus_space_handle_t sh = sc->sc_sh;
2033 struct sip_txsoft *txs;
2034 struct sip_rxsoft *rxs;
2035 struct sip_desc *sipd;
2036 u_int32_t reg;
2037 int i, error = 0;
2038
2039 /*
2040 * Cancel any pending I/O.
2041 */
2042 SIP_DECL(stop)(ifp, 0);
2043
2044 /*
2045 * Reset the chip to a known state.
2046 */
2047 SIP_DECL(reset)(sc);
2048
2049 #if !defined(DP83820)
2050 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) {
2051 /*
2052 * DP83815 manual, page 78:
2053 * 4.4 Recommended Registers Configuration
2054 * For optimum performance of the DP83815, version noted
2055 * as DP83815CVNG (SRR = 203h), the listed register
2056 * modifications must be followed in sequence...
2057 *
2058 * It's not clear if this should be 302h or 203h because that
2059 * chip name is listed as SRR 302h in the description of the
2060 * SRR register. However, my revision 302h DP83815 on the
2061 * Netgear FA311 purchased in 02/2001 needs these settings
2062 * to avoid tons of errors in AcceptPerfectMatch (non-
2063 * IFF_PROMISC) mode. I do not know if other revisions need
2064 * this set or not. [briggs -- 09 March 2001]
2065 *
2066 * Note that only the low-order 12 bits of 0xe4 are documented
2067 * and that this sets reserved bits in that register.
2068 */
2069 reg = bus_space_read_4(st, sh, SIP_NS_SRR);
2070 if (reg == 0x302) {
2071 bus_space_write_4(st, sh, 0x00cc, 0x0001);
2072 bus_space_write_4(st, sh, 0x00e4, 0x189C);
2073 bus_space_write_4(st, sh, 0x00fc, 0x0000);
2074 bus_space_write_4(st, sh, 0x00f4, 0x5040);
2075 bus_space_write_4(st, sh, 0x00f8, 0x008c);
2076 }
2077 }
2078 #endif /* ! DP83820 */
2079
2080 /*
2081 * Initialize the transmit descriptor ring.
2082 */
2083 for (i = 0; i < SIP_NTXDESC; i++) {
2084 sipd = &sc->sc_txdescs[i];
2085 memset(sipd, 0, sizeof(struct sip_desc));
2086 sipd->sipd_link = htole32(SIP_CDTXADDR(sc, SIP_NEXTTX(i)));
2087 }
2088 SIP_CDTXSYNC(sc, 0, SIP_NTXDESC,
2089 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2090 sc->sc_txfree = SIP_NTXDESC;
2091 sc->sc_txnext = 0;
2092
2093 /*
2094 * Initialize the transmit job descriptors.
2095 */
2096 SIMPLEQ_INIT(&sc->sc_txfreeq);
2097 SIMPLEQ_INIT(&sc->sc_txdirtyq);
2098 for (i = 0; i < SIP_TXQUEUELEN; i++) {
2099 txs = &sc->sc_txsoft[i];
2100 txs->txs_mbuf = NULL;
2101 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2102 }
2103
2104 /*
2105 * Initialize the receive descriptor and receive job
2106 * descriptor rings.
2107 */
2108 for (i = 0; i < SIP_NRXDESC; i++) {
2109 rxs = &sc->sc_rxsoft[i];
2110 if (rxs->rxs_mbuf == NULL) {
2111 if ((error = SIP_DECL(add_rxbuf)(sc, i)) != 0) {
2112 printf("%s: unable to allocate or map rx "
2113 "buffer %d, error = %d\n",
2114 sc->sc_dev.dv_xname, i, error);
2115 /*
2116 * XXX Should attempt to run with fewer receive
2117 * XXX buffers instead of just failing.
2118 */
2119 SIP_DECL(rxdrain)(sc);
2120 goto out;
2121 }
2122 } else
2123 SIP_INIT_RXDESC(sc, i);
2124 }
2125 sc->sc_rxptr = 0;
2126 #ifdef DP83820
2127 sc->sc_rxdiscard = 0;
2128 SIP_RXCHAIN_RESET(sc);
2129 #endif /* DP83820 */
2130
2131 /*
2132 * Set the configuration register; it's already initialized
2133 * in sip_attach().
2134 */
2135 bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg);
2136
2137 /*
2138 * Initialize the prototype TXCFG register.
2139 */
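/*
 * MXDMA selection: the DP83820 always uses MXDMA_512; SiS 635/900B
 * parts with CFG_EDBMASTEN set use MXDMA_64, all others MXDMA_512.
 */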
2140 #if defined(DP83820)
2141 sc->sc_txcfg = TXCFG_MXDMA_512;
2142 sc->sc_rxcfg = RXCFG_MXDMA_512;
2143 #else
2144 if ((SIP_SIS900_REV(sc, SIS_REV_635) ||
2145 SIP_SIS900_REV(sc, SIS_REV_900B)) &&
2146 (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG) & CFG_EDBMASTEN)) {
2147 sc->sc_txcfg = TXCFG_MXDMA_64;
2148 sc->sc_rxcfg = RXCFG_MXDMA_64;
2149 } else {
2150 sc->sc_txcfg = TXCFG_MXDMA_512;
2151 sc->sc_rxcfg = RXCFG_MXDMA_512;
2152 }
2153 #endif /* DP83820 */
2154
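/*
 * Add the remaining transmit control bits (TXCFG_ATP plus the
 * configured fill and drain thresholds) and program TXCFG.
 */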
2155 sc->sc_txcfg |= TXCFG_ATP |
2156 (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) |
2157 sc->sc_tx_drain_thresh;
2158 bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg);
2159
2160 /*
2161 * Initialize the receive drain threshold if we have never
2162 * done so.
2163 */
2164 if (sc->sc_rx_drain_thresh == 0) {
2165 /*
2166 * XXX This value should be tuned. This is set to the
2167 * maximum of 248 bytes, and we may be able to improve
2168 * performance by decreasing it (although we should never
2169 * set this value lower than 2; 14 bytes are required to
2170 * filter the packet).
2171 */
2172 sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT;
2173 }
2174
2175 /*
2176 * Initialize the prototype RXCFG register.
2177 */
2178 sc->sc_rxcfg |= (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT);
2179 bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg);
2180
2181 #ifdef DP83820
2182 /*
2183 * Initialize the VLAN/IP receive control register.
2184 * We enable checksum computation on all incoming
2185 * packets, and do not reject packets w/ bad checksums.
2186 */
2187 reg = 0;
2188 if (ifp->if_capenable &
2189 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2190 reg |= VRCR_IPEN;
2191 if (sc->sc_ethercom.ec_nvlans != 0)
2192 reg |= VRCR_VTDEN|VRCR_VTREN;
2193 bus_space_write_4(st, sh, SIP_VRCR, reg);
2194
2195 /*
2196 * Initialize the VLAN/IP transmit control register.
2197 * We enable outgoing checksum computation on a
2198 * per-packet basis.
2199 */
2200 reg = 0;
2201 if (ifp->if_capenable &
2202 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2203 reg |= VTCR_PPCHK;
2204 if (sc->sc_ethercom.ec_nvlans != 0)
2205 reg |= VTCR_VPPTI;
2206 bus_space_write_4(st, sh, SIP_VTCR, reg);
2207
2208 /*
2209 * If we're using VLANs, initialize the VLAN data register.
2210 * To understand why we bswap the VLAN Ethertype, see section
2211 * 4.2.36 of the DP83820 manual.
2212 */
2213 if (sc->sc_ethercom.ec_nvlans != 0)
2214 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN));
2215 #endif /* DP83820 */
2216
2217 /*
2218 * Give the transmit and receive rings to the chip.
2219 */
2220 bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
2221 bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
2222
2223 /*
2224 * Initialize the interrupt mask.
2225 */
2226 sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR|
2227 ISR_TXURN|ISR_TXDESC|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
2228 bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);
2229
2230 /* Set up the receive filter. */
2231 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
2232
2233 /*
2234 * Set the current media. Do this after initializing the prototype
2235 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
2236 * control.
2237 */
2238 mii_mediachg(&sc->sc_mii);
2239
2240 /*
2241 * Enable interrupts.
2242 */
2243 bus_space_write_4(st, sh, SIP_IER, IER_IE);
2244
2245 /*
2246 * Start the transmit and receive processes.
2247 */
2248 bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);
2249
2250 /*
2251 * Start the one second MII clock.
2252 */
2253 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2254
2255 /*
2256 * ...all done!
2257 */
2258 ifp->if_flags |= IFF_RUNNING;
2259 ifp->if_flags &= ~IFF_OACTIVE;
2260
2261 out:
2262 if (error)
2263 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2264 return (error);
2265 }
2266
2267 /*
2268 * sip_rxdrain:
2269 *
2270 * Drain the receive queue.
2271 */
2272 void
2273 SIP_DECL(rxdrain)(struct sip_softc *sc)
2274 {
2275 struct sip_rxsoft *rxs;
2276 int i;
2277
2278 for (i = 0; i < SIP_NRXDESC; i++) {
2279 rxs = &sc->sc_rxsoft[i];
2280 if (rxs->rxs_mbuf != NULL) {
2281 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2282 m_freem(rxs->rxs_mbuf);
2283 rxs->rxs_mbuf = NULL;
2284 }
2285 }
2286 }
2287
2288 /*
2289 * sip_stop: [ ifnet interface function ]
2290 *
2291 * Stop transmission on the interface.
2292 */
2293 void
2294 SIP_DECL(stop)(struct ifnet *ifp, int disable)
2295 {
2296 struct sip_softc *sc = ifp->if_softc;
2297 bus_space_tag_t st = sc->sc_st;
2298 bus_space_handle_t sh = sc->sc_sh;
2299 struct sip_txsoft *txs;
2300 u_int32_t cmdsts = 0; /* DEBUG */
2301
2302 /*
2303 * Stop the one second clock.
2304 */
2305 callout_stop(&sc->sc_tick_ch);
2306
2307 /* Down the MII. */
2308 mii_down(&sc->sc_mii);
2309
2310 /*
2311 * Disable interrupts.
2312 */
2313 bus_space_write_4(st, sh, SIP_IER, 0);
2314
2315 /*
2316 * Stop receiver and transmitter.
2317 */
2318 bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);
2319
2320 /*
2321 * Release any queued transmit buffers.
2322 */
2323 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
2324 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2325 SIMPLEQ_NEXT(txs, txs_q) == NULL &&
2326 (le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts) &
2327 CMDSTS_INTR) == 0)
2328 printf("%s: sip_stop: last descriptor does not "
2329 "have INTR bit set\n", sc->sc_dev.dv_xname);
2330 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
2331 #ifdef DIAGNOSTIC
2332 if (txs->txs_mbuf == NULL) {
2333 printf("%s: dirty txsoft with no mbuf chain\n",
2334 sc->sc_dev.dv_xname);
2335 panic("sip_stop");
2336 }
2337 #endif
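/* Accumulate the last-descriptor CMDSTS bits for the IFF_DEBUG check below. */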
2338 cmdsts |= /* DEBUG */
2339 le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
2340 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2341 m_freem(txs->txs_mbuf);
2342 txs->txs_mbuf = NULL;
2343 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2344 }
2345
2346 if (disable)
2347 SIP_DECL(rxdrain)(sc);
2348
2349 /*
2350 * Mark the interface down and cancel the watchdog timer.
2351 */
2352 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2353 ifp->if_timer = 0;
2354
2355 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2356 (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC)
2357 printf("%s: sip_stop: no INTR bits set in dirty tx "
2358 "descriptors\n", sc->sc_dev.dv_xname);
2359 }
2360
2361 /*
2362 * sip_read_eeprom:
2363 *
2364 * Read data from the serial EEPROM.
2365 */
2366 void
2367 SIP_DECL(read_eeprom)(struct sip_softc *sc, int word, int wordcnt,
2368 u_int16_t *data)
2369 {
2370 bus_space_tag_t st = sc->sc_st;
2371 bus_space_handle_t sh = sc->sc_sh;
2372 u_int16_t reg;
2373 int i, x;
2374
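/*
 * Bit-bang the EEPROM's serial protocol: assert chip select, clock
 * out the READ opcode and the 6-bit word address on EEDI (one bit
 * per EESK pulse), then clock in 16 data bits from EEDO, most
 * significant bit first.
 */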
2375 for (i = 0; i < wordcnt; i++) {
2376 /* Send CHIP SELECT. */
2377 reg = EROMAR_EECS;
2378 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2379
2380 /* Shift in the READ opcode. */
2381 for (x = 3; x > 0; x--) {
2382 if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
2383 reg |= EROMAR_EEDI;
2384 else
2385 reg &= ~EROMAR_EEDI;
2386 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2387 bus_space_write_4(st, sh, SIP_EROMAR,
2388 reg | EROMAR_EESK);
2389 delay(4);
2390 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2391 delay(4);
2392 }
2393
2394 /* Shift in address. */
2395 for (x = 6; x > 0; x--) {
2396 if ((word + i) & (1 << (x - 1)))
2397 reg |= EROMAR_EEDI;
2398 else
2399 reg &= ~EROMAR_EEDI;
2400 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2401 bus_space_write_4(st, sh, SIP_EROMAR,
2402 reg | EROMAR_EESK);
2403 delay(4);
2404 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2405 delay(4);
2406 }
2407
2408 /* Shift out data. */
2409 reg = EROMAR_EECS;
2410 data[i] = 0;
2411 for (x = 16; x > 0; x--) {
2412 bus_space_write_4(st, sh, SIP_EROMAR,
2413 reg | EROMAR_EESK);
2414 delay(4);
2415 if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
2416 data[i] |= (1 << (x - 1));
2417 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2418 delay(4);
2419 }
2420
2421 /* Clear CHIP SELECT. */
2422 bus_space_write_4(st, sh, SIP_EROMAR, 0);
2423 delay(4);
2424 }
2425 }
2426
2427 /*
2428 * sip_add_rxbuf:
2429 *
2430 * Add a receive buffer to the indicated descriptor.
2431 */
2432 int
2433 SIP_DECL(add_rxbuf)(struct sip_softc *sc, int idx)
2434 {
2435 struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
2436 struct mbuf *m;
2437 int error;
2438
2439 MGETHDR(m, M_DONTWAIT, MT_DATA);
2440 if (m == NULL)
2441 return (ENOBUFS);
2442
2443 MCLGET(m, M_DONTWAIT);
2444 if ((m->m_flags & M_EXT) == 0) {
2445 m_freem(m);
2446 return (ENOBUFS);
2447 }
2448
2449 #if defined(DP83820)
2450 m->m_len = SIP_RXBUF_LEN;
2451 #endif /* DP83820 */
2452
2453 if (rxs->rxs_mbuf != NULL)
2454 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2455
2456 rxs->rxs_mbuf = m;
2457
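/* Map the new cluster for DMA before (re)initializing the descriptor below. */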
2458 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2459 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2460 BUS_DMA_READ|BUS_DMA_NOWAIT);
2461 if (error) {
2462 printf("%s: can't load rx DMA map %d, error = %d\n",
2463 sc->sc_dev.dv_xname, idx, error);
2464 panic("sip_add_rxbuf"); /* XXX */
2465 }
2466
2467 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2468 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2469
2470 SIP_INIT_RXDESC(sc, idx);
2471
2472 return (0);
2473 }
2474
2475 #if !defined(DP83820)
2476 /*
2477 * sip_sis900_set_filter:
2478 *
2479 * Set up the receive filter.
2480 */
2481 void
2482 SIP_DECL(sis900_set_filter)(struct sip_softc *sc)
2483 {
2484 bus_space_tag_t st = sc->sc_st;
2485 bus_space_handle_t sh = sc->sc_sh;
2486 struct ethercom *ec = &sc->sc_ethercom;
2487 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2488 struct ether_multi *enm;
2489 u_int8_t *cp;
2490 struct ether_multistep step;
2491 u_int32_t crc, mchash[16];
2492
2493 /*
2494 * Initialize the prototype RFCR.
2495 */
2496 sc->sc_rfcr = RFCR_RFEN;
2497 if (ifp->if_flags & IFF_BROADCAST)
2498 sc->sc_rfcr |= RFCR_AAB;
2499 if (ifp->if_flags & IFF_PROMISC) {
2500 sc->sc_rfcr |= RFCR_AAP;
2501 goto allmulti;
2502 }
2503
2504 /*
2505 * Set up the multicast address filter by passing all multicast
2506 * addresses through a CRC generator, and then using the high-order
2507 * 7 bits (8 bits on the 635/900B) as an index into the 128/256 bit multicast hash table (only
2508 * the lower 16 bits of each 32 bit multicast hash register are
2509 * valid). The high order bits select the register, while the
2510 * rest of the bits select the bit within the register.
2511 */
2512
2513 memset(mchash, 0, sizeof(mchash));
2514
2515 ETHER_FIRST_MULTI(step, ec, enm);
2516 while (enm != NULL) {
2517 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2518 /*
2519 * We must listen to a range of multicast addresses.
2520 * For now, just accept all multicasts, rather than
2521 * trying to set only those filter bits needed to match
2522 * the range. (At this time, the only use of address
2523 * ranges is for IP multicast routing, for which the
2524 * range is big enough to require all bits set.)
2525 */
2526 goto allmulti;
2527 }
2528
2529 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2530
2531 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2532 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2533 /* Just want the 8 most significant bits. */
2534 crc >>= 24;
2535 } else {
2536 /* Just want the 7 most significant bits. */
2537 crc >>= 25;
2538 }
2539
2540 /* Set the corresponding bit in the hash table. */
2541 mchash[crc >> 4] |= 1 << (crc & 0xf);
2542
2543 ETHER_NEXT_MULTI(step, enm);
2544 }
2545
2546 ifp->if_flags &= ~IFF_ALLMULTI;
2547 goto setit;
2548
2549 allmulti:
2550 ifp->if_flags |= IFF_ALLMULTI;
2551 sc->sc_rfcr |= RFCR_AAM;
2552
2553 setit:
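/*
 * FILTER_EMIT selects a receive filter register through RFCR and
 * then writes 16 bits of data to it through RFDR.
 */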
2554 #define FILTER_EMIT(addr, data) \
2555 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2556 delay(1); \
2557 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2558 delay(1)
2559
2560 /*
2561 * Disable receive filter, and program the node address.
2562 */
2563 cp = LLADDR(ifp->if_sadl);
2564 FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
2565 FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
2566 FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);
2567
2568 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2569 /*
2570 * Program the multicast hash table.
2571 */
2572 FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
2573 FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
2574 FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
2575 FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
2576 FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
2577 FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
2578 FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
2579 FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
2580 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2581 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2582 FILTER_EMIT(RFCR_RFADDR_MC8, mchash[8]);
2583 FILTER_EMIT(RFCR_RFADDR_MC9, mchash[9]);
2584 FILTER_EMIT(RFCR_RFADDR_MC10, mchash[10]);
2585 FILTER_EMIT(RFCR_RFADDR_MC11, mchash[11]);
2586 FILTER_EMIT(RFCR_RFADDR_MC12, mchash[12]);
2587 FILTER_EMIT(RFCR_RFADDR_MC13, mchash[13]);
2588 FILTER_EMIT(RFCR_RFADDR_MC14, mchash[14]);
2589 FILTER_EMIT(RFCR_RFADDR_MC15, mchash[15]);
2590 }
2591 }
2592 #undef FILTER_EMIT
2593
2594 /*
2595 * Re-enable the receive filter.
2596 */
2597 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2598 }
2599 #endif /* ! DP83820 */
2600
2601 /*
2602 * sip_dp83815_set_filter:
2603 *
2604 * Set up the receive filter.
2605 */
2606 void
2607 SIP_DECL(dp83815_set_filter)(struct sip_softc *sc)
2608 {
2609 bus_space_tag_t st = sc->sc_st;
2610 bus_space_handle_t sh = sc->sc_sh;
2611 struct ethercom *ec = &sc->sc_ethercom;
2612 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2613 struct ether_multi *enm;
2614 u_int8_t *cp;
2615 struct ether_multistep step;
2616 u_int32_t crc, hash, slot, bit;
2617 #ifdef DP83820
2618 #define MCHASH_NWORDS 128
2619 #else
2620 #define MCHASH_NWORDS 32
2621 #endif /* DP83820 */
2622 u_int16_t mchash[MCHASH_NWORDS];
2623 int i;
2624
2625 /*
2626 * Initialize the prototype RFCR.
2627 * Enable the receive filter, and accept on
2628 * Perfect (destination address) Match
2629 * If IFF_BROADCAST, also accept all broadcast packets.
2630 * If IFF_PROMISC, accept all unicast packets (and later, set
2631 * IFF_ALLMULTI and accept all multicast, too).
2632 */
2633 sc->sc_rfcr = RFCR_RFEN | RFCR_APM;
2634 if (ifp->if_flags & IFF_BROADCAST)
2635 sc->sc_rfcr |= RFCR_AAB;
2636 if (ifp->if_flags & IFF_PROMISC) {
2637 sc->sc_rfcr |= RFCR_AAP;
2638 goto allmulti;
2639 }
2640
2641 #ifdef DP83820
2642 /*
2643 * Set up the DP83820 multicast address filter by passing all multicast
2644 * addresses through a CRC generator, and then using the high-order
2645 * 11 bits as an index into the 2048 bit multicast hash table. The
2646 * high-order 7 bits select the slot, while the low-order 4 bits
2647 * select the bit within the slot. Note that only the low 16-bits
2648 * of each filter word are used, and there are 128 filter words.
2649 */
2650 #else
2651 /*
2652 * Set up the DP83815 multicast address filter by passing all multicast
2653 * addresses through a CRC generator, and then using the high-order
2654 * 9 bits as an index into the 512 bit multicast hash table. The
2655 * high-order 5 bits select the slot, while the low-order 4 bits
2656 * select the bit within the slot. Note that only the low 16-bits
2657 * of each filter word are used, and there are 32 filter words.
2658 */
2659 #endif /* DP83820 */
2660
2661 memset(mchash, 0, sizeof(mchash));
2662
2663 ifp->if_flags &= ~IFF_ALLMULTI;
2664 ETHER_FIRST_MULTI(step, ec, enm);
2665 if (enm == NULL)
2666 goto setit;
2667 while (enm != NULL) {
2668 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2669 /*
2670 * We must listen to a range of multicast addresses.
2671 * For now, just accept all multicasts, rather than
2672 * trying to set only those filter bits needed to match
2673 * the range. (At this time, the only use of address
2674 * ranges is for IP multicast routing, for which the
2675 * range is big enough to require all bits set.)
2676 */
2677 goto allmulti;
2678 }
2679
2680 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2681
2682 #ifdef DP83820
2683 /* Just want the 11 most significant bits. */
2684 hash = crc >> 21;
2685 #else
2686 /* Just want the 9 most significant bits. */
2687 hash = crc >> 23;
2688 #endif /* DP83820 */
2689
2690 slot = hash >> 4;
2691 bit = hash & 0xf;
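/* e.g. hash 0x1a3 selects slot 0x1a (0x1a3 >> 4) and bit 3 (0x1a3 & 0xf). */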
2692
2693 /* Set the corresponding bit in the hash table. */
2694 mchash[slot] |= 1 << bit;
2695
2696 ETHER_NEXT_MULTI(step, enm);
2697 }
2698 sc->sc_rfcr |= RFCR_MHEN;
2699 goto setit;
2700
2701 allmulti:
2702 ifp->if_flags |= IFF_ALLMULTI;
2703 sc->sc_rfcr |= RFCR_AAM;
2704
2705 setit:
2706 #define FILTER_EMIT(addr, data) \
2707 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2708 delay(1); \
2709 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2710 delay(1)
2711
2712 /*
2713 * Disable receive filter, and program the node address.
2714 */
2715 cp = LLADDR(ifp->if_sadl);
2716 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]);
2717 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]);
2718 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]);
2719
2720 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2721 /*
2722 * Program the multicast hash table.
2723 */
2724 for (i = 0; i < MCHASH_NWORDS; i++) {
2725 FILTER_EMIT(RFCR_NS_RFADDR_FILTMEM + (i * 2),
2726 mchash[i]);
2727 }
2728 }
2729 #undef FILTER_EMIT
2730 #undef MCHASH_NWORDS
2731
2732 /*
2733 * Re-enable the receive filter.
2734 */
2735 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2736 }
2737
2738 #if defined(DP83820)
2739 /*
2740 * sip_dp83820_mii_readreg: [mii interface function]
2741 *
2742 * Read a PHY register on the MII of the DP83820.
2743 */
2744 int
2745 SIP_DECL(dp83820_mii_readreg)(struct device *self, int phy, int reg)
2746 {
2747
2748 return (mii_bitbang_readreg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2749 phy, reg));
2750 }
2751
2752 /*
2753 * sip_dp83820_mii_writereg: [mii interface function]
2754 *
2755 * Write a PHY register on the MII of the DP83820.
2756 */
2757 void
2758 SIP_DECL(dp83820_mii_writereg)(struct device *self, int phy, int reg, int val)
2759 {
2760
2761 mii_bitbang_writereg(self, &SIP_DECL(dp83820_mii_bitbang_ops),
2762 phy, reg, val);
2763 }
2764
2765 /*
2766 * sip_dp83820_mii_statchg: [mii interface function]
2767 *
2768 * Callback from MII layer when media changes.
2769 */
2770 void
2771 SIP_DECL(dp83820_mii_statchg)(struct device *self)
2772 {
2773 struct sip_softc *sc = (struct sip_softc *) self;
2774 u_int32_t cfg;
2775
2776 /*
2777 * Update TXCFG for full-duplex operation.
2778 */
2779 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2780 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2781 else
2782 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2783
2784 /*
2785 * Update RXCFG for full-duplex or loopback.
2786 */
2787 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2788 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2789 sc->sc_rxcfg |= RXCFG_ATX;
2790 else
2791 sc->sc_rxcfg &= ~RXCFG_ATX;
2792
2793 /*
2794 * Update CFG for MII/GMII.
2795 */
2796 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
2797 cfg = sc->sc_cfg | CFG_MODE_1000;
2798 else
2799 cfg = sc->sc_cfg;
2800
2801 /*
2802 * XXX 802.3x flow control.
2803 */
2804
2805 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
2806 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
2807 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
2808 }
2809
2810 /*
2811 * sip_dp83820_mii_bitbang_read: [mii bit-bang interface function]
2812 *
2813 * Read the MII serial port for the MII bit-bang module.
2814 */
2815 u_int32_t
2816 SIP_DECL(dp83820_mii_bitbang_read)(struct device *self)
2817 {
2818 struct sip_softc *sc = (void *) self;
2819
2820 return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR));
2821 }
2822
2823 /*
2824 * sip_dp83820_mii_bitbang_write: [mii bit-bang interface function]
2825 *
2826 * Write the MII serial port for the MII bit-bang module.
2827 */
2828 void
2829 SIP_DECL(dp83820_mii_bitbang_write)(struct device *self, u_int32_t val)
2830 {
2831 struct sip_softc *sc = (void *) self;
2832
2833 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val);
2834 }
2835 #else /* ! DP83820 */
2836 /*
2837 * sip_sis900_mii_readreg: [mii interface function]
2838 *
2839 * Read a PHY register on the MII.
2840 */
2841 int
2842 SIP_DECL(sis900_mii_readreg)(struct device *self, int phy, int reg)
2843 {
2844 struct sip_softc *sc = (struct sip_softc *) self;
2845 u_int32_t enphy;
2846
2847 /*
2848 * The SiS 900 has only an internal PHY on the MII. Only allow
2849 * MII address 0.
2850 */
2851 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 &&
2852 sc->sc_rev < SIS_REV_635 && phy != 0)
2853 return (0);
2854
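/*
 * Start the access through the ENPHY register and poll until the
 * ACCESS bit clears, then pull the 16-bit result out of the data
 * field.
 */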
2855 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
2856 (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
2857 ENPHY_RWCMD | ENPHY_ACCESS);
2858 do {
2859 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
2860 } while (enphy & ENPHY_ACCESS);
2861 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
2862 }
2863
2864 /*
2865 * sip_sis900_mii_writereg: [mii interface function]
2866 *
2867 * Write a PHY register on the MII.
2868 */
2869 void
2870 SIP_DECL(sis900_mii_writereg)(struct device *self, int phy, int reg, int val)
2871 {
2872 struct sip_softc *sc = (struct sip_softc *) self;
2873 u_int32_t enphy;
2874
2875 /*
2876 * The SiS 900 has only an internal PHY on the MII. Only allow
2877 * MII address 0.
2878 */
2879 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 &&
2880 sc->sc_rev < SIS_REV_635 && phy != 0)
2881 return;
2882
2883 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
2884 (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
2885 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
2886 do {
2887 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
2888 } while (enphy & ENPHY_ACCESS);
2889 }
2890
2891 /*
2892 * sip_sis900_mii_statchg: [mii interface function]
2893 *
2894 * Callback from MII layer when media changes.
2895 */
2896 void
2897 SIP_DECL(sis900_mii_statchg)(struct device *self)
2898 {
2899 struct sip_softc *sc = (struct sip_softc *) self;
2900 u_int32_t flowctl;
2901
2902 /*
2903 * Update TXCFG for full-duplex operation.
2904 */
2905 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2906 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
2907 else
2908 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
2909
2910 /*
2911 * Update RXCFG for full-duplex or loopback.
2912 */
2913 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
2914 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
2915 sc->sc_rxcfg |= RXCFG_ATX;
2916 else
2917 sc->sc_rxcfg &= ~RXCFG_ATX;
2918
2919 /*
2920 * Update IMR for use of 802.3x flow control.
2921 */
2922 if ((sc->sc_mii.mii_media_active & IFM_FLOW) != 0) {
2923 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
2924 flowctl = FLOWCTL_FLOWEN;
2925 } else {
2926 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
2927 flowctl = 0;
2928 }
2929
2930 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
2931 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
2932 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
2933 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
2934 }
2935
2936 /*
2937 * sip_dp83815_mii_readreg: [mii interface function]
2938 *
2939 * Read a PHY register on the MII.
2940 */
2941 int
2942 SIP_DECL(dp83815_mii_readreg)(struct device *self, int phy, int reg)
2943 {
2944 struct sip_softc *sc = (struct sip_softc *) self;
2945 u_int32_t val;
2946
2947 /*
2948 * The DP83815 only has an internal PHY. Only allow
2949 * MII address 0.
2950 */
2951 if (phy != 0)
2952 return (0);
2953
2954 /*
2955 * Apparently, after a reset, the DP83815 can take a while
2956 * to respond. During this recovery period, the BMSR returns
2957 * a value of 0. Catch this -- it's not supposed to happen
2958 * (the BMSR has some hardcoded-to-1 bits), and wait for the
2959 * PHY to come back to life.
2960 *
2961 * This works out because the BMSR is the first register
2962 * read during the PHY probe process.
2963 */
2964 do {
2965 val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg));
2966 } while (reg == MII_BMSR && val == 0);
2967
2968 return (val & 0xffff);
2969 }
2970
2971 /*
2972 * sip_dp83815_mii_writereg: [mii interface function]
2973 *
2974 * Write a PHY register to the MII.
2975 */
2976 void
2977 SIP_DECL(dp83815_mii_writereg)(struct device *self, int phy, int reg, int val)
2978 {
2979 struct sip_softc *sc = (struct sip_softc *) self;
2980
2981 /*
2982 * The DP83815 only has an internal PHY. Only allow
2983 * MII address 0.
2984 */
2985 if (phy != 0)
2986 return;
2987
2988 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val);
2989 }
2990
2991 /*
2992 * sip_dp83815_mii_statchg: [mii interface function]
2993 *
2994 * Callback from MII layer when media changes.
2995 */
2996 void
2997 SIP_DECL(dp83815_mii_statchg)(struct device *self)
2998 {
2999 struct sip_softc *sc = (struct sip_softc *) self;
3000
3001 /*
3002 * Update TXCFG for full-duplex operation.
3003 */
3004 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3005 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3006 else
3007 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3008
3009 /*
3010 * Update RXCFG for full-duplex or loopback.
3011 */
3012 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3013 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3014 sc->sc_rxcfg |= RXCFG_ATX;
3015 else
3016 sc->sc_rxcfg &= ~RXCFG_ATX;
3017
3018 /*
3019 * XXX 802.3x flow control.
3020 */
3021
3022 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3023 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3024 }
3025 #endif /* DP83820 */
3026
3027 #if defined(DP83820)
3028 void
3029 SIP_DECL(dp83820_read_macaddr)(struct sip_softc *sc,
3030 const struct pci_attach_args *pa, u_int8_t *enaddr)
3031 {
3032 u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2];
3033 u_int8_t cksum, *e, match;
3034 int i;
3035
3036 /*
3037 * EEPROM data format for the DP83820 can be found in
3038 * the DP83820 manual, section 4.2.4.
3039 */
3040
3041 SIP_DECL(read_eeprom)(sc, 0,
3042 sizeof(eeprom_data) / sizeof(eeprom_data[0]), eeprom_data);
3043
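/*
 * The stored checksum byte is checked via its two's-complement
 * negation, ~(match - 1): the running sum of all preceding EEPROM
 * bytes, seeded with 0x55, is expected to equal that value.
 */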
3044 match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8;
3045 match = ~(match - 1);
3046
3047 cksum = 0x55;
3048 e = (u_int8_t *) eeprom_data;
3049 for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++)
3050 cksum += *e++;
3051
3052 if (cksum != match)
3053 printf("%s: Checksum (%x) mismatch (%x)\n",
3054 sc->sc_dev.dv_xname, cksum, match);
3055
3056 enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff;
3057 enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8;
3058 enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff;
3059 enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8;
3060 enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff;
3061 enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8;
3062
3063 /* Get the GPIOR bits. */
3064 sc->sc_gpior = eeprom_data[0x04];
3065
3066 /* Get various CFG related bits. */
3067 if (eeprom_data[0x05] & DP83820_CONFIG2_CFG_EXT_125)
3068 sc->sc_cfg |= CFG_EXT_125;
3069 if (eeprom_data[0x05] & DP83820_CONFIG2_CFG_M64ADDR)
3070 sc->sc_cfg |= CFG_M64ADDR;
3071 if (eeprom_data[0x05] & DP83820_CONFIG2_CFG_DATA64_EN)
3072 sc->sc_cfg |= CFG_DATA64_EN;
3073 if (eeprom_data[0x05] & DP83820_CONFIG2_CFG_T64ADDR)
3074 sc->sc_cfg |= CFG_T64ADDR;
3075 if (eeprom_data[0x05] & DP83820_CONFIG2_CFG_TBI_EN)
3076 sc->sc_cfg |= CFG_TBI_EN;
3077 }
3078 #else /* ! DP83820 */
3079 void
3080 SIP_DECL(sis900_read_macaddr)(struct sip_softc *sc,
3081 const struct pci_attach_args *pa, u_int8_t *enaddr)
3082 {
3083 u_int16_t myea[ETHER_ADDR_LEN / 2];
3084
3085 switch (sc->sc_rev) {
3086 case SIS_REV_630S:
3087 case SIS_REV_630E:
3088 case SIS_REV_630EA1:
3089 case SIS_REV_630ET:
3090 case SIS_REV_635:
3091 /*
3092 * The MAC address for the on-board Ethernet of
3093 * the SiS 630 chipset is in the NVRAM. Kick
3094 * the chip into re-loading it from NVRAM, and
3095 * read the MAC address out of the filter registers.
3096 */
3097 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD);
3098
3099 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3100 RFCR_RFADDR_NODE0);
3101 myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3102 0xffff;
3103
3104 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3105 RFCR_RFADDR_NODE2);
3106 myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3107 0xffff;
3108
3109 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3110 RFCR_RFADDR_NODE4);
3111 myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3112 0xffff;
3113 break;
3114
3115 default:
3116 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
3117 sizeof(myea) / sizeof(myea[0]), myea);
3118 }
3119
3120 enaddr[0] = myea[0] & 0xff;
3121 enaddr[1] = myea[0] >> 8;
3122 enaddr[2] = myea[1] & 0xff;
3123 enaddr[3] = myea[1] >> 8;
3124 enaddr[4] = myea[2] & 0xff;
3125 enaddr[5] = myea[2] >> 8;
3126 }
3127
3128 /* Table and macro to bit-reverse an octet. */
3129 static const u_int8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
3130 #define bbr(v) ((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf])
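/* Mirrors the bits within each octet, e.g. bbr(0x01) == 0x80, bbr(0x1e) == 0x78. */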
3131
3132 void
3133 SIP_DECL(dp83815_read_macaddr)(struct sip_softc *sc,
3134 const struct pci_attach_args *pa, u_int8_t *enaddr)
3135 {
3136 u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea;
3137 u_int8_t cksum, *e, match;
3138 int i;
3139
3140 SIP_DECL(read_eeprom)(sc, 0, sizeof(eeprom_data) /
3141 sizeof(eeprom_data[0]), eeprom_data);
3142
3143 match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8;
3144 match = ~(match - 1);
3145
3146 cksum = 0x55;
3147 e = (u_int8_t *) eeprom_data;
3148 for (i=0 ; i<SIP_DP83815_EEPROM_CHECKSUM ; i++) {
3149 cksum += *e++;
3150 }
3151 if (cksum != match) {
3152 printf("%s: Checksum (%x) mismatch (%x)\n",
3153 sc->sc_dev.dv_xname, cksum, match);
3154 }
3155
3156 /*
3157 * Unrolled because it makes slightly more sense this way.
3158 * The DP83815 stores the MAC address in bit 0 of word 6
3159 * through bit 15 of word 8.
3160 */
3161 ea = &eeprom_data[6];
3162 enaddr[0] = ((*ea & 0x1) << 7);
3163 ea++;
3164 enaddr[0] |= ((*ea & 0xFE00) >> 9);
3165 enaddr[1] = ((*ea & 0x1FE) >> 1);
3166 enaddr[2] = ((*ea & 0x1) << 7);
3167 ea++;
3168 enaddr[2] |= ((*ea & 0xFE00) >> 9);
3169 enaddr[3] = ((*ea & 0x1FE) >> 1);
3170 enaddr[4] = ((*ea & 0x1) << 7);
3171 ea++;
3172 enaddr[4] |= ((*ea & 0xFE00) >> 9);
3173 enaddr[5] = ((*ea & 0x1FE) >> 1);
3174
3175 /*
3176 * In case that's not weird enough, we also need to reverse
3177 * the bits in each byte. This all actually makes more sense
3178 * if you think about the EEPROM storage as an array of bits
3179 * being shifted into bytes, but that's not how we're looking
3180 * at it here...
3181 */
3182 for (i = 0; i < 6 ;i++)
3183 enaddr[i] = bbr(enaddr[i]);
3184 }
3185 #endif /* DP83820 */
3186
3187 /*
3188 * sip_mediastatus: [ifmedia interface function]
3189 *
3190 * Get the current interface media status.
3191 */
3192 void
3193 SIP_DECL(mediastatus)(struct ifnet *ifp, struct ifmediareq *ifmr)
3194 {
3195 struct sip_softc *sc = ifp->if_softc;
3196
3197 mii_pollstat(&sc->sc_mii);
3198 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3199 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3200 }
3201
3202 /*
3203 * sip_mediachange: [ifmedia interface function]
3204 *
3205 * Set hardware to newly-selected media.
3206 */
3207 int
3208 SIP_DECL(mediachange)(struct ifnet *ifp)
3209 {
3210 struct sip_softc *sc = ifp->if_softc;
3211
3212 if (ifp->if_flags & IFF_UP)
3213 mii_mediachg(&sc->sc_mii);
3214 return (0);
3215 }
3216