if_sip.c revision 1.120 1 /* $NetBSD: if_sip.c,v 1.120 2007/12/15 05:46:21 dyoung Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999 Network Computer, Inc.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of Network Computer, Inc. nor the names of its
52 * contributors may be used to endorse or promote products derived
53 * from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
56 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
66 */
67
68 /*
69 * Device driver for the Silicon Integrated Systems SiS 900,
70 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and
71 * National Semiconductor DP83820 10/100/1000 PCI Ethernet
72 * controllers.
73 *
74 * Originally written to support the SiS 900 by Jason R. Thorpe for
75 * Network Computer, Inc.
76 *
77 * TODO:
78 *
79 * - Reduce the Rx interrupt load.
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.120 2007/12/15 05:46:21 dyoung Exp $");
84
85 #include "bpfilter.h"
86 #include "rnd.h"
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/callout.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/socket.h>
95 #include <sys/ioctl.h>
96 #include <sys/errno.h>
97 #include <sys/device.h>
98 #include <sys/queue.h>
99
100 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
101
102 #if NRND > 0
103 #include <sys/rnd.h>
104 #endif
105
106 #include <net/if.h>
107 #include <net/if_dl.h>
108 #include <net/if_media.h>
109 #include <net/if_ether.h>
110
111 #if NBPFILTER > 0
112 #include <net/bpf.h>
113 #endif
114
115 #include <sys/bus.h>
116 #include <sys/intr.h>
117 #include <machine/endian.h>
118
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #include <dev/mii/mii_bitbang.h>
122
123 #include <dev/pci/pcireg.h>
124 #include <dev/pci/pcivar.h>
125 #include <dev/pci/pcidevs.h>
126
127 #include <dev/pci/if_sipreg.h>
128
129 /*
130 * Transmit descriptor list size. This is arbitrary, but allocate
 * enough descriptors for 256 pending transmissions, and 8 segments
132 * per packet (64 for DP83820 for jumbo frames).
133 *
134 * This MUST work out to a power of 2.
135 */
#define GSIP_NTXSEGS_ALLOC 16	/* Tx descriptors reserved per packet, DP83820 */
#define SIP_NTXSEGS_ALLOC 8	/* Tx descriptors reserved per packet, others */

#define SIP_TXQUEUELEN 256	/* maximum number of queued Tx packets */
/* Total Tx descriptors: sized for the larger (DP83820) per-packet quota. */
#define MAX_SIP_NTXDESC \
	(SIP_TXQUEUELEN * MAX(SIP_NTXSEGS_ALLOC, GSIP_NTXSEGS_ALLOC))

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 *
 * Actually, on the DP83820, we allow the packet to consume more than
 * one buffer, in order to support jumbo Ethernet frames.  In that
 * case, a packet may consume up to 5 buffers (assuming a 2048 byte
 * mbuf cluster).  256 receive buffers is only 51 maximum size packets,
 * so we'd better be quick about handling receive interrupts.
 */
#define GSIP_NRXDESC 256	/* Rx descriptors, DP83820 */
#define SIP_NRXDESC 128		/* Rx descriptors, others */

/* Software arrays are sized for the larger of the two ring sizes. */
#define MAX_SIP_NRXDESC MAX(GSIP_NRXDESC, SIP_NRXDESC)
157
/*
 * Control structures are DMA'd to the SiS900 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct sip_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct sip_desc scd_txdescs[MAX_SIP_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct sip_desc scd_rxdescs[MAX_SIP_NRXDESC];
};

/* Byte offset of member "x" within the control-data clump. */
#define SIP_CDOFF(x)	offsetof(struct sip_control_data, x)
/* Byte offset of Tx descriptor "x" within the clump. */
#define SIP_CDTXOFF(x)	SIP_CDOFF(scd_txdescs[(x)])
/* Byte offset of Rx descriptor "x" within the clump. */
#define SIP_CDRXOFF(x)	SIP_CDOFF(scd_rxdescs[(x)])
178
/*
 * Software state for transmit jobs.  One sip_txsoft tracks one queued
 * packet: its mbuf chain, the DMA map covering it, and the range of
 * hardware descriptors it occupies.
 */
struct sip_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	SIMPLEQ_ENTRY(sip_txsoft) txs_q; /* link on free/dirty queue */
};

SIMPLEQ_HEAD(sip_txsq, sip_txsoft);

/*
 * Software state for receive jobs.  One per Rx descriptor: the loaded
 * mbuf and the DMA map that covers it.
 */
struct sip_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
199
/*
 * Attachment stages, listed in reverse order of acquisition.
 * sipcom_do_detach() switches on one of these and falls through the
 * cases to release everything acquired up to (and including) that
 * stage; SIP_ATTACH_FIN releases everything.
 */
enum sip_attach_stage {
	  SIP_ATTACH_FIN = 0		/* fully attached */
	, SIP_ATTACH_CREATE_RXMAP	/* Rx DMA maps created */
	, SIP_ATTACH_CREATE_TXMAP	/* Tx DMA maps created */
	, SIP_ATTACH_LOAD_MAP		/* control-data DMA map loaded */
	, SIP_ATTACH_CREATE_MAP		/* control-data DMA map created */
	, SIP_ATTACH_MAP_MEM		/* control-data memory mapped */
	, SIP_ATTACH_ALLOC_MEM		/* control-data memory allocated */
	, SIP_ATTACH_BEGIN		/* nothing acquired yet */
};
210
/*
 * Software state per device.
 */
struct sip_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	bus_dma_segment_t sc_seg;	/* control-data DMA segment */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	const struct sip_product *sc_model; /* which model are we? */
	int sc_gigabit;			/* 1: 83820, 0: other */
	int sc_rev;			/* chip revision */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
	struct sip_rxsoft sc_rxsoft[MAX_SIP_NRXDESC];

	/*
	 * Control data structures (the DMA'd descriptor rings).
	 */
	struct sip_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->scd_txdescs
#define sc_rxdescs	sc_control_data->scd_rxdescs

#ifdef SIP_EVENT_COUNTERS
	/*
	 * Event counters.
	 */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdintr;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txiintr;	/* Tx idle interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_hiberr;	/* HIBERR interrupts */
	struct evcnt sc_ev_rxpause;	/* PAUSE received */
	/* DP83820 only */
	struct evcnt sc_ev_txpause;	/* PAUSE transmitted */
	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtcpsum;	/* TCP checksums checked in-bound */
	struct evcnt sc_ev_rxudpsum;	/* UDP checksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtcpsum;	/* TCP checksums comp. out-bound */
	struct evcnt sc_ev_txudpsum;	/* UDP checksums comp. out-bound */
#endif /* SIP_EVENT_COUNTERS */

	/* Prototype register values, written to the chip at init time. */
	u_int32_t sc_txcfg;		/* prototype TXCFG register */
	u_int32_t sc_rxcfg;		/* prototype RXCFG register */
	u_int32_t sc_imr;		/* prototype IMR register */
	u_int32_t sc_rfcr;		/* prototype RFCR register */

	u_int32_t sc_cfg;		/* prototype CFG register */

	u_int32_t sc_gpior;		/* prototype GPIOR register */

	u_int32_t sc_tx_fill_thresh;	/* transmit fill threshold */
	u_int32_t sc_tx_drain_thresh;	/* transmit drain threshold */

	u_int32_t sc_rx_drain_thresh;	/* receive drain threshold */

	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_rx_flow_thresh;		/* Rx FIFO threshold for flow control */
	int sc_paused;			/* paused indication */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txwin;			/* Tx descriptors since last intr */

	struct sip_txsq sc_txfreeq;	/* free Tx descsofts */
	struct sip_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	/* values of interface state at last init */
	struct {
		/* if_capenable */
		uint64_t	if_capenable;
		/* ec_capenable */
		int		ec_capenable;
		/* VLAN_ATTACHED */
		int		is_vlan;
	} sc_prev;

	short sc_if_flags;		/* if_flags at last init/ioctl */

	int sc_rxptr;			/* next ready Rx descriptor/descsoft */
	int sc_rxdiscard;		/* discarding remainder of a packet */
	int sc_rxlen;			/* length accumulated in sc_rxhead */
	struct mbuf *sc_rxhead;		/* Rx chain in progress (83820) */
	struct mbuf *sc_rxtail;		/* last mbuf of that chain */
	struct mbuf **sc_rxtailp;	/* where to link the next mbuf */

	int sc_ntxdesc;			/* number of Tx descriptors */
	int sc_ntxdesc_mask;		/* sc_ntxdesc - 1 (ring is 2^n) */

	int sc_nrxdesc_mask;		/* Rx ring size - 1 (ring is 2^n) */

	/*
	 * Per-variant parameters: register offsets, bit layouts, and
	 * ring/buffer sizing, selected at attach time (sip_parm or
	 * gsip_parm below).
	 */
	const struct sip_parm {
		const struct sip_regs {
			int r_rxcfg;
			int r_txcfg;
		} p_regs;

		const struct sip_bits {
			uint32_t b_txcfg_mxdma_8;
			uint32_t b_txcfg_mxdma_16;
			uint32_t b_txcfg_mxdma_32;
			uint32_t b_txcfg_mxdma_64;
			uint32_t b_txcfg_mxdma_128;
			uint32_t b_txcfg_mxdma_256;
			uint32_t b_txcfg_mxdma_512;
			uint32_t b_txcfg_flth_mask;
			uint32_t b_txcfg_drth_mask;

			uint32_t b_rxcfg_mxdma_8;
			uint32_t b_rxcfg_mxdma_16;
			uint32_t b_rxcfg_mxdma_32;
			uint32_t b_rxcfg_mxdma_64;
			uint32_t b_rxcfg_mxdma_128;
			uint32_t b_rxcfg_mxdma_256;
			uint32_t b_rxcfg_mxdma_512;

			uint32_t b_isr_txrcmp;
			uint32_t b_isr_rxrcmp;
			uint32_t b_isr_dperr;
			uint32_t b_isr_sserr;
			uint32_t b_isr_rmabt;
			uint32_t b_isr_rtabt;

			uint32_t b_cmdsts_size_mask;
		} p_bits;
		int		p_filtmem;	/* filter-memory base offset */
		int		p_rxbuf_len;	/* Rx buffer length */
		bus_size_t	p_tx_dmamap_size; /* max Tx DMA map size */
		int		p_ntxsegs;	/* max Tx segments per packet */
		int		p_ntxsegs_alloc; /* Tx descs reserved/packet */
		int		p_nrxdesc;	/* Rx ring size */
	} *sc_parm;

	void (*sc_rxintr)(struct sip_softc *); /* variant Rx interrupt hook */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

/* Shorthand accessors for the per-variant parameter sub-structures. */
#define	sc_bits	sc_parm->p_bits
#define	sc_regs	sc_parm->p_regs
372
/*
 * Per-variant parameter tables: sip_parm for the 10/100 chips
 * (SiS 900/7016, DP83815), gsip_parm for the gigabit DP83820.
 * One of these is hung off sc_parm at attach time.
 */
static const struct sip_parm sip_parm = {
	  .p_filtmem = OTHER_RFCR_NS_RFADDR_FILTMEM
	, .p_rxbuf_len = MCLBYTES - 1	/* field width */
	, .p_tx_dmamap_size = MCLBYTES
	, .p_ntxsegs = 16
	, .p_ntxsegs_alloc = SIP_NTXSEGS_ALLOC
	, .p_nrxdesc = SIP_NRXDESC
	, .p_bits = {
		  .b_txcfg_mxdma_8	= 0x00200000	/*       8 bytes */
		, .b_txcfg_mxdma_16	= 0x00300000	/*      16 bytes */
		, .b_txcfg_mxdma_32	= 0x00400000	/*      32 bytes */
		, .b_txcfg_mxdma_64	= 0x00500000	/*      64 bytes */
		, .b_txcfg_mxdma_128	= 0x00600000	/*     128 bytes */
		, .b_txcfg_mxdma_256	= 0x00700000	/*     256 bytes */
		, .b_txcfg_mxdma_512	= 0x00000000	/*     512 bytes */
		, .b_txcfg_flth_mask	= 0x00003f00	/* Tx fill threshold */
		, .b_txcfg_drth_mask	= 0x0000003f	/* Tx drain threshold */

		, .b_rxcfg_mxdma_8	= 0x00200000	/*       8 bytes */
		, .b_rxcfg_mxdma_16	= 0x00300000	/*      16 bytes */
		, .b_rxcfg_mxdma_32	= 0x00400000	/*      32 bytes */
		, .b_rxcfg_mxdma_64	= 0x00500000	/*      64 bytes */
		, .b_rxcfg_mxdma_128	= 0x00600000	/*     128 bytes */
		, .b_rxcfg_mxdma_256	= 0x00700000	/*     256 bytes */
		, .b_rxcfg_mxdma_512	= 0x00000000	/*     512 bytes */

		, .b_isr_txrcmp	= 0x02000000	/* transmit reset complete */
		, .b_isr_rxrcmp	= 0x01000000	/* receive reset complete */
		, .b_isr_dperr	= 0x00800000	/* detected parity error */
		, .b_isr_sserr	= 0x00400000	/* signalled system error */
		, .b_isr_rmabt	= 0x00200000	/* received master abort */
		, .b_isr_rtabt	= 0x00100000	/* received target abort */
		, .b_cmdsts_size_mask = OTHER_CMDSTS_SIZE_MASK
	}
	, .p_regs = {
		.r_rxcfg = OTHER_SIP_RXCFG,
		.r_txcfg = OTHER_SIP_TXCFG
	}
}, gsip_parm = {
	  .p_filtmem = DP83820_RFCR_NS_RFADDR_FILTMEM
	, .p_rxbuf_len = MCLBYTES - 8
	, .p_tx_dmamap_size = ETHER_MAX_LEN_JUMBO
	, .p_ntxsegs = 64
	, .p_ntxsegs_alloc = GSIP_NTXSEGS_ALLOC
	, .p_nrxdesc = GSIP_NRXDESC
	, .p_bits = {
		  .b_txcfg_mxdma_8	= 0x00100000	/*       8 bytes */
		, .b_txcfg_mxdma_16	= 0x00200000	/*      16 bytes */
		, .b_txcfg_mxdma_32	= 0x00300000	/*      32 bytes */
		, .b_txcfg_mxdma_64	= 0x00400000	/*      64 bytes */
		, .b_txcfg_mxdma_128	= 0x00500000	/*     128 bytes */
		, .b_txcfg_mxdma_256	= 0x00600000	/*     256 bytes */
		, .b_txcfg_mxdma_512	= 0x00700000	/*     512 bytes */
		, .b_txcfg_flth_mask	= 0x0000ff00	/* Tx fill threshold */
		, .b_txcfg_drth_mask	= 0x000000ff	/* Tx drain threshold */

		, .b_rxcfg_mxdma_8	= 0x00100000	/*       8 bytes */
		, .b_rxcfg_mxdma_16	= 0x00200000	/*      16 bytes */
		, .b_rxcfg_mxdma_32	= 0x00300000	/*      32 bytes */
		, .b_rxcfg_mxdma_64	= 0x00400000	/*      64 bytes */
		, .b_rxcfg_mxdma_128	= 0x00500000	/*     128 bytes */
		, .b_rxcfg_mxdma_256	= 0x00600000	/*     256 bytes */
		, .b_rxcfg_mxdma_512	= 0x00700000	/*     512 bytes */

		, .b_isr_txrcmp	= 0x00400000	/* transmit reset complete */
		, .b_isr_rxrcmp	= 0x00200000	/* receive reset complete */
		, .b_isr_dperr	= 0x00100000	/* detected parity error */
		, .b_isr_sserr	= 0x00080000	/* signalled system error */
		, .b_isr_rmabt	= 0x00040000	/* received master abort */
		, .b_isr_rtabt	= 0x00020000	/* received target abort */
		, .b_cmdsts_size_mask = DP83820_CMDSTS_SIZE_MASK
	}
	, .p_regs = {
		.r_rxcfg = DP83820_SIP_RXCFG,
		.r_txcfg = DP83820_SIP_TXCFG
	}
};
450
451 static inline int
452 sip_nexttx(const struct sip_softc *sc, int x)
453 {
454 return (x + 1) & sc->sc_ntxdesc_mask;
455 }
456
457 static inline int
458 sip_nextrx(const struct sip_softc *sc, int x)
459 {
460 return (x + 1) & sc->sc_nrxdesc_mask;
461 }
462
/*
 * Reset the software Rx mbuf chain used when a jumbo packet spans
 * several buffers (83820 only): head/tail pointers cleared, length 0.
 */
#define SIP_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf "m" to the software Rx chain (83820 only) and advance
 * the tail pointer to m's m_next link.
 */
#define SIP_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef SIP_EVENT_COUNTERS
#define SIP_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define SIP_EVCNT_INCR(ev)	/* nothing */
#endif

/* DMA (bus) address of Tx/Rx descriptor "x" in the control-data clump. */
#define SIP_CDTXADDR(sc, x)	((sc)->sc_cddma + SIP_CDTXOFF((x)))
#define SIP_CDRXADDR(sc, x)	((sc)->sc_cddma + SIP_CDRXOFF((x)))

/*
 * Sync "n" Tx descriptors starting at index "x" for the given bus_dma
 * ops, handling wrap-around of the ring with two sync calls if needed.
 */
#define SIP_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > sc->sc_ntxdesc) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    SIP_CDTXOFF(__x), sizeof(struct sip_desc) *		\
		    (sc->sc_ntxdesc - __x), (ops));			\
		__n -= (sc->sc_ntxdesc - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops));	\
} while (0)

/* Sync a single Rx descriptor for the given bus_dma ops. */
#define SIP_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))
511
512 #if 0
513 #ifdef DP83820
514 u_int32_t sipd_bufptr; /* pointer to DMA segment */
515 u_int32_t sipd_cmdsts; /* command/status word */
516 #else
517 u_int32_t sipd_cmdsts; /* command/status word */
518 u_int32_t sipd_bufptr; /* pointer to DMA segment */
519 #endif /* DP83820 */
520 #endif /* 0 */
521
522 static inline volatile uint32_t *
523 sipd_cmdsts(struct sip_softc *sc, struct sip_desc *sipd)
524 {
525 return &sipd->sipd_cbs[(sc->sc_gigabit) ? 1 : 0];
526 }
527
528 static inline volatile uint32_t *
529 sipd_bufptr(struct sip_softc *sc, struct sip_desc *sipd)
530 {
531 return &sipd->sipd_cbs[(sc->sc_gigabit) ? 0 : 1];
532 }
533
/*
 * SIP_INIT_RXDESC --
 *	(Re)initialize Rx descriptor "x": link it to the next ring slot,
 *	point it at the mbuf currently loaded in the matching rxsoft DMA
 *	map, set the command/status word, and sync the descriptor so the
 *	chip sees the update.
 */
static inline void
SIP_INIT_RXDESC(struct sip_softc *sc, int x)
{
	struct sip_rxsoft *rxs = &sc->sc_rxsoft[x];
	struct sip_desc *sipd = &sc->sc_rxdescs[x];

	/* Ring stays circular: link wraps via sip_nextrx(). */
	sipd->sipd_link = htole32(SIP_CDRXADDR(sc, sip_nextrx(sc, x)));
	*sipd_bufptr(sc, sipd) = htole32(rxs->rxs_dmamap->dm_segs[0].ds_addr);
	/*
	 * Buffer length is masked to the chip's size field;
	 * CMDSTS_INTR presumably requests an Rx interrupt for this
	 * descriptor -- confirm against if_sipreg.h.
	 */
	*sipd_cmdsts(sc, sipd) = htole32(CMDSTS_INTR |
	    (sc->sc_parm->p_rxbuf_len & sc->sc_bits.b_cmdsts_size_mask));
	sipd->sipd_extsts = 0;
	SIP_CDRXSYNC(sc, x, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
547
/* True if the attached chip matches vendor "v", product "p", rev "r". */
#define SIP_CHIP_VERS(sc, v, p, r)					\
	((sc)->sc_model->sip_vendor == (v) &&				\
	 (sc)->sc_model->sip_product == (p) &&				\
	 (sc)->sc_rev == (r))

/* Same, but ignoring the revision. */
#define SIP_CHIP_MODEL(sc, v, p)					\
	((sc)->sc_model->sip_vendor == (v) &&				\
	 (sc)->sc_model->sip_product == (p))

/* Convenience test for a specific SiS 900 revision. */
#define SIP_SIS900_REV(sc, rev)						\
	SIP_CHIP_VERS((sc), PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, (rev))

/* Generic spin-loop iteration limit for chip operations. */
#define SIP_TIMEOUT 1000
561
/* ifnet entry points */
static void	sipcom_start(struct ifnet *);
static void	sipcom_watchdog(struct ifnet *);
static int	sipcom_ioctl(struct ifnet *, u_long, void *);
static int	sipcom_init(struct ifnet *);
static void	sipcom_stop(struct ifnet *, int);

static void	sipcom_shutdown(void *);

/* Internal helpers */
static bool	sipcom_reset(struct sip_softc *);
static void	sipcom_rxdrain(struct sip_softc *);
static int	sipcom_add_rxbuf(struct sip_softc *, int);
static void	sipcom_read_eeprom(struct sip_softc *, int, int,
		    u_int16_t *);
static void	sipcom_tick(void *);

/* Per-variant receive-filter programming */
static void	sipcom_sis900_set_filter(struct sip_softc *);
static void	sipcom_dp83815_set_filter(struct sip_softc *);

/* Per-variant MAC-address extraction */
static void	sipcom_dp83820_read_macaddr(struct sip_softc *,
		    const struct pci_attach_args *, u_int8_t *);
static void	sipcom_sis900_eeprom_delay(struct sip_softc *sc);
static void	sipcom_sis900_read_macaddr(struct sip_softc *,
		    const struct pci_attach_args *, u_int8_t *);
static void	sipcom_dp83815_read_macaddr(struct sip_softc *,
		    const struct pci_attach_args *, u_int8_t *);

/* Interrupt handling */
static int	sipcom_intr(void *);
static void	sipcom_txintr(struct sip_softc *);
static void	sip_rxintr(struct sip_softc *);
static void	gsip_rxintr(struct sip_softc *);

/* Per-variant MII access and status-change handlers */
static int	sipcom_dp83820_mii_readreg(struct device *, int, int);
static void	sipcom_dp83820_mii_writereg(struct device *, int, int, int);
static void	sipcom_dp83820_mii_statchg(struct device *);

static int	sipcom_sis900_mii_readreg(struct device *, int, int);
static void	sipcom_sis900_mii_writereg(struct device *, int, int, int);
static void	sipcom_sis900_mii_statchg(struct device *);

static int	sipcom_dp83815_mii_readreg(struct device *, int, int);
static void	sipcom_dp83815_mii_writereg(struct device *, int, int, int);
static void	sipcom_dp83815_mii_statchg(struct device *);

/* ifmedia callbacks */
static int	sipcom_mediachange(struct ifnet *);
static void	sipcom_mediastatus(struct ifnet *, struct ifmediareq *);

/* Autoconfiguration glue */
static int	sipcom_match(struct device *, struct cfdata *, void *);
static void	sipcom_attach(struct device *, struct device *, void *);
static void	sipcom_do_detach(device_t, enum sip_attach_stage);
static int	sipcom_detach(device_t, int);
static bool	sipcom_resume(device_t);

/* When set, copy small Rx packets instead of swapping mbuf clusters. */
static int	gsip_copy_small = 0;	/* XXX make non-static! */
static int	sip_copy_small = 0;	/* XXX make non-static! */

CFATTACH_DECL(gsip, sizeof(struct sip_softc),
    sipcom_match, sipcom_attach, sipcom_detach, NULL);
CFATTACH_DECL(sip, sizeof(struct sip_softc),
    sipcom_match, sipcom_attach, sipcom_detach, NULL);
621
/*
 * Descriptions of the variants of the SiS900: the per-chip callbacks
 * for MII access, media status changes, receive-filter programming,
 * and MAC-address extraction.
 */
struct sip_variant {
	int	(*sipv_mii_readreg)(struct device *, int, int);
	void	(*sipv_mii_writereg)(struct device *, int, int, int);
	void	(*sipv_mii_statchg)(struct device *);
	void	(*sipv_set_filter)(struct sip_softc *);
	void	(*sipv_read_macaddr)(struct sip_softc *,
		    const struct pci_attach_args *, u_int8_t *);
};
633
static u_int32_t sipcom_mii_bitbang_read(struct device *);
static void	sipcom_mii_bitbang_write(struct device *, u_int32_t);

/*
 * Bit-banged MII access via the EROMAR register, for chips without a
 * hardware MII interface.  Maps the generic mii_bitbang bit roles onto
 * the EROMAR_MD* bits.
 */
static const struct mii_bitbang_ops sipcom_mii_bitbang_ops = {
	sipcom_mii_bitbang_read,
	sipcom_mii_bitbang_write,
	{
		EROMAR_MDIO,		/* MII_BIT_MDO */
		EROMAR_MDIO,		/* MII_BIT_MDI */
		EROMAR_MDC,		/* MII_BIT_MDC */
		EROMAR_MDDIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
648
/*
 * Callback tables for each supported chip family.  Note the DP83820
 * shares the DP83815's receive-filter routine.
 */
static const struct sip_variant sipcom_variant_dp83820 = {
	sipcom_dp83820_mii_readreg,
	sipcom_dp83820_mii_writereg,
	sipcom_dp83820_mii_statchg,
	sipcom_dp83815_set_filter,
	sipcom_dp83820_read_macaddr,
};

static const struct sip_variant sipcom_variant_sis900 = {
	sipcom_sis900_mii_readreg,
	sipcom_sis900_mii_writereg,
	sipcom_sis900_mii_statchg,
	sipcom_sis900_set_filter,
	sipcom_sis900_read_macaddr,
};

static const struct sip_variant sipcom_variant_dp83815 = {
	sipcom_dp83815_mii_readreg,
	sipcom_dp83815_mii_writereg,
	sipcom_dp83815_mii_statchg,
	sipcom_dp83815_set_filter,
	sipcom_dp83815_read_macaddr,
};
672
673
/*
 * Devices supported by this driver.  The table is terminated by an
 * entry with a NULL sip_name; sip_gigabit selects between the "gsip"
 * (DP83820) and "sip" attachments.
 */
static const struct sip_product {
	pci_vendor_id_t		sip_vendor;	/* PCI vendor ID */
	pci_product_id_t	sip_product;	/* PCI product ID */
	const char		*sip_name;	/* device description */
	const struct sip_variant *sip_variant;	/* per-chip callbacks */
	int			sip_gigabit;	/* 1: DP83820, 0: 10/100 */
} sipcom_products[] = {
	{ PCI_VENDOR_NS,	PCI_PRODUCT_NS_DP83820,
	  "NatSemi DP83820 Gigabit Ethernet",
	  &sipcom_variant_dp83820, 1 },
	{ PCI_VENDOR_SIS,	PCI_PRODUCT_SIS_900,
	  "SiS 900 10/100 Ethernet",
	  &sipcom_variant_sis900, 0 },
	{ PCI_VENDOR_SIS,	PCI_PRODUCT_SIS_7016,
	  "SiS 7016 10/100 Ethernet",
	  &sipcom_variant_sis900, 0 },

	{ PCI_VENDOR_NS,	PCI_PRODUCT_NS_DP83815,
	  "NatSemi DP83815 10/100 Ethernet",
	  &sipcom_variant_dp83815, 0 },

	{ 0,			0,
	  NULL,
	  NULL, 0 },
};
702
703 static const struct sip_product *
704 sipcom_lookup(const struct pci_attach_args *pa, bool gigabit)
705 {
706 const struct sip_product *sip;
707
708 for (sip = sipcom_products; sip->sip_name != NULL; sip++) {
709 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
710 PCI_PRODUCT(pa->pa_id) == sip->sip_product &&
711 sip->sip_gigabit == gigabit)
712 return sip;
713 }
714 return NULL;
715 }
716
717 /*
718 * I really hate stupid hardware vendors. There's a bit in the EEPROM
719 * which indicates if the card can do 64-bit data transfers. Unfortunately,
720 * several vendors of 32-bit cards fail to clear this bit in the EEPROM,
721 * which means we try to use 64-bit data transfers on those cards if we
722 * happen to be plugged into a 32-bit slot.
723 *
724 * What we do is use this table of cards known to be 64-bit cards. If
725 * you have a 64-bit card who's subsystem ID is not listed in this table,
726 * send the output of "pcictl dump ..." of the device to me so that your
727 * card will use the 64-bit data path when plugged into a 64-bit slot.
728 *
729 * -- Jason R. Thorpe <thorpej (at) NetBSD.org>
730 * June 30, 2002
731 */
732 static int
733 sipcom_check_64bit(const struct pci_attach_args *pa)
734 {
735 static const struct {
736 pci_vendor_id_t c64_vendor;
737 pci_product_id_t c64_product;
738 } card64[] = {
739 /* Asante GigaNIX */
740 { 0x128a, 0x0002 },
741
742 /* Accton EN1407-T, Planex GN-1000TE */
743 { 0x1113, 0x1407 },
744
745 /* Netgear GA-621 */
746 { 0x1385, 0x621a },
747
748 /* SMC EZ Card */
749 { 0x10b8, 0x9462 },
750
751 { 0, 0}
752 };
753 pcireg_t subsys;
754 int i;
755
756 subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
757
758 for (i = 0; card64[i].c64_vendor != 0; i++) {
759 if (PCI_VENDOR(subsys) == card64[i].c64_vendor &&
760 PCI_PRODUCT(subsys) == card64[i].c64_product)
761 return (1);
762 }
763
764 return (0);
765 }
766
767 static int
768 sipcom_match(struct device *parent, struct cfdata *cf, void *aux)
769 {
770 struct pci_attach_args *pa = aux;
771
772 if (sipcom_lookup(pa, strcmp(cf->cf_name, "gsip") == 0) != NULL)
773 return 1;
774
775 return 0;
776 }
777
/*
 * sipcom_dp83820_attach --
 *	DP83820-specific attach work: trigger an EEPROM configuration
 *	load, snapshot GPIOR, and accumulate the prototype CFG register
 *	value in sc->sc_cfg (64-bit data path, 125MHz clock, TBI,
 *	MRM/MWI disables, extended descriptor format).
 */
static void
sipcom_dp83820_attach(struct sip_softc *sc, struct pci_attach_args *pa)
{
	u_int32_t reg;
	int i;

	/*
	 * Cause the chip to load configuration data from the EEPROM.
	 * EELOAD_EN self-clears when the load completes; poll for up
	 * to 10000 * 10us.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_PTSCR, PTSCR_EELOAD_EN);
	for (i = 0; i < 10000; i++) {
		delay(10);
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
		     PTSCR_EELOAD_EN) == 0)
			break;
	}
	if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
	    PTSCR_EELOAD_EN) {
		printf("%s: timeout loading configuration from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	sc->sc_gpior = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_GPIOR);

	reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
	if (reg & CFG_PCI64_DET) {
		printf("%s: 64-bit PCI slot detected", sc->sc_dev.dv_xname);
		/*
		 * Check to see if this card is 64-bit.  If so, enable 64-bit
		 * data transfers.
		 *
		 * We can't use the DATA64_EN bit in the EEPROM, because
		 * vendors of 32-bit cards fail to clear that bit in many
		 * cases (yet the card still detects that it's in a 64-bit
		 * slot; go figure).
		 */
		if (sipcom_check_64bit(pa)) {
			sc->sc_cfg |= CFG_DATA64_EN;
			printf(", using 64-bit data transfers");
		}
		printf("\n");
	}

	/*
	 * XXX Need some PCI flags indicating support for
	 * XXX 64-bit addressing.
	 */
#if 0
	if (reg & CFG_M64ADDR)
		sc->sc_cfg |= CFG_M64ADDR;
	if (reg & CFG_T64ADDR)
		sc->sc_cfg |= CFG_T64ADDR;
#endif

	/* Report and latch optional clocking/interface features. */
	if (reg & (CFG_TBI_EN|CFG_EXT_125)) {
		const char *sep = "";
		printf("%s: using ", sc->sc_dev.dv_xname);
		if (reg & CFG_EXT_125) {
			sc->sc_cfg |= CFG_EXT_125;
			printf("%s125MHz clock", sep);
			sep = ", ";
		}
		if (reg & CFG_TBI_EN) {
			sc->sc_cfg |= CFG_TBI_EN;
			printf("%sten-bit interface", sep);
			sep = ", ";
		}
		printf("\n");
	}
	/* Disable memory-read-multiple / MWI unless both bus and chip allow. */
	if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0 ||
	    (reg & CFG_MRM_DIS) != 0)
		sc->sc_cfg |= CFG_MRM_DIS;
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0 ||
	    (reg & CFG_MWI_DIS) != 0)
		sc->sc_cfg |= CFG_MWI_DIS;

	/*
	 * Use the extended descriptor format on the DP83820.  This
	 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
	 * checksumming.
	 */
	sc->sc_cfg |= CFG_EXTSTS_EN;
}
862
/*
 * sipcom_detach --
 *	Autoconfiguration detach entry point.  Releases every resource
 *	acquired during attach ("flags" is unused).
 */
static int
sipcom_detach(device_t self, int flags)
{
	sipcom_do_detach(self, SIP_ATTACH_FIN);
	return 0;
}
869
/*
 * sipcom_do_detach --
 *	Release resources acquired up to the given attach stage.  The
 *	switch cases are ordered newest-acquisition-first and each one
 *	deliberately falls through to the next, so passing
 *	SIP_ATTACH_FIN tears down everything while an intermediate
 *	stage releases only what had been acquired so far.
 */
static void
sipcom_do_detach(device_t self, enum sip_attach_stage stage)
{
	int i;
	struct sip_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Free any resources we've allocated during attach.
	 * Do this in reverse order and fall through.
	 */
	switch (stage) {
	case SIP_ATTACH_FIN:
		sipcom_stop(ifp, 1);
		pmf_device_deregister(self);
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);

		if (sc->sc_sdhook != NULL)
			shutdownhook_disestablish(sc->sc_sdhook);

		/*FALLTHROUGH*/
	case SIP_ATTACH_CREATE_RXMAP:
		/* Rx maps may be sparsely created; only destroy real ones. */
		for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) {
			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_rxsoft[i].rxs_dmamap);
		}
		/*FALLTHROUGH*/
	case SIP_ATTACH_CREATE_TXMAP:
		for (i = 0; i < SIP_TXQUEUELEN; i++) {
			if (sc->sc_txsoft[i].txs_dmamap != NULL)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_txsoft[i].txs_dmamap);
		}
		/*FALLTHROUGH*/
	case SIP_ATTACH_LOAD_MAP:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case SIP_ATTACH_CREATE_MAP:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case SIP_ATTACH_MAP_MEM:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
		    sizeof(struct sip_control_data));
		/*FALLTHROUGH*/
	case SIP_ATTACH_ALLOC_MEM:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_seg, 1);
		break;
	default:
		/* SIP_ATTACH_BEGIN: nothing was acquired. */
		break;
	}
	return;
}
924
925 static bool
926 sipcom_resume(device_t self)
927 {
928 struct sip_softc *sc = device_private(self);
929
930 return sipcom_reset(sc);
931 }
932
933 static void
934 sipcom_attach(device_t parent, device_t self, void *aux)
935 {
936 struct sip_softc *sc = (struct sip_softc *) self;
937 struct pci_attach_args *pa = aux;
938 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
939 pci_chipset_tag_t pc = pa->pa_pc;
940 pci_intr_handle_t ih;
941 const char *intrstr = NULL;
942 bus_space_tag_t iot, memt;
943 bus_space_handle_t ioh, memh;
944 int ioh_valid, memh_valid;
945 int i, rseg, error;
946 const struct sip_product *sip;
947 u_int8_t enaddr[ETHER_ADDR_LEN];
948 pcireg_t pmreg;
949 pcireg_t memtype;
950 bus_size_t tx_dmamap_size;
951 int ntxsegs_alloc;
952 cfdata_t cf = device_cfdata(self);
953
954 callout_init(&sc->sc_tick_ch, 0);
955
956 sip = sipcom_lookup(pa, strcmp(cf->cf_name, "gsip") == 0);
957 if (sip == NULL) {
958 printf("\n");
959 panic("%s: impossible", __func__);
960 }
961 sc->sc_gigabit = sip->sip_gigabit;
962
963 sc->sc_pc = pc;
964
965 if (sc->sc_gigabit) {
966 sc->sc_rxintr = gsip_rxintr;
967 sc->sc_parm = &gsip_parm;
968 } else {
969 sc->sc_rxintr = sip_rxintr;
970 sc->sc_parm = &sip_parm;
971 }
972 tx_dmamap_size = sc->sc_parm->p_tx_dmamap_size;
973 ntxsegs_alloc = sc->sc_parm->p_ntxsegs_alloc;
974 sc->sc_ntxdesc = SIP_TXQUEUELEN * ntxsegs_alloc;
975 sc->sc_ntxdesc_mask = sc->sc_ntxdesc - 1;
976 sc->sc_nrxdesc_mask = sc->sc_parm->p_nrxdesc - 1;
977
978 sc->sc_rev = PCI_REVISION(pa->pa_class);
979
980 printf(": %s, rev %#02x\n", sip->sip_name, sc->sc_rev);
981
982 sc->sc_model = sip;
983
984 /*
985 * XXX Work-around broken PXE firmware on some boards.
986 *
987 * The DP83815 shares an address decoder with the MEM BAR
988 * and the ROM BAR. Make sure the ROM BAR is disabled,
989 * so that memory mapped access works.
990 */
991 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM,
992 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM) &
993 ~PCI_MAPREG_ROM_ENABLE);
994
995 /*
996 * Map the device.
997 */
998 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
999 PCI_MAPREG_TYPE_IO, 0,
1000 &iot, &ioh, NULL, NULL) == 0);
1001 if (sc->sc_gigabit) {
1002 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA);
1003 switch (memtype) {
1004 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1005 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1006 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
1007 memtype, 0, &memt, &memh, NULL, NULL) == 0);
1008 break;
1009 default:
1010 memh_valid = 0;
1011 }
1012 } else {
1013 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
1014 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
1015 &memt, &memh, NULL, NULL) == 0);
1016 }
1017
1018 if (memh_valid) {
1019 sc->sc_st = memt;
1020 sc->sc_sh = memh;
1021 } else if (ioh_valid) {
1022 sc->sc_st = iot;
1023 sc->sc_sh = ioh;
1024 } else {
1025 printf("%s: unable to map device registers\n",
1026 sc->sc_dev.dv_xname);
1027 return;
1028 }
1029
1030 sc->sc_dmat = pa->pa_dmat;
1031
1032 /*
1033 * Make sure bus mastering is enabled. Also make sure
1034 * Write/Invalidate is enabled if we're allowed to use it.
1035 */
1036 pmreg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1037 if (pa->pa_flags & PCI_FLAGS_MWI_OKAY)
1038 pmreg |= PCI_COMMAND_INVALIDATE_ENABLE;
1039 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1040 pmreg | PCI_COMMAND_MASTER_ENABLE);
1041
1042 /* power up chip */
1043 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
1044 NULL)) && error != EOPNOTSUPP) {
1045 aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
1046 error);
1047 return;
1048 }
1049
1050 /*
1051 * Map and establish our interrupt.
1052 */
1053 if (pci_intr_map(pa, &ih)) {
1054 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
1055 return;
1056 }
1057 intrstr = pci_intr_string(pc, ih);
1058 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sipcom_intr, sc);
1059 if (sc->sc_ih == NULL) {
1060 printf("%s: unable to establish interrupt",
1061 sc->sc_dev.dv_xname);
1062 if (intrstr != NULL)
1063 printf(" at %s", intrstr);
1064 printf("\n");
1065 return;
1066 }
1067 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
1068
1069 SIMPLEQ_INIT(&sc->sc_txfreeq);
1070 SIMPLEQ_INIT(&sc->sc_txdirtyq);
1071
1072 /*
1073 * Allocate the control data structures, and create and load the
1074 * DMA map for it.
1075 */
1076 if ((error = bus_dmamem_alloc(sc->sc_dmat,
1077 sizeof(struct sip_control_data), PAGE_SIZE, 0, &sc->sc_seg, 1,
1078 &rseg, 0)) != 0) {
1079 printf("%s: unable to allocate control data, error = %d\n",
1080 sc->sc_dev.dv_xname, error);
1081 return sipcom_do_detach(self, -1);
1082 }
1083
1084 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_seg, rseg,
1085 sizeof(struct sip_control_data), (void **)&sc->sc_control_data,
1086 BUS_DMA_COHERENT|BUS_DMA_NOCACHE)) != 0) {
1087 printf("%s: unable to map control data, error = %d\n",
1088 sc->sc_dev.dv_xname, error);
1089 sipcom_do_detach(self, SIP_ATTACH_ALLOC_MEM);
1090 }
1091
1092 if ((error = bus_dmamap_create(sc->sc_dmat,
1093 sizeof(struct sip_control_data), 1,
1094 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
1095 printf("%s: unable to create control data DMA map, "
1096 "error = %d\n", sc->sc_dev.dv_xname, error);
1097 sipcom_do_detach(self, SIP_ATTACH_MAP_MEM);
1098 }
1099
1100 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1101 sc->sc_control_data, sizeof(struct sip_control_data), NULL,
1102 0)) != 0) {
1103 printf("%s: unable to load control data DMA map, error = %d\n",
1104 sc->sc_dev.dv_xname, error);
1105 sipcom_do_detach(self, SIP_ATTACH_CREATE_MAP);
1106 }
1107
1108 /*
1109 * Create the transmit buffer DMA maps.
1110 */
1111 for (i = 0; i < SIP_TXQUEUELEN; i++) {
1112 if ((error = bus_dmamap_create(sc->sc_dmat, tx_dmamap_size,
1113 sc->sc_parm->p_ntxsegs, MCLBYTES, 0, 0,
1114 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1115 printf("%s: unable to create tx DMA map %d, "
1116 "error = %d\n", sc->sc_dev.dv_xname, i, error);
1117 sipcom_do_detach(self, SIP_ATTACH_CREATE_TXMAP);
1118 }
1119 }
1120
1121 /*
1122 * Create the receive buffer DMA maps.
1123 */
1124 for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) {
1125 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1126 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1127 printf("%s: unable to create rx DMA map %d, "
1128 "error = %d\n", sc->sc_dev.dv_xname, i, error);
1129 sipcom_do_detach(self, SIP_ATTACH_CREATE_RXMAP);
1130 }
1131 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1132 }
1133
1134 /*
1135 * Reset the chip to a known state.
1136 */
1137 sipcom_reset(sc);
1138
1139 /*
1140 * Read the Ethernet address from the EEPROM. This might
1141 * also fetch other stuff from the EEPROM and stash it
1142 * in the softc.
1143 */
1144 sc->sc_cfg = 0;
1145 if (!sc->sc_gigabit) {
1146 if (SIP_SIS900_REV(sc,SIS_REV_635) ||
1147 SIP_SIS900_REV(sc,SIS_REV_900B))
1148 sc->sc_cfg |= (CFG_PESEL | CFG_RNDCNT);
1149
1150 if (SIP_SIS900_REV(sc,SIS_REV_635) ||
1151 SIP_SIS900_REV(sc,SIS_REV_960) ||
1152 SIP_SIS900_REV(sc,SIS_REV_900B))
1153 sc->sc_cfg |=
1154 (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG) &
1155 CFG_EDBMASTEN);
1156 }
1157
1158 (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr);
1159
1160 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
1161 ether_sprintf(enaddr));
1162
1163 /*
1164 * Initialize the configuration register: aggressive PCI
1165 * bus request algorithm, default backoff, default OW timer,
1166 * default parity error detection.
1167 *
1168 * NOTE: "Big endian mode" is useless on the SiS900 and
1169 * friends -- it affects packet data, not descriptors.
1170 */
1171 if (sc->sc_gigabit)
1172 sipcom_dp83820_attach(sc, pa);
1173
1174 /*
1175 * Initialize our media structures and probe the MII.
1176 */
1177 sc->sc_mii.mii_ifp = ifp;
1178 sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg;
1179 sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg;
1180 sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg;
1181 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, sipcom_mediachange,
1182 sipcom_mediastatus);
1183
1184 /*
1185 * XXX We cannot handle flow control on the DP83815.
1186 */
1187 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815))
1188 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1189 MII_OFFSET_ANY, 0);
1190 else
1191 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1192 MII_OFFSET_ANY, MIIF_DOPAUSE);
1193 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1194 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1195 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1196 } else
1197 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1198
1199 ifp = &sc->sc_ethercom.ec_if;
1200 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
1201 ifp->if_softc = sc;
1202 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1203 sc->sc_if_flags = ifp->if_flags;
1204 ifp->if_ioctl = sipcom_ioctl;
1205 ifp->if_start = sipcom_start;
1206 ifp->if_watchdog = sipcom_watchdog;
1207 ifp->if_init = sipcom_init;
1208 ifp->if_stop = sipcom_stop;
1209 IFQ_SET_READY(&ifp->if_snd);
1210
1211 /*
1212 * We can support 802.1Q VLAN-sized frames.
1213 */
1214 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
1215
1216 if (sc->sc_gigabit) {
1217 /*
1218 * And the DP83820 can do VLAN tagging in hardware, and
1219 * support the jumbo Ethernet MTU.
1220 */
1221 sc->sc_ethercom.ec_capabilities |=
1222 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
1223
1224 /*
1225 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums
1226 * in hardware.
1227 */
1228 ifp->if_capabilities |=
1229 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1230 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1231 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
1232 }
1233
1234 /*
1235 * Attach the interface.
1236 */
1237 if_attach(ifp);
1238 ether_ifattach(ifp, enaddr);
1239 sc->sc_prev.ec_capenable = sc->sc_ethercom.ec_capenable;
1240 sc->sc_prev.is_vlan = VLAN_ATTACHED(&(sc)->sc_ethercom);
1241 sc->sc_prev.if_capenable = ifp->if_capenable;
1242 #if NRND > 0
1243 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
1244 RND_TYPE_NET, 0);
1245 #endif
1246
1247 /*
1248 * The number of bytes that must be available in
1249 * the Tx FIFO before the bus master can DMA more
1250 * data into the FIFO.
1251 */
1252 sc->sc_tx_fill_thresh = 64 / 32;
1253
1254 /*
1255 * Start at a drain threshold of 512 bytes. We will
1256 * increase it if a DMA underrun occurs.
1257 *
1258 * XXX The minimum value of this variable should be
1259 * tuned. We may be able to improve performance
1260 * by starting with a lower value. That, however,
1261 * may trash the first few outgoing packets if the
1262 * PCI bus is saturated.
1263 */
1264 if (sc->sc_gigabit)
1265 sc->sc_tx_drain_thresh = 6400 / 32; /* from FreeBSD nge(4) */
1266 else
1267 sc->sc_tx_drain_thresh = 1504 / 32;
1268
1269 /*
1270 * Initialize the Rx FIFO drain threshold.
1271 *
1272 * This is in units of 8 bytes.
1273 *
1274 * We should never set this value lower than 2; 14 bytes are
1275 * required to filter the packet.
1276 */
1277 sc->sc_rx_drain_thresh = 128 / 8;
1278
1279 #ifdef SIP_EVENT_COUNTERS
1280 /*
1281 * Attach event counters.
1282 */
1283 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1284 NULL, sc->sc_dev.dv_xname, "txsstall");
1285 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1286 NULL, sc->sc_dev.dv_xname, "txdstall");
1287 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_INTR,
1288 NULL, sc->sc_dev.dv_xname, "txforceintr");
1289 evcnt_attach_dynamic(&sc->sc_ev_txdintr, EVCNT_TYPE_INTR,
1290 NULL, sc->sc_dev.dv_xname, "txdintr");
1291 evcnt_attach_dynamic(&sc->sc_ev_txiintr, EVCNT_TYPE_INTR,
1292 NULL, sc->sc_dev.dv_xname, "txiintr");
1293 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1294 NULL, sc->sc_dev.dv_xname, "rxintr");
1295 evcnt_attach_dynamic(&sc->sc_ev_hiberr, EVCNT_TYPE_INTR,
1296 NULL, sc->sc_dev.dv_xname, "hiberr");
1297 if (!sc->sc_gigabit) {
1298 evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_INTR,
1299 NULL, sc->sc_dev.dv_xname, "rxpause");
1300 } else {
1301 evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_MISC,
1302 NULL, sc->sc_dev.dv_xname, "rxpause");
1303 evcnt_attach_dynamic(&sc->sc_ev_txpause, EVCNT_TYPE_MISC,
1304 NULL, sc->sc_dev.dv_xname, "txpause");
1305 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1306 NULL, sc->sc_dev.dv_xname, "rxipsum");
1307 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC,
1308 NULL, sc->sc_dev.dv_xname, "rxtcpsum");
1309 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC,
1310 NULL, sc->sc_dev.dv_xname, "rxudpsum");
1311 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1312 NULL, sc->sc_dev.dv_xname, "txipsum");
1313 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC,
1314 NULL, sc->sc_dev.dv_xname, "txtcpsum");
1315 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC,
1316 NULL, sc->sc_dev.dv_xname, "txudpsum");
1317 }
1318 #endif /* SIP_EVENT_COUNTERS */
1319
1320 if (!pmf_device_register(self, NULL, sipcom_resume))
1321 aprint_error_dev(self, "couldn't establish power handler\n");
1322 else
1323 pmf_class_network_register(self, ifp);
1324
1325 /*
1326 * Make sure the interface is shutdown during reboot.
1327 */
1328 sc->sc_sdhook = shutdownhook_establish(sipcom_shutdown, sc);
1329 if (sc->sc_sdhook == NULL)
1330 printf("%s: WARNING: unable to establish shutdown hook\n",
1331 sc->sc_dev.dv_xname);
1332 }
1333
1334 /*
1335 * sip_shutdown:
1336 *
1337 * Make sure the interface is stopped at reboot time.
1338 */
1339 static void
1340 sipcom_shutdown(void *arg)
1341 {
1342 struct sip_softc *sc = arg;
1343
1344 sipcom_stop(&sc->sc_ethercom.ec_if, 1);
1345 }
1346
/*
 * sipcom_set_extsts:
 *
 *	Fill in the extended-status (EXTSTS) fields of the descriptors
 *	for an outgoing packet on the gigabit (DP83820) parts: the VLAN
 *	encapsulation request goes on the LAST descriptor (lasttx), and
 *	the hardware-checksum request bits go on the FIRST descriptor
 *	(sc->sc_txnext).
 *
 *	NOTE(review): the `capenable' parameter is not referenced in
 *	this function body -- the KDASSERTs consult ifp->if_capenable
 *	directly; presumably it is kept for the call signature.
 */
static inline void
sipcom_set_extsts(struct sip_softc *sc, int lasttx, struct mbuf *m0,
    uint64_t capenable)
{
	struct m_tag *mtag;
	u_int32_t extsts;
#ifdef DEBUG
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif
	/*
	 * If VLANs are enabled and the packet has a VLAN tag, set
	 * up the descriptor to encapsulate the packet for us.
	 *
	 * This apparently has to be on the last descriptor of
	 * the packet.
	 */

	/*
	 * Byte swapping is tricky. We need to provide the tag
	 * in a network byte order. On a big-endian machine,
	 * the byteorder is correct, but we need to swap it
	 * anyway, because this will be undone by the outside
	 * htole32(). That's why there must be an
	 * unconditional swap instead of htons() inside.
	 */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
		sc->sc_txdescs[lasttx].sipd_extsts |=
		    htole32(EXTSTS_VPKT |
				(bswap16(VLAN_TAG_VALUE(mtag)) &
				 EXTSTS_VTCI));
	}

	/*
	 * If the upper-layer has requested IPv4/TCPv4/UDPv4
	 * checksumming, set up the descriptor to do this work
	 * for us.
	 *
	 * This apparently has to be on the first descriptor of
	 * the packet.
	 *
	 * Byte-swap constants so the compiler can optimize.
	 */
	extsts = 0;
	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4_Tx);
		SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
		extsts |= htole32(EXTSTS_IPPKT);
	}
	if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
		KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx);
		SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
		extsts |= htole32(EXTSTS_TCPPKT);
	} else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
		KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx);
		SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
		extsts |= htole32(EXTSTS_UDPPKT);
	}
	/* Checksum bits are OR'd into the first descriptor of the packet. */
	sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts;
}
1406
/*
 * sip_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Drains the send
 *	queue into the transmit descriptor ring until the queue is
 *	empty or the ring/job slots run out.  For each packet the DMA
 *	map is loaded first (with a bounce copy into a fresh mbuf on
 *	the non-gigabit parts if the direct load fails), and the OWN
 *	bit of the FIRST descriptor is set only after the rest of the
 *	chain is fully initialized, so the chip never sees a
 *	half-built packet.
 */
static void
sipcom_start(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct mbuf *m;		/* bounce copy of m0, when needed */
	struct sip_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, seg;
	int ofree = sc->sc_txfree;	/* to detect whether we queued anything */
#if 0
	int firsttx = sc->sc_txnext;
#endif

	/*
	 * If we've been told to pause, don't transmit any more packets.
	 */
	if (!sc->sc_gigabit && sc->sc_paused)
		ifp->if_flags |= IFF_OACTIVE;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Get a work queue entry. */
		if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) {
			SIP_EVCNT_INCR(&sc->sc_ev_txsstall);
			break;
		}

		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we
		 * were short on resources.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		/* In the non-gigabit case, we'll copy and try again. */
		if (error != 0 && !sc->sc_gigabit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			/* Flatten the packet into the single new buffer. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		} else if (error == EFBIG) {
			/*
			 * For the too-many-segments case, we simply
			 * report an error and drop the packet,
			 * since we can't sanely copy a jumbo packet
			 * to a single buffer.
			 */
			printf("%s: Tx packet consumes too many "
			    "DMA segments, dropping...\n", sc->sc_dev.dv_xname);
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			m_freem(m0);
			continue;
		} else if (error != 0) {
			/*
			 * Short on resources, just stop for now.
			 */
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are not more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			SIP_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* The bounce copy supersedes the original chain. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = lasttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = sip_nexttx(sc, nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			*sipd_bufptr(sc, &sc->sc_txdescs[nexttx]) =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			*sipd_cmdsts(sc, &sc->sc_txdescs[nexttx]) =
			    htole32((nexttx == sc->sc_txnext ? 0 : CMDSTS_OWN) |
			    CMDSTS_MORE | dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].sipd_extsts = 0;
			lasttx = nexttx;
		}

		/* Clear the MORE bit on the last segment. */
		*sipd_cmdsts(sc, &sc->sc_txdescs[lasttx]) &=
		    htole32(~CMDSTS_MORE);

		/*
		 * If we're in the interrupt delay window, delay the
		 * interrupt.
		 */
		if (++sc->sc_txwin >= (SIP_TXQUEUELEN * 2 / 3)) {
			SIP_EVCNT_INCR(&sc->sc_ev_txforceintr);
			*sipd_cmdsts(sc, &sc->sc_txdescs[lasttx]) |=
			    htole32(CMDSTS_INTR);
			sc->sc_txwin = 0;
		}

		/* VLAN/checksum offload bits (DP83820 only). */
		if (sc->sc_gigabit)
			sipcom_set_extsts(sc, lasttx, m0, ifp->if_capenable);

		/* Sync the descriptors we're using. */
		SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet is set up.  Give the first descrptor
		 * to the chip now.
		 */
		*sipd_cmdsts(sc, &sc->sc_txdescs[sc->sc_txnext]) |=
		    htole32(CMDSTS_OWN);
		SIP_CDTXSYNC(sc, sc->sc_txnext, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/*
		 * Start the transmit process.  Note, the manual says
		 * that if there are no pending transmissions in the
		 * chip's internal queue (indicated by TXE being clear),
		 * then the driver software must set the TXDP to the
		 * first descriptor to be transmitted.  However, if we
		 * do this, it causes serious performance degredation on
		 * the DP83820 under load, not setting TXDP doesn't seem
		 * to adversely affect the SiS 900 or DP83815.
		 *
		 * Well, I guess it wouldn't be the first time a manual
		 * has lied -- and they could be speaking of the NULL-
		 * terminated descriptor list case, rather than OWN-
		 * terminated rings.
		 */
#if 0
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
		     CR_TXE) == 0) {
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
			    SIP_CDTXADDR(sc, firsttx));
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
		}
#else
		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		/* Gigabit autonegotiation takes 5 seconds. */
		ifp->if_timer = (sc->sc_gigabit) ? 10 : 5;
	}
}
1663
1664 /*
1665 * sip_watchdog: [ifnet interface function]
1666 *
1667 * Watchdog timer handler.
1668 */
1669 static void
1670 sipcom_watchdog(struct ifnet *ifp)
1671 {
1672 struct sip_softc *sc = ifp->if_softc;
1673
1674 /*
1675 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
1676 * If we get a timeout, try and sweep up transmit descriptors.
1677 * If we manage to sweep them all up, ignore the lack of
1678 * interrupt.
1679 */
1680 sipcom_txintr(sc);
1681
1682 if (sc->sc_txfree != sc->sc_ntxdesc) {
1683 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1684 ifp->if_oerrors++;
1685
1686 /* Reset the interface. */
1687 (void) sipcom_init(ifp);
1688 } else if (ifp->if_flags & IFF_DEBUG)
1689 printf("%s: recovered from device timeout\n",
1690 sc->sc_dev.dv_xname);
1691
1692 /* Try to get more packets going. */
1693 sipcom_start(ifp);
1694 }
1695
/*
 * sip_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.  Runs at splnet().
 *	SIOCSIFMEDIA sanitizes the flow-control bits before handing
 *	off to ifmedia; SIOCSIFFLAGS takes a fast path that only
 *	reprograms the receive filter when nothing that would require
 *	a full chip reset has changed.
 */
static int
sipcom_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sip_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;

		/* XXX DP83815 cannot do flow control; strip the bits. */
		if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815))
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if (sc->sc_gigabit &&
			    (ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			} else if (ifr->ifr_media & IFM_FLOW) {
				/*
				 * Both TXPAUSE and RXPAUSE must be set.
				 * (SiS900 and DP83815 don't have PAUSE_ASYM
				 * feature.)
				 *
				 * XXX Can SiS900 and DP83815 send PAUSE?
				 */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCSIFFLAGS:
		/* If the interface is up and running, only modify the receive
		 * filter when setting promiscuous or debug mode.  Otherwise
		 * fall through to ether_ioctl, which will reset the chip.
		 */

/* True when ethercom capabilities / VLAN attachment are unchanged. */
#define COMPARE_EC(sc) (((sc)->sc_prev.ec_capenable		\
			 == (sc)->sc_ethercom.ec_capenable)	\
			&& ((sc)->sc_prev.is_vlan ==		\
			    VLAN_ATTACHED(&(sc)->sc_ethercom) ))

/* True when interface capabilities are unchanged. */
#define COMPARE_IC(sc, ifp) ((sc)->sc_prev.if_capenable == (ifp)->if_capenable)

/* Flag bits whose change does NOT require a full reset. */
#define RESETIGN (IFF_CANTCHANGE|IFF_DEBUG)
		if (((ifp->if_flags & (IFF_UP|IFF_RUNNING))
		    == (IFF_UP|IFF_RUNNING))
		    && ((ifp->if_flags & (~RESETIGN))
		    == (sc->sc_if_flags & (~RESETIGN)))
		    && COMPARE_EC(sc) && COMPARE_IC(sc, ifp)) {
			/* Set up the receive filter. */
			(*sc->sc_model->sip_variant->sipv_set_filter)(sc);
			error = 0;
			break;
#undef RESETIGN
		}
		/* FALLTHROUGH */
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				(*sc->sc_model->sip_variant->sipv_set_filter)(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	sipcom_start(ifp);

	/* Remember the flags for the next fast-path comparison. */
	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}
1790
/*
 * sip_intr:
 *
 *	Interrupt service routine.  Masks the chip's interrupts,
 *	loops reading and dispatching ISR bits (the read clears
 *	them) until no enabled source remains, then re-enables
 *	interrupts and kicks the transmitter.  Returns non-zero if
 *	any interrupt was ours.
 */
static int
sipcom_intr(void *arg)
{
	struct sip_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t isr;
	int handled = 0;

	/* Disable interrupts. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IER, 0);

	for (;;) {
		/* Reading clears interrupt. */
		isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
		if ((isr & sc->sc_imr) == 0)
			break;

#if NRND > 0
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, isr);
#endif

		handled = 1;

		if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
			SIP_EVCNT_INCR(&sc->sc_ev_rxintr);

			/* Grab any new packets. */
			(*sc->sc_rxintr)(sc);

			if (isr & ISR_RXORN) {
				printf("%s: receive FIFO overrun\n",
				    sc->sc_dev.dv_xname);

				/* XXX adjust rx_drain_thresh? */
			}

			if (isr & ISR_RXIDLE) {
				printf("%s: receive ring overrun\n",
				    sc->sc_dev.dv_xname);

				/* Get the receive process going again. */
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    SIP_CR, CR_RXE);
			}
		}

		if (isr & (ISR_TXURN|ISR_TXDESC|ISR_TXIDLE)) {
#ifdef SIP_EVENT_COUNTERS
			if (isr & ISR_TXDESC)
				SIP_EVCNT_INCR(&sc->sc_ev_txdintr);
			else if (isr & ISR_TXIDLE)
				SIP_EVCNT_INCR(&sc->sc_ev_txiintr);
#endif

			/* Sweep up transmit descriptors. */
			sipcom_txintr(sc);

			if (isr & ISR_TXURN) {
				u_int32_t thresh;
				int txfifo_size = (sc->sc_gigabit)
				    ? DP83820_SIP_TXFIFO_SIZE
				    : OTHER_SIP_TXFIFO_SIZE;

				printf("%s: transmit FIFO underrun",
				    sc->sc_dev.dv_xname);
				/*
				 * Bump the drain threshold if it still fits
				 * the register field and the FIFO; either
				 * way, re-init the chip to recover.
				 */
				thresh = sc->sc_tx_drain_thresh + 1;
				if (thresh <= __SHIFTOUT_MASK(sc->sc_bits.b_txcfg_drth_mask)
				&& (thresh * 32) <= (txfifo_size -
				     (sc->sc_tx_fill_thresh * 32))) {
					printf("; increasing Tx drain "
					    "threshold to %u bytes\n",
					    thresh * 32);
					sc->sc_tx_drain_thresh = thresh;
					(void) sipcom_init(ifp);
				} else {
					(void) sipcom_init(ifp);
					printf("\n");
				}
			}
		}

		if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
			/* 802.3x flow control: stall/resume transmission. */
			if (isr & ISR_PAUSE_ST) {
				sc->sc_paused = 1;
				SIP_EVCNT_INCR(&sc->sc_ev_rxpause);
				ifp->if_flags |= IFF_OACTIVE;
			}
			if (isr & ISR_PAUSE_END) {
				sc->sc_paused = 0;
				ifp->if_flags &= ~IFF_OACTIVE;
			}
		}

		if (isr & ISR_HIBERR) {
			int want_init = 0;

			SIP_EVCNT_INCR(&sc->sc_ev_hiberr);

#define PRINTERR(bit, str)						\
			do {						\
				if ((isr & (bit)) != 0) {		\
					if ((ifp->if_flags & IFF_DEBUG) != 0) \
						printf("%s: %s\n",	\
						    sc->sc_dev.dv_xname, str); \
					want_init = 1;			\
				}					\
			} while (/*CONSTCOND*/0)

			PRINTERR(sc->sc_bits.b_isr_dperr, "parity error");
			PRINTERR(sc->sc_bits.b_isr_sserr, "system error");
			PRINTERR(sc->sc_bits.b_isr_rmabt, "master abort");
			PRINTERR(sc->sc_bits.b_isr_rtabt, "target abort");
			PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
			/*
			 * Ignore:
			 *	Tx reset complete
			 *	Rx reset complete
			 */
			if (want_init)
				(void) sipcom_init(ifp);
#undef PRINTERR
		}
	}

	/* Re-enable interrupts. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IER, IER_IE);

	/* Try to get more packets going. */
	sipcom_start(ifp);

	return (handled);
}
1931
/*
 * sip_txintr:
 *
 *	Helper; handle transmit interrupts.  Sweeps completed frames
 *	off the dirty queue, reclaims their descriptors and DMA maps,
 *	updates error/collision statistics, and cancels the watchdog
 *	timer when the ring drains completely.
 */
static void
sipcom_txintr(struct sip_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sip_txsoft *txs;
	u_int32_t cmdsts;

	/* Unless flow control has paused us, we may accept more packets. */
	if (sc->sc_paused == 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* OWN still set on the last descriptor: chip not done yet. */
		cmdsts = le32toh(*sipd_cmdsts(sc, &sc->sc_txdescs[txs->txs_lastdesc]));
		if (cmdsts & CMDSTS_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_dmamap->dm_nsegs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
		if (cmdsts &
		    (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
			ifp->if_oerrors++;
			if (cmdsts & CMDSTS_Tx_EC)
				ifp->if_collisions += 16;
			if (ifp->if_flags & IFF_DEBUG) {
				if (cmdsts & CMDSTS_Tx_ED)
					printf("%s: excessive deferral\n",
					    sc->sc_dev.dv_xname);
				if (cmdsts & CMDSTS_Tx_EC)
					printf("%s: excessive collisions\n",
					    sc->sc_dev.dv_xname);
			}
		} else {
			/* Packet was transmitted successfully. */
			ifp->if_opackets++;
			ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
		}
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL) {
		ifp->if_timer = 0;
		sc->sc_txwin = 0;
	}
}
2003
2004 /*
2005 * gsip_rxintr:
2006 *
2007 * Helper; handle receive interrupts on gigabit parts.
2008 */
2009 static void
2010 gsip_rxintr(struct sip_softc *sc)
2011 {
2012 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2013 struct sip_rxsoft *rxs;
2014 struct mbuf *m;
2015 u_int32_t cmdsts, extsts;
2016 int i, len;
2017
2018 for (i = sc->sc_rxptr;; i = sip_nextrx(sc, i)) {
2019 rxs = &sc->sc_rxsoft[i];
2020
2021 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2022
2023 cmdsts = le32toh(*sipd_cmdsts(sc, &sc->sc_rxdescs[i]));
2024 extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts);
2025 len = CMDSTS_SIZE(sc, cmdsts);
2026
2027 /*
2028 * NOTE: OWN is set if owned by _consumer_. We're the
2029 * consumer of the receive ring, so if the bit is clear,
2030 * we have processed all of the packets.
2031 */
2032 if ((cmdsts & CMDSTS_OWN) == 0) {
2033 /*
2034 * We have processed all of the receive buffers.
2035 */
2036 break;
2037 }
2038
2039 if (__predict_false(sc->sc_rxdiscard)) {
2040 SIP_INIT_RXDESC(sc, i);
2041 if ((cmdsts & CMDSTS_MORE) == 0) {
2042 /* Reset our state. */
2043 sc->sc_rxdiscard = 0;
2044 }
2045 continue;
2046 }
2047
2048 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2049 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2050
2051 m = rxs->rxs_mbuf;
2052
2053 /*
2054 * Add a new receive buffer to the ring.
2055 */
2056 if (sipcom_add_rxbuf(sc, i) != 0) {
2057 /*
2058 * Failed, throw away what we've done so
2059 * far, and discard the rest of the packet.
2060 */
2061 ifp->if_ierrors++;
2062 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2063 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2064 SIP_INIT_RXDESC(sc, i);
2065 if (cmdsts & CMDSTS_MORE)
2066 sc->sc_rxdiscard = 1;
2067 if (sc->sc_rxhead != NULL)
2068 m_freem(sc->sc_rxhead);
2069 SIP_RXCHAIN_RESET(sc);
2070 continue;
2071 }
2072
2073 SIP_RXCHAIN_LINK(sc, m);
2074
2075 m->m_len = len;
2076
2077 /*
2078 * If this is not the end of the packet, keep
2079 * looking.
2080 */
2081 if (cmdsts & CMDSTS_MORE) {
2082 sc->sc_rxlen += len;
2083 continue;
2084 }
2085
2086 /*
2087 * Okay, we have the entire packet now. The chip includes
2088 * the FCS, so we need to trim it.
2089 */
2090 m->m_len -= ETHER_CRC_LEN;
2091
2092 *sc->sc_rxtailp = NULL;
2093 len = m->m_len + sc->sc_rxlen;
2094 m = sc->sc_rxhead;
2095
2096 SIP_RXCHAIN_RESET(sc);
2097
2098 /*
2099 * If an error occurred, update stats and drop the packet.
2100 */
2101 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
2102 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
2103 ifp->if_ierrors++;
2104 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
2105 (cmdsts & CMDSTS_Rx_RXO) == 0) {
2106 /* Receive overrun handled elsewhere. */
2107 printf("%s: receive descriptor error\n",
2108 sc->sc_dev.dv_xname);
2109 }
2110 #define PRINTERR(bit, str) \
2111 if ((ifp->if_flags & IFF_DEBUG) != 0 && \
2112 (cmdsts & (bit)) != 0) \
2113 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
2114 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
2115 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
2116 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
2117 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
2118 #undef PRINTERR
2119 m_freem(m);
2120 continue;
2121 }
2122
2123 /*
2124 * If the packet is small enough to fit in a
2125 * single header mbuf, allocate one and copy
2126 * the data into it. This greatly reduces
2127 * memory consumption when we receive lots
2128 * of small packets.
2129 */
2130 if (gsip_copy_small != 0 && len <= (MHLEN - 2)) {
2131 struct mbuf *nm;
2132 MGETHDR(nm, M_DONTWAIT, MT_DATA);
2133 if (nm == NULL) {
2134 ifp->if_ierrors++;
2135 m_freem(m);
2136 continue;
2137 }
2138 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
2139 nm->m_data += 2;
2140 nm->m_pkthdr.len = nm->m_len = len;
2141 m_copydata(m, 0, len, mtod(nm, void *));
2142 m_freem(m);
2143 m = nm;
2144 }
2145 #ifndef __NO_STRICT_ALIGNMENT
2146 else {
2147 /*
2148 * The DP83820's receive buffers must be 4-byte
2149 * aligned. But this means that the data after
2150 * the Ethernet header is misaligned. To compensate,
2151 * we have artificially shortened the buffer size
2152 * in the descriptor, and we do an overlapping copy
2153 * of the data two bytes further in (in the first
2154 * buffer of the chain only).
2155 */
2156 memmove(mtod(m, char *) + 2, mtod(m, void *),
2157 m->m_len);
2158 m->m_data += 2;
2159 }
2160 #endif /* ! __NO_STRICT_ALIGNMENT */
2161
2162 /*
2163 * If VLANs are enabled, VLAN packets have been unwrapped
2164 * for us. Associate the tag with the packet.
2165 */
2166
2167 /*
2168 * Again, byte swapping is tricky. Hardware provided
2169 * the tag in the network byte order, but extsts was
2170 * passed through le32toh() in the meantime. On a
2171 * big-endian machine, we need to swap it again. On a
2172 * little-endian machine, we need to convert from the
2173 * network to host byte order. This means that we must
2174 * swap it in any case, so unconditional swap instead
2175 * of htons() is used.
2176 */
2177 if ((extsts & EXTSTS_VPKT) != 0) {
2178 VLAN_INPUT_TAG(ifp, m, bswap16(extsts & EXTSTS_VTCI),
2179 continue);
2180 }
2181
2182 /*
2183 * Set the incoming checksum information for the
2184 * packet.
2185 */
2186 if ((extsts & EXTSTS_IPPKT) != 0) {
2187 SIP_EVCNT_INCR(&sc->sc_ev_rxipsum);
2188 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2189 if (extsts & EXTSTS_Rx_IPERR)
2190 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2191 if (extsts & EXTSTS_TCPPKT) {
2192 SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
2193 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
2194 if (extsts & EXTSTS_Rx_TCPERR)
2195 m->m_pkthdr.csum_flags |=
2196 M_CSUM_TCP_UDP_BAD;
2197 } else if (extsts & EXTSTS_UDPPKT) {
2198 SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum);
2199 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
2200 if (extsts & EXTSTS_Rx_UDPERR)
2201 m->m_pkthdr.csum_flags |=
2202 M_CSUM_TCP_UDP_BAD;
2203 }
2204 }
2205
2206 ifp->if_ipackets++;
2207 m->m_pkthdr.rcvif = ifp;
2208 m->m_pkthdr.len = len;
2209
2210 #if NBPFILTER > 0
2211 /*
2212 * Pass this up to any BPF listeners, but only
2213 * pass if up the stack if it's for us.
2214 */
2215 if (ifp->if_bpf)
2216 bpf_mtap(ifp->if_bpf, m);
2217 #endif /* NBPFILTER > 0 */
2218
2219 /* Pass it on. */
2220 (*ifp->if_input)(ifp, m);
2221 }
2222
2223 /* Update the receive pointer. */
2224 sc->sc_rxptr = i;
2225 }
2226
/*
 * sip_rxintr:
 *
 *	Helper; handle receive interrupts on 10/100 parts.
 *
 *	Walks the receive ring starting at sc_rxptr until a descriptor
 *	still owned by the chip is found.  On these parts each frame
 *	fits in a single descriptor; the frame is copied out of the
 *	receive buffer (which normally stays on the ring) and handed
 *	to the network stack via if_input.
 */
static void
sip_rxintr(struct sip_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sip_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t cmdsts;
	int i, len;

	for (i = sc->sc_rxptr;; i = sip_nextrx(sc, i)) {
		rxs = &sc->sc_rxsoft[i];

		/* Make the chip's descriptor writes visible to the CPU. */
		SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cmdsts = le32toh(*sipd_cmdsts(sc, &sc->sc_rxdescs[i]));

		/*
		 * NOTE: OWN is set if owned by _consumer_.  We're the
		 * consumer of the receive ring, so if the bit is clear,
		 * we have processed all of the packets.
		 */
		if ((cmdsts & CMDSTS_OWN) == 0) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (cmdsts & CMDSTS_Rx_COL)
			ifp->if_collisions++;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place. It will
		 * simply be reused the next time the ring comes around.
		 */
		if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
		    CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
			ifp->if_ierrors++;
			if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
			    (cmdsts & CMDSTS_Rx_RXO) == 0) {
				/* Receive overrun handled elsewhere. */
				printf("%s: receive descriptor error\n",
				    sc->sc_dev.dv_xname);
			}
#define PRINTERR(bit, str)						\
			if ((ifp->if_flags & IFF_DEBUG) != 0 &&		\
			    (cmdsts & (bit)) != 0)			\
				printf("%s: %s\n", sc->sc_dev.dv_xname, str)
			PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
			PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
			PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
			PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
#undef PRINTERR
			SIP_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, the SiS 900
		 * includes the CRC with every packet.
		 */
		len = CMDSTS_SIZE(sc, cmdsts) - ETHER_CRC_LEN;

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (sip_copy_small != 0 && len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
			memcpy(mtod(m, void *),
			    mtod(rxs->rxs_mbuf, void *), len);
			/* Old buffer stays on the ring; re-arm it. */
			SIP_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			/* Hand the ring's mbuf up; replace it in the slot. */
			m = rxs->rxs_mbuf;
			if (sipcom_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				SIP_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The SiS 900's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SIP_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			/* Frame too big for a header mbuf; add a cluster. */
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		/* Shift by 2 so the IP header lands 4-byte aligned. */
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SIP_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass if up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}
2396
2397 /*
2398 * sip_tick:
2399 *
2400 * One second timer, used to tick the MII.
2401 */
2402 static void
2403 sipcom_tick(void *arg)
2404 {
2405 struct sip_softc *sc = arg;
2406 int s;
2407
2408 s = splnet();
2409 #ifdef SIP_EVENT_COUNTERS
2410 if (sc->sc_gigabit) {
2411 /* Read PAUSE related counts from MIB registers. */
2412 sc->sc_ev_rxpause.ev_count +=
2413 bus_space_read_4(sc->sc_st, sc->sc_sh,
2414 SIP_NS_MIB(MIB_RXPauseFrames)) & 0xffff;
2415 sc->sc_ev_txpause.ev_count +=
2416 bus_space_read_4(sc->sc_st, sc->sc_sh,
2417 SIP_NS_MIB(MIB_TXPauseFrames)) & 0xffff;
2418 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_MIBC, MIBC_ACLR);
2419 }
2420 #endif /* SIP_EVENT_COUNTERS */
2421 mii_tick(&sc->sc_mii);
2422 splx(s);
2423
2424 callout_reset(&sc->sc_tick_ch, hz, sipcom_tick, sc);
2425 }
2426
2427 /*
2428 * sip_reset:
2429 *
2430 * Perform a soft reset on the SiS 900.
2431 */
2432 static bool
2433 sipcom_reset(struct sip_softc *sc)
2434 {
2435 bus_space_tag_t st = sc->sc_st;
2436 bus_space_handle_t sh = sc->sc_sh;
2437 int i;
2438
2439 bus_space_write_4(st, sh, SIP_IER, 0);
2440 bus_space_write_4(st, sh, SIP_IMR, 0);
2441 bus_space_write_4(st, sh, SIP_RFCR, 0);
2442 bus_space_write_4(st, sh, SIP_CR, CR_RST);
2443
2444 for (i = 0; i < SIP_TIMEOUT; i++) {
2445 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0)
2446 break;
2447 delay(2);
2448 }
2449
2450 if (i == SIP_TIMEOUT) {
2451 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
2452 return false;
2453 }
2454
2455 delay(1000);
2456
2457 if (sc->sc_gigabit) {
2458 /*
2459 * Set the general purpose I/O bits. Do it here in case we
2460 * need to have GPIO set up to talk to the media interface.
2461 */
2462 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior);
2463 delay(1000);
2464 }
2465 return true;
2466 }
2467
2468 static void
2469 sipcom_dp83820_init(struct sip_softc *sc, uint64_t capenable)
2470 {
2471 u_int32_t reg;
2472 bus_space_tag_t st = sc->sc_st;
2473 bus_space_handle_t sh = sc->sc_sh;
2474 /*
2475 * Initialize the VLAN/IP receive control register.
2476 * We enable checksum computation on all incoming
2477 * packets, and do not reject packets w/ bad checksums.
2478 */
2479 reg = 0;
2480 if (capenable &
2481 (IFCAP_CSUM_IPv4_Rx|IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx))
2482 reg |= VRCR_IPEN;
2483 if (VLAN_ATTACHED(&sc->sc_ethercom))
2484 reg |= VRCR_VTDEN|VRCR_VTREN;
2485 bus_space_write_4(st, sh, SIP_VRCR, reg);
2486
2487 /*
2488 * Initialize the VLAN/IP transmit control register.
2489 * We enable outgoing checksum computation on a
2490 * per-packet basis.
2491 */
2492 reg = 0;
2493 if (capenable &
2494 (IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_UDPv4_Tx))
2495 reg |= VTCR_PPCHK;
2496 if (VLAN_ATTACHED(&sc->sc_ethercom))
2497 reg |= VTCR_VPPTI;
2498 bus_space_write_4(st, sh, SIP_VTCR, reg);
2499
2500 /*
2501 * If we're using VLANs, initialize the VLAN data register.
2502 * To understand why we bswap the VLAN Ethertype, see section
2503 * 4.2.36 of the DP83820 manual.
2504 */
2505 if (VLAN_ATTACHED(&sc->sc_ethercom))
2506 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN));
2507 }
2508
/*
 * sip_init:		[ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Stops pending I/O, resets the chip, rebuilds the transmit and
 *	receive rings, programs the configuration, DMA-threshold,
 *	receive-filter and interrupt-mask registers, starts the MII
 *	tick callout, and enables the receiver and transmitter.
 *	Returns 0 on success or an errno (EBUSY if the device has no
 *	power or the reset fails; an allocation error from
 *	sipcom_add_rxbuf otherwise).
 */
static int
sipcom_init(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct sip_txsoft *txs;
	struct sip_rxsoft *rxs;
	struct sip_desc *sipd;
	int i, error = 0;

	if (!device_has_power(&sc->sc_dev))
		return EBUSY;

	/*
	 * Cancel any pending I/O.
	 */
	sipcom_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	if (!sipcom_reset(sc))
		return EBUSY;

	if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) {
		/*
		 * DP83815 manual, page 78:
		 *    4.4 Recommended Registers Configuration
		 *    For optimum performance of the DP83815, version noted
		 *    as DP83815CVNG (SRR = 203h), the listed register
		 *    modifications must be followed in sequence...
		 *
		 * It's not clear if this should be 302h or 203h because that
		 * chip name is listed as SRR 302h in the description of the
		 * SRR register.  However, my revision 302h DP83815 on the
		 * Netgear FA311 purchased in 02/2001 needs these settings
		 * to avoid tons of errors in AcceptPerfectMatch (non-
		 * IFF_PROMISC) mode.  I do not know if other revisions need
		 * this set or not.  [briggs -- 09 March 2001]
		 *
		 * Note that only the low-order 12 bits of 0xe4 are documented
		 * and that this sets reserved bits in that register.
		 */
		bus_space_write_4(st, sh, 0x00cc, 0x0001);

		bus_space_write_4(st, sh, 0x00e4, 0x189C);
		bus_space_write_4(st, sh, 0x00fc, 0x0000);
		bus_space_write_4(st, sh, 0x00f4, 0x5040);
		bus_space_write_4(st, sh, 0x00f8, 0x008c);

		bus_space_write_4(st, sh, 0x00cc, 0x0000);
	}

	/*
	 * Initialize the transmit descriptor ring.  Each descriptor's
	 * link field points at the next, forming a circular list.
	 */
	for (i = 0; i < sc->sc_ntxdesc; i++) {
		sipd = &sc->sc_txdescs[i];
		memset(sipd, 0, sizeof(struct sip_desc));
		sipd->sipd_link = htole32(SIP_CDTXADDR(sc, sip_nexttx(sc, i)));
	}
	SIP_CDTXSYNC(sc, 0, sc->sc_ntxdesc,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = sc->sc_ntxdesc;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the transmit job descriptors.
	 */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);
	for (i = 0; i < SIP_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = sipcom_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sipcom_rxdrain(sc);
				goto out;
			}
		} else
			SIP_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	SIP_RXCHAIN_RESET(sc);

	/*
	 * Set the configuration register; it's already initialized
	 * in sip_attach().
	 */
	bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg);

	/*
	 * Initialize the prototype TXCFG register.
	 */
	if (sc->sc_gigabit) {
		sc->sc_txcfg = sc->sc_bits.b_txcfg_mxdma_512;
		sc->sc_rxcfg = sc->sc_bits.b_rxcfg_mxdma_512;
	} else if ((SIP_SIS900_REV(sc, SIS_REV_635) ||
		    SIP_SIS900_REV(sc, SIS_REV_960) ||
		    SIP_SIS900_REV(sc, SIS_REV_900B)) &&
		   (sc->sc_cfg & CFG_EDBMASTEN)) {
		/* These revisions with 64-bit bus mastering use 64-byte DMA. */
		sc->sc_txcfg = sc->sc_bits.b_txcfg_mxdma_64;
		sc->sc_rxcfg = sc->sc_bits.b_rxcfg_mxdma_64;
	} else {
		sc->sc_txcfg = sc->sc_bits.b_txcfg_mxdma_512;
		sc->sc_rxcfg = sc->sc_bits.b_rxcfg_mxdma_512;
	}

	sc->sc_txcfg |= TXCFG_ATP |
	    __SHIFTIN(sc->sc_tx_fill_thresh, sc->sc_bits.b_txcfg_flth_mask) |
	    sc->sc_tx_drain_thresh;
	bus_space_write_4(st, sh, sc->sc_regs.r_txcfg, sc->sc_txcfg);

	/*
	 * Initialize the receive drain threshold if we have never
	 * done so.
	 */
	if (sc->sc_rx_drain_thresh == 0) {
		/*
		 * XXX This value should be tuned.  This is set to the
		 * maximum of 248 bytes, and we may be able to improve
		 * performance by decreasing it (although we should never
		 * set this value lower than 2; 14 bytes are required to
		 * filter the packet).
		 */
		sc->sc_rx_drain_thresh = __SHIFTOUT_MASK(RXCFG_DRTH_MASK);
	}

	/*
	 * Initialize the prototype RXCFG register.
	 */
	sc->sc_rxcfg |= __SHIFTIN(sc->sc_rx_drain_thresh, RXCFG_DRTH_MASK);
	/*
	 * Accept long packets (including FCS) so we can handle
	 * 802.1q-tagged frames and jumbo frames properly.
	 */
	if ((sc->sc_gigabit && ifp->if_mtu > ETHERMTU) ||
	    (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU))
		sc->sc_rxcfg |= RXCFG_ALP;

	/*
	 * Checksum offloading is disabled if the user selects an MTU
	 * larger than 8109.  (FreeBSD says 8152, but there is emperical
	 * evidence that >8109 does not work on some boards, such as the
	 * Planex GN-1000TE).
	 */
	if (sc->sc_gigabit && ifp->if_mtu > 8109 &&
	    (ifp->if_capenable &
	     (IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_IPv4_Rx|
	      IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_TCPv4_Rx|
	      IFCAP_CSUM_UDPv4_Tx|IFCAP_CSUM_UDPv4_Rx))) {
		printf("%s: Checksum offloading does not work if MTU > 8109 - "
		    "disabled.\n", sc->sc_dev.dv_xname);
		ifp->if_capenable &=
		    ~(IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_IPv4_Rx|
		      IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_TCPv4_Rx|
		      IFCAP_CSUM_UDPv4_Tx|IFCAP_CSUM_UDPv4_Rx);
		ifp->if_csum_flags_tx = 0;
		ifp->if_csum_flags_rx = 0;
	}

	bus_space_write_4(st, sh, sc->sc_regs.r_rxcfg, sc->sc_rxcfg);

	if (sc->sc_gigabit)
		sipcom_dp83820_init(sc, ifp->if_capenable);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
	bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_imr = sc->sc_bits.b_isr_dperr |
	    sc->sc_bits.b_isr_sserr |
	    sc->sc_bits.b_isr_rmabt |
	    sc->sc_bits.b_isr_rtabt | ISR_RXSOVR |
	    ISR_TXURN|ISR_TXDESC|ISR_TXIDLE|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
	bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);

	/* Set up the receive filter. */
	(*sc->sc_model->sip_variant->sipv_set_filter)(sc);

	/*
	 * Tune sc_rx_flow_thresh.
	 * XXX "More than 8KB" is too short for jumbo frames.
	 * XXX TODO: Threshold value should be user-settable.
	 */
	sc->sc_rx_flow_thresh = (PCR_PS_STHI_8 | PCR_PS_STLO_4 |
	    PCR_PS_FFHI_8 | PCR_PS_FFLO_4 |
	    (PCR_PAUSE_CNT & PCR_PAUSE_CNT_MASK));

	/*
	 * Set the current media.  Do this after initializing the prototype
	 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
	 * control.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Set the interrupt hold-off timer to 100us.
	 */
	if (sc->sc_gigabit)
		bus_space_write_4(st, sh, SIP_IHR, 0x01);

	/*
	 * Enable interrupts.
	 */
	bus_space_write_4(st, sh, SIP_IER, IER_IE);

	/*
	 * Start the transmit and receive processes.
	 */
	bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, sipcom_tick, sc);

	/*
	 * ...all done!  Record the flags and capabilities we advertised
	 * so later ioctls can detect configuration changes.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;
	sc->sc_prev.ec_capenable = sc->sc_ethercom.ec_capenable;
	sc->sc_prev.is_vlan = VLAN_ATTACHED(&(sc)->sc_ethercom);
	sc->sc_prev.if_capenable = ifp->if_capenable;

 out:
	/*
	 * Success falls through here with error == 0; error is nonzero
	 * only if receive buffer setup failed above.
	 */
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
2770
2771 /*
2772 * sip_drain:
2773 *
2774 * Drain the receive queue.
2775 */
2776 static void
2777 sipcom_rxdrain(struct sip_softc *sc)
2778 {
2779 struct sip_rxsoft *rxs;
2780 int i;
2781
2782 for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) {
2783 rxs = &sc->sc_rxsoft[i];
2784 if (rxs->rxs_mbuf != NULL) {
2785 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2786 m_freem(rxs->rxs_mbuf);
2787 rxs->rxs_mbuf = NULL;
2788 }
2789 }
2790 }
2791
/*
 * sip_stop:		[ ifnet interface function ]
 *
 *	Stop transmission on the interface.
 *
 *	Stops the MII tick callout, disables interrupts, halts the
 *	chip's receiver and transmitter, frees all queued transmit
 *	jobs, optionally (if `disable') drains the receive ring, and
 *	marks the interface down.
 */
static void
sipcom_stop(struct ifnet *ifp, int disable)
{
	struct sip_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct sip_txsoft *txs;
	u_int32_t cmdsts = 0;		/* DEBUG */

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/*
	 * Disable interrupts.
	 */
	bus_space_write_4(st, sh, SIP_IER, 0);

	/*
	 * Stop receiver and transmitter.
	 */
	bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		/*
		 * Debug sanity check: the last descriptor of the last
		 * pending job should have requested a Tx interrupt.
		 */
		if ((ifp->if_flags & IFF_DEBUG) != 0 &&
		    SIMPLEQ_NEXT(txs, txs_q) == NULL &&
		    (le32toh(*sipd_cmdsts(sc, &sc->sc_txdescs[txs->txs_lastdesc])) &
		     CMDSTS_INTR) == 0)
			printf("%s: sip_stop: last descriptor does not "
			    "have INTR bit set\n", sc->sc_dev.dv_xname);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
#ifdef DIAGNOSTIC
		if (txs->txs_mbuf == NULL) {
			printf("%s: dirty txsoft with no mbuf chain\n",
			    sc->sc_dev.dv_xname);
			panic("sip_stop");
		}
#endif
		/* Accumulate status bits for the debug check below. */
		cmdsts |=		/* DEBUG */
		    le32toh(*sipd_cmdsts(sc, &sc->sc_txdescs[txs->txs_lastdesc]));
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		sipcom_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/*
	 * Debug check: if any transmit jobs were outstanding, at least
	 * one of their accumulated status words should have INTR set.
	 */
	if ((ifp->if_flags & IFF_DEBUG) != 0 &&
	    (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != sc->sc_ntxdesc)
		printf("%s: sip_stop: no INTR bits set in dirty tx "
		    "descriptors\n", sc->sc_dev.dv_xname);
}
2864
/*
 * sip_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 *
 *	Bit-bangs the EEPROM through the EROMAR register.  For each of
 *	`wordcnt' consecutive words starting at `word': assert chip
 *	select, clock out the 3-bit READ opcode and a 6-bit word
 *	address (MSB first), then clock in 16 data bits (MSB first)
 *	into data[i].  Each bit is clocked by pulsing EESK high then
 *	low with short delays.
 */
static void
sipcom_read_eeprom(struct sip_softc *sc, int word, int wordcnt,
    u_int16_t *data)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT. */
		reg = EROMAR_EECS;
		bus_space_write_4(st, sh, SIP_EROMAR, reg);

		/* Shift in the READ opcode. */
		for (x = 3; x > 0; x--) {
			if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EROMAR_EEDI;
			else
				reg &= ~EROMAR_EEDI;
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			if ((word + i) & (1 << (x - 1)))
				reg |= EROMAR_EEDI;
			else
				reg &= ~EROMAR_EEDI;
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/* Shift out data. */
		reg = EROMAR_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			/* EEDO carries the next data bit after the clock. */
			if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, SIP_EROMAR, 0);
		delay(4);
	}
}
2930
/*
 * sipcom_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Allocates a new mbuf cluster, loads it into the slot's DMA map,
 *	and re-initializes the descriptor to point at it.  Returns 0 on
 *	success, or ENOBUFS if mbuf/cluster allocation fails.
 *
 *	All recoverable (ENOBUFS) failures happen *before* the slot's
 *	old DMA map is unloaded, so callers' error paths may still
 *	recycle the old buffer via SIP_INIT_RXDESC.  A bus_dmamap_load()
 *	failure, by contrast, would leave the slot with no valid mapping
 *	-- a state the callers cannot recover from -- hence the panic.
 */
static int
sipcom_add_rxbuf(struct sip_softc *sc, int idx)
{
	struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);

	/* Attach a cluster so a full frame fits in one buffer. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* XXX I don't believe this is necessary. --dyoung */
	if (sc->sc_gigabit)
		m->m_len = sc->sc_parm->p_rxbuf_len;

	/* Tear down the old mapping, if any, before loading the new one. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the entire cluster for writing by the chip. */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("%s", __func__);		/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SIP_INIT_RXDESC(sc, idx);

	return (0);
}
2979
2980 /*
2981 * sip_sis900_set_filter:
2982 *
2983 * Set up the receive filter.
2984 */
2985 static void
2986 sipcom_sis900_set_filter(struct sip_softc *sc)
2987 {
2988 bus_space_tag_t st = sc->sc_st;
2989 bus_space_handle_t sh = sc->sc_sh;
2990 struct ethercom *ec = &sc->sc_ethercom;
2991 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2992 struct ether_multi *enm;
2993 const u_int8_t *cp;
2994 struct ether_multistep step;
2995 u_int32_t crc, mchash[16];
2996
2997 /*
2998 * Initialize the prototype RFCR.
2999 */
3000 sc->sc_rfcr = RFCR_RFEN;
3001 if (ifp->if_flags & IFF_BROADCAST)
3002 sc->sc_rfcr |= RFCR_AAB;
3003 if (ifp->if_flags & IFF_PROMISC) {
3004 sc->sc_rfcr |= RFCR_AAP;
3005 goto allmulti;
3006 }
3007
3008 /*
3009 * Set up the multicast address filter by passing all multicast
3010 * addresses through a CRC generator, and then using the high-order
3011 * 6 bits as an index into the 128 bit multicast hash table (only
3012 * the lower 16 bits of each 32 bit multicast hash register are
3013 * valid). The high order bits select the register, while the
3014 * rest of the bits select the bit within the register.
3015 */
3016
3017 memset(mchash, 0, sizeof(mchash));
3018
3019 /*
3020 * SiS900 (at least SiS963) requires us to register the address of
3021 * the PAUSE packet (01:80:c2:00:00:01) into the address filter.
3022 */
3023 crc = 0x0ed423f9;
3024
3025 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
3026 SIP_SIS900_REV(sc, SIS_REV_960) ||
3027 SIP_SIS900_REV(sc, SIS_REV_900B)) {
3028 /* Just want the 8 most significant bits. */
3029 crc >>= 24;
3030 } else {
3031 /* Just want the 7 most significant bits. */
3032 crc >>= 25;
3033 }
3034
3035 /* Set the corresponding bit in the hash table. */
3036 mchash[crc >> 4] |= 1 << (crc & 0xf);
3037
3038 ETHER_FIRST_MULTI(step, ec, enm);
3039 while (enm != NULL) {
3040 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3041 /*
3042 * We must listen to a range of multicast addresses.
3043 * For now, just accept all multicasts, rather than
3044 * trying to set only those filter bits needed to match
3045 * the range. (At this time, the only use of address
3046 * ranges is for IP multicast routing, for which the
3047 * range is big enough to require all bits set.)
3048 */
3049 goto allmulti;
3050 }
3051
3052 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
3053
3054 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
3055 SIP_SIS900_REV(sc, SIS_REV_960) ||
3056 SIP_SIS900_REV(sc, SIS_REV_900B)) {
3057 /* Just want the 8 most significant bits. */
3058 crc >>= 24;
3059 } else {
3060 /* Just want the 7 most significant bits. */
3061 crc >>= 25;
3062 }
3063
3064 /* Set the corresponding bit in the hash table. */
3065 mchash[crc >> 4] |= 1 << (crc & 0xf);
3066
3067 ETHER_NEXT_MULTI(step, enm);
3068 }
3069
3070 ifp->if_flags &= ~IFF_ALLMULTI;
3071 goto setit;
3072
3073 allmulti:
3074 ifp->if_flags |= IFF_ALLMULTI;
3075 sc->sc_rfcr |= RFCR_AAM;
3076
3077 setit:
3078 #define FILTER_EMIT(addr, data) \
3079 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
3080 delay(1); \
3081 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
3082 delay(1)
3083
3084 /*
3085 * Disable receive filter, and program the node address.
3086 */
3087 cp = CLLADDR(ifp->if_sadl);
3088 FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
3089 FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
3090 FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);
3091
3092 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
3093 /*
3094 * Program the multicast hash table.
3095 */
3096 FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
3097 FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
3098 FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
3099 FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
3100 FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
3101 FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
3102 FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
3103 FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
3104 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
3105 SIP_SIS900_REV(sc, SIS_REV_960) ||
3106 SIP_SIS900_REV(sc, SIS_REV_900B)) {
3107 FILTER_EMIT(RFCR_RFADDR_MC8, mchash[8]);
3108 FILTER_EMIT(RFCR_RFADDR_MC9, mchash[9]);
3109 FILTER_EMIT(RFCR_RFADDR_MC10, mchash[10]);
3110 FILTER_EMIT(RFCR_RFADDR_MC11, mchash[11]);
3111 FILTER_EMIT(RFCR_RFADDR_MC12, mchash[12]);
3112 FILTER_EMIT(RFCR_RFADDR_MC13, mchash[13]);
3113 FILTER_EMIT(RFCR_RFADDR_MC14, mchash[14]);
3114 FILTER_EMIT(RFCR_RFADDR_MC15, mchash[15]);
3115 }
3116 }
3117 #undef FILTER_EMIT
3118
3119 /*
3120 * Re-enable the receiver filter.
3121 */
3122 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
3123 }
3124
3125 /*
3126 * sip_dp83815_set_filter:
3127 *
3128 * Set up the receive filter.
3129 */
3130 static void
3131 sipcom_dp83815_set_filter(struct sip_softc *sc)
3132 {
3133 bus_space_tag_t st = sc->sc_st;
3134 bus_space_handle_t sh = sc->sc_sh;
3135 struct ethercom *ec = &sc->sc_ethercom;
3136 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3137 struct ether_multi *enm;
3138 const u_int8_t *cp;
3139 struct ether_multistep step;
3140 u_int32_t crc, hash, slot, bit;
3141 #define MCHASH_NWORDS_83820 128
3142 #define MCHASH_NWORDS_83815 32
3143 #define MCHASH_NWORDS MAX(MCHASH_NWORDS_83820, MCHASH_NWORDS_83815)
3144 u_int16_t mchash[MCHASH_NWORDS];
3145 int i;
3146
3147 /*
3148 * Initialize the prototype RFCR.
3149 * Enable the receive filter, and accept on
3150 * Perfect (destination address) Match
3151 * If IFF_BROADCAST, also accept all broadcast packets.
3152 * If IFF_PROMISC, accept all unicast packets (and later, set
3153 * IFF_ALLMULTI and accept all multicast, too).
3154 */
3155 sc->sc_rfcr = RFCR_RFEN | RFCR_APM;
3156 if (ifp->if_flags & IFF_BROADCAST)
3157 sc->sc_rfcr |= RFCR_AAB;
3158 if (ifp->if_flags & IFF_PROMISC) {
3159 sc->sc_rfcr |= RFCR_AAP;
3160 goto allmulti;
3161 }
3162
3163 /*
3164 * Set up the DP83820/DP83815 multicast address filter by
3165 * passing all multicast addresses through a CRC generator,
3166 * and then using the high-order 11/9 bits as an index into
3167 * the 2048/512 bit multicast hash table. The high-order
3168 * 7/5 bits select the slot, while the low-order 4 bits
3169 * select the bit within the slot. Note that only the low
3170 * 16-bits of each filter word are used, and there are
3171 * 128/32 filter words.
3172 */
3173
3174 memset(mchash, 0, sizeof(mchash));
3175
3176 ifp->if_flags &= ~IFF_ALLMULTI;
3177 ETHER_FIRST_MULTI(step, ec, enm);
3178 if (enm == NULL)
3179 goto setit;
3180 while (enm != NULL) {
3181 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3182 /*
3183 * We must listen to a range of multicast addresses.
3184 * For now, just accept all multicasts, rather than
3185 * trying to set only those filter bits needed to match
3186 * the range. (At this time, the only use of address
3187 * ranges is for IP multicast routing, for which the
3188 * range is big enough to require all bits set.)
3189 */
3190 goto allmulti;
3191 }
3192
3193 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
3194
3195 if (sc->sc_gigabit) {
3196 /* Just want the 11 most significant bits. */
3197 hash = crc >> 21;
3198 } else {
3199 /* Just want the 9 most significant bits. */
3200 hash = crc >> 23;
3201 }
3202
3203 slot = hash >> 4;
3204 bit = hash & 0xf;
3205
3206 /* Set the corresponding bit in the hash table. */
3207 mchash[slot] |= 1 << bit;
3208
3209 ETHER_NEXT_MULTI(step, enm);
3210 }
3211 sc->sc_rfcr |= RFCR_MHEN;
3212 goto setit;
3213
3214 allmulti:
3215 ifp->if_flags |= IFF_ALLMULTI;
3216 sc->sc_rfcr |= RFCR_AAM;
3217
3218 setit:
3219 #define FILTER_EMIT(addr, data) \
3220 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
3221 delay(1); \
3222 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
3223 delay(1)
3224
3225 /*
3226 * Disable receive filter, and program the node address.
3227 */
3228 cp = CLLADDR(ifp->if_sadl);
3229 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]);
3230 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]);
3231 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]);
3232
3233 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
3234 int nwords =
3235 sc->sc_gigabit ? MCHASH_NWORDS_83820 : MCHASH_NWORDS_83815;
3236 /*
3237 * Program the multicast hash table.
3238 */
3239 for (i = 0; i < nwords; i++) {
3240 FILTER_EMIT(sc->sc_parm->p_filtmem + (i * 2), mchash[i]);
3241 }
3242 }
3243 #undef FILTER_EMIT
3244 #undef MCHASH_NWORDS
3245 #undef MCHASH_NWORDS_83815
3246 #undef MCHASH_NWORDS_83820
3247
3248 /*
3249 * Re-enable the receiver filter.
3250 */
3251 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
3252 }
3253
3254 /*
3255 * sip_dp83820_mii_readreg: [mii interface function]
3256 *
3257 * Read a PHY register on the MII of the DP83820.
3258 */
3259 static int
3260 sipcom_dp83820_mii_readreg(struct device *self, int phy, int reg)
3261 {
3262 struct sip_softc *sc = (void *) self;
3263
3264 if (sc->sc_cfg & CFG_TBI_EN) {
3265 bus_addr_t tbireg;
3266 int rv;
3267
3268 if (phy != 0)
3269 return (0);
3270
3271 switch (reg) {
3272 case MII_BMCR: tbireg = SIP_TBICR; break;
3273 case MII_BMSR: tbireg = SIP_TBISR; break;
3274 case MII_ANAR: tbireg = SIP_TANAR; break;
3275 case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
3276 case MII_ANER: tbireg = SIP_TANER; break;
3277 case MII_EXTSR:
3278 /*
3279 * Don't even bother reading the TESR register.
3280 * The manual documents that the device has
3281 * 1000baseX full/half capability, but the
3282 * register itself seems read back 0 on some
3283 * boards. Just hard-code the result.
3284 */
3285 return (EXTSR_1000XFDX|EXTSR_1000XHDX);
3286
3287 default:
3288 return (0);
3289 }
3290
3291 rv = bus_space_read_4(sc->sc_st, sc->sc_sh, tbireg) & 0xffff;
3292 if (tbireg == SIP_TBISR) {
3293 /* LINK and ACOMP are switched! */
3294 int val = rv;
3295
3296 rv = 0;
3297 if (val & TBISR_MR_LINK_STATUS)
3298 rv |= BMSR_LINK;
3299 if (val & TBISR_MR_AN_COMPLETE)
3300 rv |= BMSR_ACOMP;
3301
3302 /*
3303 * The manual claims this register reads back 0
3304 * on hard and soft reset. But we want to let
3305 * the gentbi driver know that we support auto-
3306 * negotiation, so hard-code this bit in the
3307 * result.
3308 */
3309 rv |= BMSR_ANEG | BMSR_EXTSTAT;
3310 }
3311
3312 return (rv);
3313 }
3314
3315 return mii_bitbang_readreg(self, &sipcom_mii_bitbang_ops, phy, reg);
3316 }
3317
3318 /*
3319 * sip_dp83820_mii_writereg: [mii interface function]
3320 *
3321 * Write a PHY register on the MII of the DP83820.
3322 */
3323 static void
3324 sipcom_dp83820_mii_writereg(struct device *self, int phy, int reg, int val)
3325 {
3326 struct sip_softc *sc = (void *) self;
3327
3328 if (sc->sc_cfg & CFG_TBI_EN) {
3329 bus_addr_t tbireg;
3330
3331 if (phy != 0)
3332 return;
3333
3334 switch (reg) {
3335 case MII_BMCR: tbireg = SIP_TBICR; break;
3336 case MII_ANAR: tbireg = SIP_TANAR; break;
3337 case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
3338 default:
3339 return;
3340 }
3341
3342 bus_space_write_4(sc->sc_st, sc->sc_sh, tbireg, val);
3343 return;
3344 }
3345
3346 mii_bitbang_writereg(self, &sipcom_mii_bitbang_ops, phy, reg, val);
3347 }
3348
3349 /*
3350 * sip_dp83820_mii_statchg: [mii interface function]
3351 *
3352 * Callback from MII layer when media changes.
3353 */
3354 static void
3355 sipcom_dp83820_mii_statchg(struct device *self)
3356 {
3357 struct sip_softc *sc = (struct sip_softc *) self;
3358 struct mii_data *mii = &sc->sc_mii;
3359 u_int32_t cfg, pcr;
3360
3361 /*
3362 * Get flow control negotiation result.
3363 */
3364 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
3365 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
3366 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
3367 mii->mii_media_active &= ~IFM_ETH_FMASK;
3368 }
3369
3370 /*
3371 * Update TXCFG for full-duplex operation.
3372 */
3373 if ((mii->mii_media_active & IFM_FDX) != 0)
3374 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3375 else
3376 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3377
3378 /*
3379 * Update RXCFG for full-duplex or loopback.
3380 */
3381 if ((mii->mii_media_active & IFM_FDX) != 0 ||
3382 IFM_SUBTYPE(mii->mii_media_active) == IFM_LOOP)
3383 sc->sc_rxcfg |= RXCFG_ATX;
3384 else
3385 sc->sc_rxcfg &= ~RXCFG_ATX;
3386
3387 /*
3388 * Update CFG for MII/GMII.
3389 */
3390 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
3391 cfg = sc->sc_cfg | CFG_MODE_1000;
3392 else
3393 cfg = sc->sc_cfg;
3394
3395 /*
3396 * 802.3x flow control.
3397 */
3398 pcr = 0;
3399 if (sc->sc_flowflags & IFM_FLOW) {
3400 if (sc->sc_flowflags & IFM_ETH_TXPAUSE)
3401 pcr |= sc->sc_rx_flow_thresh;
3402 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
3403 pcr |= PCR_PSEN | PCR_PS_MCAST;
3404 }
3405
3406 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
3407 bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_txcfg,
3408 sc->sc_txcfg);
3409 bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_rxcfg,
3410 sc->sc_rxcfg);
3411 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PCR, pcr);
3412 }
3413
3414 /*
3415 * sip_mii_bitbang_read: [mii bit-bang interface function]
3416 *
3417 * Read the MII serial port for the MII bit-bang module.
3418 */
3419 static u_int32_t
3420 sipcom_mii_bitbang_read(struct device *self)
3421 {
3422 struct sip_softc *sc = (void *) self;
3423
3424 return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR));
3425 }
3426
3427 /*
3428 * sip_mii_bitbang_write: [mii big-bang interface function]
3429 *
3430 * Write the MII serial port for the MII bit-bang module.
3431 */
3432 static void
3433 sipcom_mii_bitbang_write(struct device *self, u_int32_t val)
3434 {
3435 struct sip_softc *sc = (void *) self;
3436
3437 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val);
3438 }
3439
3440 /*
3441 * sip_sis900_mii_readreg: [mii interface function]
3442 *
3443 * Read a PHY register on the MII.
3444 */
3445 static int
3446 sipcom_sis900_mii_readreg(struct device *self, int phy, int reg)
3447 {
3448 struct sip_softc *sc = (struct sip_softc *) self;
3449 u_int32_t enphy;
3450
3451 /*
3452 * The PHY of recent SiS chipsets is accessed through bitbang
3453 * operations.
3454 */
3455 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900)
3456 return mii_bitbang_readreg(self, &sipcom_mii_bitbang_ops,
3457 phy, reg);
3458
3459 #ifndef SIS900_MII_RESTRICT
3460 /*
3461 * The SiS 900 has only an internal PHY on the MII. Only allow
3462 * MII address 0.
3463 */
3464 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
3465 return (0);
3466 #endif
3467
3468 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
3469 (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
3470 ENPHY_RWCMD | ENPHY_ACCESS);
3471 do {
3472 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
3473 } while (enphy & ENPHY_ACCESS);
3474 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
3475 }
3476
3477 /*
3478 * sip_sis900_mii_writereg: [mii interface function]
3479 *
3480 * Write a PHY register on the MII.
3481 */
3482 static void
3483 sipcom_sis900_mii_writereg(struct device *self, int phy, int reg, int val)
3484 {
3485 struct sip_softc *sc = (struct sip_softc *) self;
3486 u_int32_t enphy;
3487
3488 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900) {
3489 mii_bitbang_writereg(self, &sipcom_mii_bitbang_ops,
3490 phy, reg, val);
3491 return;
3492 }
3493
3494 #ifndef SIS900_MII_RESTRICT
3495 /*
3496 * The SiS 900 has only an internal PHY on the MII. Only allow
3497 * MII address 0.
3498 */
3499 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
3500 return;
3501 #endif
3502
3503 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
3504 (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
3505 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
3506 do {
3507 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
3508 } while (enphy & ENPHY_ACCESS);
3509 }
3510
3511 /*
3512 * sip_sis900_mii_statchg: [mii interface function]
3513 *
3514 * Callback from MII layer when media changes.
3515 */
3516 static void
3517 sipcom_sis900_mii_statchg(struct device *self)
3518 {
3519 struct sip_softc *sc = (struct sip_softc *) self;
3520 struct mii_data *mii = &sc->sc_mii;
3521 u_int32_t flowctl;
3522
3523 /*
3524 * Get flow control negotiation result.
3525 */
3526 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
3527 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
3528 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
3529 mii->mii_media_active &= ~IFM_ETH_FMASK;
3530 }
3531
3532 /*
3533 * Update TXCFG for full-duplex operation.
3534 */
3535 if ((mii->mii_media_active & IFM_FDX) != 0)
3536 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3537 else
3538 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3539
3540 /*
3541 * Update RXCFG for full-duplex or loopback.
3542 */
3543 if ((mii->mii_media_active & IFM_FDX) != 0 ||
3544 IFM_SUBTYPE(mii->mii_media_active) == IFM_LOOP)
3545 sc->sc_rxcfg |= RXCFG_ATX;
3546 else
3547 sc->sc_rxcfg &= ~RXCFG_ATX;
3548
3549 /*
3550 * Update IMR for use of 802.3x flow control.
3551 */
3552 if (sc->sc_flowflags & IFM_FLOW) {
3553 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
3554 flowctl = FLOWCTL_FLOWEN;
3555 } else {
3556 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
3557 flowctl = 0;
3558 }
3559
3560 bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_txcfg,
3561 sc->sc_txcfg);
3562 bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_rxcfg,
3563 sc->sc_rxcfg);
3564 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
3565 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
3566 }
3567
3568 /*
3569 * sip_dp83815_mii_readreg: [mii interface function]
3570 *
3571 * Read a PHY register on the MII.
3572 */
3573 static int
3574 sipcom_dp83815_mii_readreg(struct device *self, int phy, int reg)
3575 {
3576 struct sip_softc *sc = (struct sip_softc *) self;
3577 u_int32_t val;
3578
3579 /*
3580 * The DP83815 only has an internal PHY. Only allow
3581 * MII address 0.
3582 */
3583 if (phy != 0)
3584 return (0);
3585
3586 /*
3587 * Apparently, after a reset, the DP83815 can take a while
3588 * to respond. During this recovery period, the BMSR returns
3589 * a value of 0. Catch this -- it's not supposed to happen
3590 * (the BMSR has some hardcoded-to-1 bits), and wait for the
3591 * PHY to come back to life.
3592 *
3593 * This works out because the BMSR is the first register
3594 * read during the PHY probe process.
3595 */
3596 do {
3597 val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg));
3598 } while (reg == MII_BMSR && val == 0);
3599
3600 return (val & 0xffff);
3601 }
3602
3603 /*
3604 * sip_dp83815_mii_writereg: [mii interface function]
3605 *
3606 * Write a PHY register to the MII.
3607 */
3608 static void
3609 sipcom_dp83815_mii_writereg(struct device *self, int phy, int reg, int val)
3610 {
3611 struct sip_softc *sc = (struct sip_softc *) self;
3612
3613 /*
3614 * The DP83815 only has an internal PHY. Only allow
3615 * MII address 0.
3616 */
3617 if (phy != 0)
3618 return;
3619
3620 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val);
3621 }
3622
3623 /*
3624 * sip_dp83815_mii_statchg: [mii interface function]
3625 *
3626 * Callback from MII layer when media changes.
3627 */
3628 static void
3629 sipcom_dp83815_mii_statchg(struct device *self)
3630 {
3631 struct sip_softc *sc = (struct sip_softc *) self;
3632
3633 /*
3634 * Update TXCFG for full-duplex operation.
3635 */
3636 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3637 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3638 else
3639 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3640
3641 /*
3642 * Update RXCFG for full-duplex or loopback.
3643 */
3644 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3645 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3646 sc->sc_rxcfg |= RXCFG_ATX;
3647 else
3648 sc->sc_rxcfg &= ~RXCFG_ATX;
3649
3650 /*
3651 * XXX 802.3x flow control.
3652 */
3653
3654 bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_txcfg,
3655 sc->sc_txcfg);
3656 bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_rxcfg,
3657 sc->sc_rxcfg);
3658
3659 /*
3660 * Some DP83815s experience problems when used with short
3661 * (< 30m/100ft) Ethernet cables in 100BaseTX mode. This
3662 * sequence adjusts the DSP's signal attenuation to fix the
3663 * problem.
3664 */
3665 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
3666 uint32_t reg;
3667
3668 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00cc, 0x0001);
3669
3670 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00f4);
3671 reg &= 0x0fff;
3672 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00f4, reg | 0x1000);
3673 delay(100);
3674 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00fc);
3675 reg &= 0x00ff;
3676 if ((reg & 0x0080) == 0 || (reg >= 0x00d8)) {
3677 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00fc,
3678 0x00e8);
3679 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00f4);
3680 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00f4,
3681 reg | 0x20);
3682 }
3683
3684 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00cc, 0);
3685 }
3686 }
3687
3688 static void
3689 sipcom_dp83820_read_macaddr(struct sip_softc *sc,
3690 const struct pci_attach_args *pa, u_int8_t *enaddr)
3691 {
3692 u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2];
3693 u_int8_t cksum, *e, match;
3694 int i;
3695
3696 /*
3697 * EEPROM data format for the DP83820 can be found in
3698 * the DP83820 manual, section 4.2.4.
3699 */
3700
3701 sipcom_read_eeprom(sc, 0, __arraycount(eeprom_data), eeprom_data);
3702
3703 match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8;
3704 match = ~(match - 1);
3705
3706 cksum = 0x55;
3707 e = (u_int8_t *) eeprom_data;
3708 for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++)
3709 cksum += *e++;
3710
3711 if (cksum != match)
3712 printf("%s: Checksum (%x) mismatch (%x)",
3713 sc->sc_dev.dv_xname, cksum, match);
3714
3715 enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff;
3716 enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8;
3717 enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff;
3718 enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8;
3719 enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff;
3720 enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8;
3721 }
3722
3723 static void
3724 sipcom_sis900_eeprom_delay(struct sip_softc *sc)
3725 {
3726 int i;
3727
3728 /*
3729 * FreeBSD goes from (300/33)+1 [10] to 0. There must be
3730 * a reason, but I don't know it.
3731 */
3732 for (i = 0; i < 10; i++)
3733 bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR);
3734 }
3735
/*
 * sipcom_sis900_read_macaddr:
 *
 *	Recover the station address into enaddr[].  The source depends
 *	on the chip revision: NVRAM via the filter registers (SiS 630
 *	family), a shared EEPROM requiring bus arbitration (SiS 960),
 *	or a directly readable EEPROM (everything else).
 */
static void
sipcom_sis900_read_macaddr(struct sip_softc *sc,
    const struct pci_attach_args *pa, u_int8_t *enaddr)
{
	u_int16_t myea[ETHER_ADDR_LEN / 2];

	switch (sc->sc_rev) {
	case SIS_REV_630S:
	case SIS_REV_630E:
	case SIS_REV_630EA1:
	case SIS_REV_630ET:
	case SIS_REV_635:
		/*
		 * The MAC address for the on-board Ethernet of
		 * the SiS 630 chipset is in the NVRAM.  Kick
		 * the chip into re-loading it from NVRAM, and
		 * read the MAC address out of the filter registers.
		 */
		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD);

		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
		    RFCR_RFADDR_NODE0);
		myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
		    0xffff;

		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
		    RFCR_RFADDR_NODE2);
		myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
		    0xffff;

		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
		    RFCR_RFADDR_NODE4);
		myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
		    0xffff;
		break;

	case SIS_REV_960:
		{
		/* Set/clear bits in EROMAR via read-modify-write. */
#define SIS_SET_EROMAR(x,y)	bus_space_write_4(x->sc_st, x->sc_sh, SIP_EROMAR,	\
	bus_space_read_4(x->sc_st, x->sc_sh, SIP_EROMAR) | (y))

#define SIS_CLR_EROMAR(x,y)	bus_space_write_4(x->sc_st, x->sc_sh, SIP_EROMAR,	\
	bus_space_read_4(x->sc_st, x->sc_sh, SIP_EROMAR) & ~(y))

		int waittime, i;

		/* Allow to read EEPROM from LAN.  It is shared
		 * between a 1394 controller and the NIC and each
		 * time we access it, we need to set SIS_EECMD_REQ.
		 */
		SIS_SET_EROMAR(sc, EROMAR_REQ);

		for (waittime = 0; waittime < 1000; waittime++) { /* 1 ms max */
			/* Force EEPROM to idle state. */

			/*
			 * XXX-cube This is ugly.  I'll look for docs about it.
			 */
			SIS_SET_EROMAR(sc, EROMAR_EECS);
			sipcom_sis900_eeprom_delay(sc);
			/* Clock the EEPROM back to its idle state. */
			for (i = 0; i <= 25; i++) { /* Yes, 26 times. */
				SIS_SET_EROMAR(sc, EROMAR_EESK);
				sipcom_sis900_eeprom_delay(sc);
				SIS_CLR_EROMAR(sc, EROMAR_EESK);
				sipcom_sis900_eeprom_delay(sc);
			}
			SIS_CLR_EROMAR(sc, EROMAR_EECS);
			sipcom_sis900_eeprom_delay(sc);
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, 0);

			/* Read the address once the grant bit is set. */
			if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR) & EROMAR_GNT) {
				sipcom_read_eeprom(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
				    sizeof(myea) / sizeof(myea[0]), myea);
				break;
			}
			DELAY(1);
		}

		/*
		 * Set SIS_EECTL_CLK to high, so another master
		 * can operate on the i2c bus.
		 */
		SIS_SET_EROMAR(sc, EROMAR_EESK);

		/* Refuse EEPROM access by LAN */
		SIS_SET_EROMAR(sc, EROMAR_DONE);
		} break;

	default:
		/* The EEPROM is directly accessible on other revisions. */
		sipcom_read_eeprom(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
		    sizeof(myea) / sizeof(myea[0]), myea);
	}

	/* The EEPROM stores the address as three little-endian words. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
}
3836
/*
 * Table and macro to bit-reverse an octet: bbr4[] maps a 4-bit value
 * to its bit-reversed counterpart, and bbr() reverses a full byte by
 * reversing each nibble and swapping the two halves.
 */
static const u_int8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
#define bbr(v)	((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf])
3840
3841 static void
3842 sipcom_dp83815_read_macaddr(struct sip_softc *sc,
3843 const struct pci_attach_args *pa, u_int8_t *enaddr)
3844 {
3845 u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea;
3846 u_int8_t cksum, *e, match;
3847 int i;
3848
3849 sipcom_read_eeprom(sc, 0, sizeof(eeprom_data) /
3850 sizeof(eeprom_data[0]), eeprom_data);
3851
3852 match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8;
3853 match = ~(match - 1);
3854
3855 cksum = 0x55;
3856 e = (u_int8_t *) eeprom_data;
3857 for (i=0 ; i<SIP_DP83815_EEPROM_CHECKSUM ; i++) {
3858 cksum += *e++;
3859 }
3860 if (cksum != match) {
3861 printf("%s: Checksum (%x) mismatch (%x)",
3862 sc->sc_dev.dv_xname, cksum, match);
3863 }
3864
3865 /*
3866 * Unrolled because it makes slightly more sense this way.
3867 * The DP83815 stores the MAC address in bit 0 of word 6
3868 * through bit 15 of word 8.
3869 */
3870 ea = &eeprom_data[6];
3871 enaddr[0] = ((*ea & 0x1) << 7);
3872 ea++;
3873 enaddr[0] |= ((*ea & 0xFE00) >> 9);
3874 enaddr[1] = ((*ea & 0x1FE) >> 1);
3875 enaddr[2] = ((*ea & 0x1) << 7);
3876 ea++;
3877 enaddr[2] |= ((*ea & 0xFE00) >> 9);
3878 enaddr[3] = ((*ea & 0x1FE) >> 1);
3879 enaddr[4] = ((*ea & 0x1) << 7);
3880 ea++;
3881 enaddr[4] |= ((*ea & 0xFE00) >> 9);
3882 enaddr[5] = ((*ea & 0x1FE) >> 1);
3883
3884 /*
3885 * In case that's not weird enough, we also need to reverse
3886 * the bits in each byte. This all actually makes more sense
3887 * if you think about the EEPROM storage as an array of bits
3888 * being shifted into bytes, but that's not how we're looking
3889 * at it here...
3890 */
3891 for (i = 0; i < 6 ;i++)
3892 enaddr[i] = bbr(enaddr[i]);
3893 }
3894
3895 /*
3896 * sip_mediastatus: [ifmedia interface function]
3897 *
3898 * Get the current interface media status.
3899 */
3900 static void
3901 sipcom_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3902 {
3903 struct sip_softc *sc = ifp->if_softc;
3904
3905 mii_pollstat(&sc->sc_mii);
3906 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3907 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
3908 sc->sc_flowflags;
3909 }
3910
3911 /*
3912 * sip_mediachange: [ifmedia interface function]
3913 *
3914 * Set hardware to newly-selected media.
3915 */
3916 static int
3917 sipcom_mediachange(struct ifnet *ifp)
3918 {
3919 struct sip_softc *sc = ifp->if_softc;
3920
3921 if (ifp->if_flags & IFF_UP)
3922 mii_mediachg(&sc->sc_mii);
3923 return (0);
3924 }
3925