/*	$NetBSD: if_wm.c,v 1.239 2012/12/12 09:20:35 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.239 2012/12/12 09:20:35 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
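
/*
 * A hypothetical usage sketch: DPRINTF takes the whole printf argument
 * list as a single parenthesized macro argument, e.g.
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: ...\n", device_xname(sc->sc_dev)));
 *
 * note the extra parentheses, since the macro expands to
 * "if (wm_debug & (x)) printf y".
 */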

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
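
/*
 * Since both ring sizes are powers of two, the wrap-around above is a
 * simple AND with the size-minus-one mask; WM_NEXTTX(sc, x) is
 * equivalent to ((x) + 1) % WM_NTXDESC(sc), without the division.
 */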

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
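
/*
 * IP_MAXPACKET is 65535, so with 4KB pages WM_MAXTXDMA rounds up to
 * 64KB: the most payload a single TSO job can map.
 */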

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};
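
/*
 * The union above overlays the legacy (wiseman) and newer-queue (nq)
 * transmit descriptor layouts on the same memory; a given chip only
 * ever uses one of the two formats.
 */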

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
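
/*
 * This table maps a function ID (0-3, cf. sc_funcid below) to the
 * corresponding PHY semaphore bit for multi-port devices.
 */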

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	const struct wm_product *sc_wmp; /* pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control data segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
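
/*
 * In the two macros above, sc_rxtailp always points at the m_next field
 * of the last mbuf in the Rx chain (or at sc_rxhead when the chain is
 * empty), so appending an mbuf is constant-time with no empty-chain
 * special case.
 */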

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
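
/*
 * Descriptor base addresses are programmed into the chip as two 32-bit
 * halves; the _HI macros above evaluate to 0 when bus_addr_t is only
 * 32 bits wide.
 */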

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * Not sure whether this should be WMP_F_1000X or WMP_F_SERDES;
	 * we do not have the hardware, so it is disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

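/*
 * Indirect register access through the I/O BAR: wm_io_read() and
 * wm_io_write() first write the register offset to the address window
 * at offset 0 and then access the data through the window at offset 4.
 */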
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i != PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR.
			 * That's no problem, because the newer chips don't
			 * have the bug this mapping works around.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
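	/*
	 * The 4G constraint is enforced by the bus_dmamem_alloc() call
	 * below: the (bus_size_t)0x100000000ULL argument is the DMA
	 * boundary, so the allocation can never cross a 4G address
	 * boundary.
	 */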
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/* get PHY control from SMBus to PCIe */
	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
		wm_smbustopci(sc);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
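		/*
		 * GFPREG gives the base and limit of the flash region in
		 * units of ICH_FLASH_SECTOR_SIZE.  The region holds two
		 * NVM banks, so one bank is half the region; the size is
		 * converted below from sectors to 16-bit NVM words (hence
		 * the final division by 2 * sizeof(uint16_t)).
		 */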
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the
	 * checksum.  This allows the EEPROM type to be printed correctly
	 * in the case that no EEPROM is attached.
	 */
1615 /*
1616 * Validate the EEPROM checksum. If the checksum fails, flag
1617 * this for later, so we can fail future reads from the EEPROM.
1618 */
1619 if (wm_validate_eeprom_checksum(sc)) {
1620 /*
1621 		 * Retry the validation; some PCIe parts fail the
1622 		 * first check because the link is in a sleep state.
1623 */
1624 if (wm_validate_eeprom_checksum(sc))
1625 sc->sc_flags |= WM_F_EEPROM_INVALID;
1626 }
1627
1628 /* Set device properties (macflags) */
1629 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1630
1631 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1632 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1633 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1634 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1635 } else {
1636 if (sc->sc_flags & WM_F_EEPROM_SPI)
1637 eetype = "SPI";
1638 else
1639 eetype = "MicroWire";
1640 aprint_verbose_dev(sc->sc_dev,
1641 "%u word (%d address bits) %s EEPROM\n",
1642 1U << sc->sc_ee_addrbits,
1643 sc->sc_ee_addrbits, eetype);
1644 }
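	/*
	 * For reference (worked example): the word count printed above
	 * is 1U << sc_ee_addrbits, so a 6-address-bit MicroWire part
	 * holds 64 16-bit words and an 8-bit part holds 256; SPI parts
	 * get their address width from wm_set_spiaddrbits() instead.
	 */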
1645
1646 /*
1647 	 * Read the Ethernet address from device properties if present;
1648 	 * otherwise read it from the EEPROM.
1649 */
1650 ea = prop_dictionary_get(dict, "mac-address");
1651 if (ea != NULL) {
1652 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1653 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1654 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1655 } else {
1656 if (wm_read_mac_addr(sc, enaddr) != 0) {
1657 aprint_error_dev(sc->sc_dev,
1658 "unable to read Ethernet address\n");
1659 return;
1660 }
1661 }
1662
1663 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1664 ether_sprintf(enaddr));
1665
1666 /*
1667 * Read the config info from the EEPROM, and set up various
1668 * bits in the control registers based on their contents.
1669 */
1670 pn = prop_dictionary_get(dict, "i82543-cfg1");
1671 if (pn != NULL) {
1672 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1673 cfg1 = (uint16_t) prop_number_integer_value(pn);
1674 } else {
1675 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1676 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1677 return;
1678 }
1679 }
1680
1681 pn = prop_dictionary_get(dict, "i82543-cfg2");
1682 if (pn != NULL) {
1683 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1684 cfg2 = (uint16_t) prop_number_integer_value(pn);
1685 } else {
1686 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1687 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1688 return;
1689 }
1690 }
1691
1692 /* check for WM_F_WOL */
1693 switch (sc->sc_type) {
1694 case WM_T_82542_2_0:
1695 case WM_T_82542_2_1:
1696 case WM_T_82543:
1697 /* dummy? */
1698 eeprom_data = 0;
1699 apme_mask = EEPROM_CFG3_APME;
1700 break;
1701 case WM_T_82544:
1702 apme_mask = EEPROM_CFG2_82544_APM_EN;
1703 eeprom_data = cfg2;
1704 break;
1705 case WM_T_82546:
1706 case WM_T_82546_3:
1707 case WM_T_82571:
1708 case WM_T_82572:
1709 case WM_T_82573:
1710 case WM_T_82574:
1711 case WM_T_82583:
1712 case WM_T_80003:
1713 default:
1714 apme_mask = EEPROM_CFG3_APME;
1715 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1716 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1717 break;
1718 case WM_T_82575:
1719 case WM_T_82576:
1720 case WM_T_82580:
1721 case WM_T_82580ER:
1722 case WM_T_I350:
1723 case WM_T_ICH8:
1724 case WM_T_ICH9:
1725 case WM_T_ICH10:
1726 case WM_T_PCH:
1727 case WM_T_PCH2:
1728 /* XXX The funcid should be checked on some devices */
1729 apme_mask = WUC_APME;
1730 eeprom_data = CSR_READ(sc, WMREG_WUC);
1731 break;
1732 }
1733
1734 	/* Check for the WM_F_WOL flag now that the EEPROM/WUC data has been read */
1735 if ((eeprom_data & apme_mask) != 0)
1736 sc->sc_flags |= WM_F_WOL;
1737 #ifdef WM_DEBUG
1738 if ((sc->sc_flags & WM_F_WOL) != 0)
1739 printf("WOL\n");
1740 #endif
1741
1742 /*
1743 	 * XXX Some multiple-port cards need special handling to
1744 	 * disable a particular port.
1745 */
1746
1747 if (sc->sc_type >= WM_T_82544) {
1748 pn = prop_dictionary_get(dict, "i82543-swdpin");
1749 if (pn != NULL) {
1750 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1751 swdpin = (uint16_t) prop_number_integer_value(pn);
1752 } else {
1753 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1754 aprint_error_dev(sc->sc_dev,
1755 "unable to read SWDPIN\n");
1756 return;
1757 }
1758 }
1759 }
1760
1761 if (cfg1 & EEPROM_CFG1_ILOS)
1762 sc->sc_ctrl |= CTRL_ILOS;
1763 if (sc->sc_type >= WM_T_82544) {
1764 sc->sc_ctrl |=
1765 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1766 CTRL_SWDPIO_SHIFT;
1767 sc->sc_ctrl |=
1768 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1769 CTRL_SWDPINS_SHIFT;
1770 } else {
1771 sc->sc_ctrl |=
1772 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1773 CTRL_SWDPIO_SHIFT;
1774 }
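	/*
	 * Sketch of the relocation above (values illustrative): the
	 * EEPROM SWDPIN word packs the software-definable pin
	 * directions and initial values as two 4-bit fields, each of
	 * which is masked out with "(swdpin >> shift) & 0xf" and
	 * re-shifted into its own field of the CTRL register, e.g. a
	 * SWDPIO nibble of 0x3 lands as 0x3 << CTRL_SWDPIO_SHIFT.
	 */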
1775
1776 #if 0
1777 if (sc->sc_type >= WM_T_82544) {
1778 if (cfg1 & EEPROM_CFG1_IPS0)
1779 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1780 if (cfg1 & EEPROM_CFG1_IPS1)
1781 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1782 sc->sc_ctrl_ext |=
1783 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1784 CTRL_EXT_SWDPIO_SHIFT;
1785 sc->sc_ctrl_ext |=
1786 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1787 CTRL_EXT_SWDPINS_SHIFT;
1788 } else {
1789 sc->sc_ctrl_ext |=
1790 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1791 CTRL_EXT_SWDPIO_SHIFT;
1792 }
1793 #endif
1794
1795 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1796 #if 0
1797 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1798 #endif
1799
1800 /*
1801 * Set up some register offsets that are different between
1802 * the i82542 and the i82543 and later chips.
1803 */
1804 if (sc->sc_type < WM_T_82543) {
1805 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1806 sc->sc_tdt_reg = WMREG_OLD_TDT;
1807 } else {
1808 sc->sc_rdt_reg = WMREG_RDT;
1809 sc->sc_tdt_reg = WMREG_TDT;
1810 }
1811
1812 if (sc->sc_type == WM_T_PCH) {
1813 uint16_t val;
1814
1815 /* Save the NVM K1 bit setting */
1816 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1817
1818 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1819 sc->sc_nvm_k1_enabled = 1;
1820 else
1821 sc->sc_nvm_k1_enabled = 0;
1822 }
1823
1824 /*
1825 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
1826 * media structures accordingly.
1827 */
1828 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1829 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1830 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1831 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1832 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1833 wm_gmii_mediainit(sc, wmp->wmp_product);
1834 } else if (sc->sc_type < WM_T_82543 ||
1835 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1836 if (wmp->wmp_flags & WMP_F_1000T)
1837 aprint_error_dev(sc->sc_dev,
1838 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1839 wm_tbi_mediainit(sc);
1840 } else {
1841 switch (sc->sc_type) {
1842 case WM_T_82575:
1843 case WM_T_82576:
1844 case WM_T_82580:
1845 case WM_T_82580ER:
1846 case WM_T_I350:
1847 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1848 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1849 case CTRL_EXT_LINK_MODE_SGMII:
1850 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1851 sc->sc_flags |= WM_F_SGMII;
1852 CSR_WRITE(sc, WMREG_CTRL_EXT,
1853 reg | CTRL_EXT_I2C_ENA);
1854 wm_gmii_mediainit(sc, wmp->wmp_product);
1855 break;
1856 case CTRL_EXT_LINK_MODE_1000KX:
1857 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1858 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1859 CSR_WRITE(sc, WMREG_CTRL_EXT,
1860 reg | CTRL_EXT_I2C_ENA);
1861 panic("not supported yet\n");
1862 break;
1863 case CTRL_EXT_LINK_MODE_GMII:
1864 default:
1865 CSR_WRITE(sc, WMREG_CTRL_EXT,
1866 reg & ~CTRL_EXT_I2C_ENA);
1867 wm_gmii_mediainit(sc, wmp->wmp_product);
1868 break;
1869 }
1870 break;
1871 default:
1872 if (wmp->wmp_flags & WMP_F_1000X)
1873 aprint_error_dev(sc->sc_dev,
1874 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1875 wm_gmii_mediainit(sc, wmp->wmp_product);
1876 }
1877 }
1878
1879 ifp = &sc->sc_ethercom.ec_if;
1880 xname = device_xname(sc->sc_dev);
1881 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1882 ifp->if_softc = sc;
1883 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1884 ifp->if_ioctl = wm_ioctl;
1885 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1886 ifp->if_start = wm_nq_start;
1887 else
1888 ifp->if_start = wm_start;
1889 ifp->if_watchdog = wm_watchdog;
1890 ifp->if_init = wm_init;
1891 ifp->if_stop = wm_stop;
1892 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1893 IFQ_SET_READY(&ifp->if_snd);
1894
1895 /* Check for jumbo frame */
1896 switch (sc->sc_type) {
1897 case WM_T_82573:
1898 /* XXX limited to 9234 if ASPM is disabled */
1899 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1900 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1901 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1902 break;
1903 case WM_T_82571:
1904 case WM_T_82572:
1905 case WM_T_82574:
1906 case WM_T_82575:
1907 case WM_T_82576:
1908 case WM_T_82580:
1909 case WM_T_82580ER:
1910 case WM_T_I350:
1911 case WM_T_80003:
1912 case WM_T_ICH9:
1913 case WM_T_ICH10:
1914 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1915 /* XXX limited to 9234 */
1916 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1917 break;
1918 case WM_T_PCH:
1919 /* XXX limited to 4096 */
1920 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1921 break;
1922 case WM_T_82542_2_0:
1923 case WM_T_82542_2_1:
1924 case WM_T_82583:
1925 case WM_T_ICH8:
1926 /* No support for jumbo frame */
1927 break;
1928 default:
1929 /* ETHER_MAX_LEN_JUMBO */
1930 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1931 break;
1932 }
1933
1934 /*
1935 	 * If we're an i82543 or greater, we can support VLANs.
1936 */
1937 if (sc->sc_type >= WM_T_82543)
1938 sc->sc_ethercom.ec_capabilities |=
1939 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1940
1941 /*
1942 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
1943 	 * on the i82543 and later.
1944 */
1945 if (sc->sc_type >= WM_T_82543) {
1946 ifp->if_capabilities |=
1947 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1948 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1949 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1950 IFCAP_CSUM_TCPv6_Tx |
1951 IFCAP_CSUM_UDPv6_Tx;
1952 }
1953
1954 /*
1955 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1956 *
1957 * 82541GI (8086:1076) ... no
1958 * 82572EI (8086:10b9) ... yes
1959 */
1960 if (sc->sc_type >= WM_T_82571) {
1961 ifp->if_capabilities |=
1962 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1963 }
1964
1965 /*
1966 	 * If we're an i82544 or greater (except the i82547), we can do
1967 * TCP segmentation offload.
1968 */
1969 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1970 ifp->if_capabilities |= IFCAP_TSOv4;
1971 }
1972
1973 if (sc->sc_type >= WM_T_82571) {
1974 ifp->if_capabilities |= IFCAP_TSOv6;
1975 }
1976
1977 /*
1978 * Attach the interface.
1979 */
1980 if_attach(ifp);
1981 ether_ifattach(ifp, enaddr);
1982 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1983 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1984
1985 #ifdef WM_EVENT_COUNTERS
1986 /* Attach event counters. */
1987 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1988 NULL, xname, "txsstall");
1989 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1990 NULL, xname, "txdstall");
1991 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1992 NULL, xname, "txfifo_stall");
1993 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1994 NULL, xname, "txdw");
1995 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1996 NULL, xname, "txqe");
1997 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1998 NULL, xname, "rxintr");
1999 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2000 NULL, xname, "linkintr");
2001
2002 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2003 NULL, xname, "rxipsum");
2004 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2005 NULL, xname, "rxtusum");
2006 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2007 NULL, xname, "txipsum");
2008 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2009 NULL, xname, "txtusum");
2010 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2011 NULL, xname, "txtusum6");
2012
2013 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2014 NULL, xname, "txtso");
2015 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2016 NULL, xname, "txtso6");
2017 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2018 NULL, xname, "txtsopain");
2019
2020 for (i = 0; i < WM_NTXSEGS; i++) {
2021 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2022 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2023 NULL, xname, wm_txseg_evcnt_names[i]);
2024 }
2025
2026 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2027 NULL, xname, "txdrop");
2028
2029 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2030 NULL, xname, "tu");
2031
2032 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2033 NULL, xname, "tx_xoff");
2034 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2035 NULL, xname, "tx_xon");
2036 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2037 NULL, xname, "rx_xoff");
2038 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2039 NULL, xname, "rx_xon");
2040 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2041 NULL, xname, "rx_macctl");
2042 #endif /* WM_EVENT_COUNTERS */
2043
2044 if (pmf_device_register(self, wm_suspend, wm_resume))
2045 pmf_class_network_register(self, ifp);
2046 else
2047 aprint_error_dev(self, "couldn't establish power handler\n");
2048
2049 return;
2050
2051 /*
2052 * Free any resources we've allocated during the failed attach
2053 * attempt. Do this in reverse order and fall through.
2054 */
2055 fail_5:
2056 for (i = 0; i < WM_NRXDESC; i++) {
2057 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2058 bus_dmamap_destroy(sc->sc_dmat,
2059 sc->sc_rxsoft[i].rxs_dmamap);
2060 }
2061 fail_4:
2062 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2063 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2064 bus_dmamap_destroy(sc->sc_dmat,
2065 sc->sc_txsoft[i].txs_dmamap);
2066 }
2067 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2068 fail_3:
2069 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2070 fail_2:
2071 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2072 sc->sc_cd_size);
2073 fail_1:
2074 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2075 fail_0:
2076 return;
2077 }
2078
2079 static int
2080 wm_detach(device_t self, int flags __unused)
2081 {
2082 struct wm_softc *sc = device_private(self);
2083 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2084 int i, s;
2085
2086 s = splnet();
2087 	/* Stop the interface; callouts are stopped inside wm_stop(). */
2088 wm_stop(ifp, 1);
2089 splx(s);
2090
2091 pmf_device_deregister(self);
2092
2093 /* Tell the firmware about the release */
2094 wm_release_manageability(sc);
2095 wm_release_hw_control(sc);
2096
2097 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2098
2099 /* Delete all remaining media. */
2100 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2101
2102 ether_ifdetach(ifp);
2103 if_detach(ifp);
2104
2105
2106 /* Unload RX dmamaps and free mbufs */
2107 wm_rxdrain(sc);
2108
2109 	/* Free DMA maps; this mirrors the failure path at the end of wm_attach() */
2110 for (i = 0; i < WM_NRXDESC; i++) {
2111 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2112 bus_dmamap_destroy(sc->sc_dmat,
2113 sc->sc_rxsoft[i].rxs_dmamap);
2114 }
2115 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2116 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2117 bus_dmamap_destroy(sc->sc_dmat,
2118 sc->sc_txsoft[i].txs_dmamap);
2119 }
2120 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2121 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2122 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2123 sc->sc_cd_size);
2124 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2125
2126 /* Disestablish the interrupt handler */
2127 if (sc->sc_ih != NULL) {
2128 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2129 sc->sc_ih = NULL;
2130 }
2131
2132 /* Unmap the registers */
2133 if (sc->sc_ss) {
2134 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2135 sc->sc_ss = 0;
2136 }
2137
2138 if (sc->sc_ios) {
2139 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2140 sc->sc_ios = 0;
2141 }
2142
2143 return 0;
2144 }
2145
2146 /*
2147 * wm_tx_offload:
2148 *
2149 * Set up TCP/IP checksumming parameters for the
2150 * specified packet.
2151 */
2152 static int
2153 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2154 uint8_t *fieldsp)
2155 {
2156 struct mbuf *m0 = txs->txs_mbuf;
2157 struct livengood_tcpip_ctxdesc *t;
2158 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2159 uint32_t ipcse;
2160 struct ether_header *eh;
2161 int offset, iphl;
2162 uint8_t fields;
2163
2164 /*
2165 * XXX It would be nice if the mbuf pkthdr had offset
2166 * fields for the protocol headers.
2167 */
2168
2169 eh = mtod(m0, struct ether_header *);
2170 switch (htons(eh->ether_type)) {
2171 case ETHERTYPE_IP:
2172 case ETHERTYPE_IPV6:
2173 offset = ETHER_HDR_LEN;
2174 break;
2175
2176 case ETHERTYPE_VLAN:
2177 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2178 break;
2179
2180 default:
2181 /*
2182 		 * We don't support this protocol or encapsulation.
2183 */
2184 *fieldsp = 0;
2185 *cmdp = 0;
2186 return 0;
2187 }
2188
2189 if ((m0->m_pkthdr.csum_flags &
2190 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2191 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2192 } else {
2193 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2194 }
2195 ipcse = offset + iphl - 1;
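	/*
	 * Worked example (illustrative): for a plain IPv4 TCP packet,
	 * offset == ETHER_HDR_LEN (14) and iphl == 20, so ipcse ==
	 * 14 + 20 - 1 == 33, the offset of the last byte the hardware
	 * includes in the IP header checksum.
	 */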
2196
2197 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2198 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2199 seg = 0;
2200 fields = 0;
2201
2202 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2203 int hlen = offset + iphl;
2204 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2205
2206 if (__predict_false(m0->m_len <
2207 (hlen + sizeof(struct tcphdr)))) {
2208 /*
2209 * TCP/IP headers are not in the first mbuf; we need
2210 * to do this the slow and painful way. Let's just
2211 * hope this doesn't happen very often.
2212 */
2213 struct tcphdr th;
2214
2215 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2216
2217 m_copydata(m0, hlen, sizeof(th), &th);
2218 if (v4) {
2219 struct ip ip;
2220
2221 m_copydata(m0, offset, sizeof(ip), &ip);
2222 ip.ip_len = 0;
2223 m_copyback(m0,
2224 offset + offsetof(struct ip, ip_len),
2225 sizeof(ip.ip_len), &ip.ip_len);
2226 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2227 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2228 } else {
2229 struct ip6_hdr ip6;
2230
2231 m_copydata(m0, offset, sizeof(ip6), &ip6);
2232 ip6.ip6_plen = 0;
2233 m_copyback(m0,
2234 offset + offsetof(struct ip6_hdr, ip6_plen),
2235 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2236 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2237 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2238 }
2239 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2240 sizeof(th.th_sum), &th.th_sum);
2241
2242 hlen += th.th_off << 2;
2243 } else {
2244 /*
2245 * TCP/IP headers are in the first mbuf; we can do
2246 * this the easy way.
2247 */
2248 struct tcphdr *th;
2249
2250 if (v4) {
2251 struct ip *ip =
2252 (void *)(mtod(m0, char *) + offset);
2253 th = (void *)(mtod(m0, char *) + hlen);
2254
2255 ip->ip_len = 0;
2256 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2257 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2258 } else {
2259 struct ip6_hdr *ip6 =
2260 (void *)(mtod(m0, char *) + offset);
2261 th = (void *)(mtod(m0, char *) + hlen);
2262
2263 ip6->ip6_plen = 0;
2264 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2265 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2266 }
2267 hlen += th->th_off << 2;
2268 }
2269
2270 if (v4) {
2271 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2272 cmdlen |= WTX_TCPIP_CMD_IP;
2273 } else {
2274 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2275 ipcse = 0;
2276 }
2277 cmd |= WTX_TCPIP_CMD_TSE;
2278 cmdlen |= WTX_TCPIP_CMD_TSE |
2279 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2280 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2281 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2282 }
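	/*
	 * Worked example (illustrative): a 66000-byte IPv4 TSO packet
	 * with 14 + 20 + 20 == 54 bytes of headers and an MSS of 1460
	 * programs WTX_TCPIP_SEG_HDRLEN(54) | WTX_TCPIP_SEG_MSS(1460)
	 * into "seg" and 66000 - 54 == 65946 bytes of TCP payload into
	 * "cmdlen"; the chip then cuts the payload into
	 * ceil(65946 / 1460) == 46 frames.
	 */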
2283
2284 /*
2285 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2286 * offload feature, if we load the context descriptor, we
2287 * MUST provide valid values for IPCSS and TUCSS fields.
2288 */
2289
2290 ipcs = WTX_TCPIP_IPCSS(offset) |
2291 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2292 WTX_TCPIP_IPCSE(ipcse);
2293 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2294 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2295 fields |= WTX_IXSM;
2296 }
2297
2298 offset += iphl;
2299
2300 if (m0->m_pkthdr.csum_flags &
2301 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2302 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2303 fields |= WTX_TXSM;
2304 tucs = WTX_TCPIP_TUCSS(offset) |
2305 WTX_TCPIP_TUCSO(offset +
2306 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2307 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2308 } else if ((m0->m_pkthdr.csum_flags &
2309 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2310 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2311 fields |= WTX_TXSM;
2312 tucs = WTX_TCPIP_TUCSS(offset) |
2313 WTX_TCPIP_TUCSO(offset +
2314 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2315 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2316 } else {
2317 /* Just initialize it to a valid TCP context. */
2318 tucs = WTX_TCPIP_TUCSS(offset) |
2319 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2320 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2321 }
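	/*
	 * Worked example (illustrative): after "offset += iphl" above,
	 * offset is the start of the L4 header, so for TCPv4 the
	 * csum_data offset is offsetof(struct tcphdr, th_sum) == 16
	 * and TUCSO lands at offset + 16; for UDPv4 it is offset + 6
	 * (offsetof(struct udphdr, uh_sum)). TUCSE == 0 means
	 * "checksum to the end of the packet".
	 */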
2322
2323 /* Fill in the context descriptor. */
2324 t = (struct livengood_tcpip_ctxdesc *)
2325 &sc->sc_txdescs[sc->sc_txnext];
2326 t->tcpip_ipcs = htole32(ipcs);
2327 t->tcpip_tucs = htole32(tucs);
2328 t->tcpip_cmdlen = htole32(cmdlen);
2329 t->tcpip_seg = htole32(seg);
2330 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2331
2332 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2333 txs->txs_ndesc++;
2334
2335 *cmdp = cmd;
2336 *fieldsp = fields;
2337
2338 return 0;
2339 }
2340
2341 static void
2342 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2343 {
2344 struct mbuf *m;
2345 int i;
2346
2347 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2348 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2349 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2350 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2351 m->m_data, m->m_len, m->m_flags);
2352 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2353 i, i == 1 ? "" : "s");
2354 }
2355
2356 /*
2357 * wm_82547_txfifo_stall:
2358 *
2359 * Callout used to wait for the 82547 Tx FIFO to drain,
2360 * reset the FIFO pointers, and restart packet transmission.
2361 */
2362 static void
2363 wm_82547_txfifo_stall(void *arg)
2364 {
2365 struct wm_softc *sc = arg;
2366 int s;
2367
2368 s = splnet();
2369
2370 if (sc->sc_txfifo_stall) {
2371 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2372 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2373 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2374 /*
2375 * Packets have drained. Stop transmitter, reset
2376 * FIFO pointers, restart transmitter, and kick
2377 * the packet queue.
2378 */
2379 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2380 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2381 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2382 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2383 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2384 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2385 CSR_WRITE(sc, WMREG_TCTL, tctl);
2386 CSR_WRITE_FLUSH(sc);
2387
2388 sc->sc_txfifo_head = 0;
2389 sc->sc_txfifo_stall = 0;
2390 wm_start(&sc->sc_ethercom.ec_if);
2391 } else {
2392 /*
2393 * Still waiting for packets to drain; try again in
2394 * another tick.
2395 */
2396 callout_schedule(&sc->sc_txfifo_ch, 1);
2397 }
2398 }
2399
2400 splx(s);
2401 }
2402
2403 static void
2404 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2405 {
2406 uint32_t reg;
2407
2408 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2409
2410 if (on != 0)
2411 reg |= EXTCNFCTR_GATE_PHY_CFG;
2412 else
2413 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2414
2415 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2416 }
2417
2418 /*
2419 * wm_82547_txfifo_bugchk:
2420 *
2421 * Check for bug condition in the 82547 Tx FIFO. We need to
2422 * prevent enqueueing a packet that would wrap around the end
2423  * of the Tx FIFO ring buffer, otherwise the chip will croak.
2424 *
2425 * We do this by checking the amount of space before the end
2426 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2427 * the Tx FIFO, wait for all remaining packets to drain, reset
2428 * the internal FIFO pointers to the beginning, and restart
2429 * transmission on the interface.
2430 */
2431 #define WM_FIFO_HDR 0x10
2432 #define WM_82547_PAD_LEN 0x3e0
2433 static int
2434 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2435 {
2436 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2437 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2438
2439 /* Just return if already stalled. */
2440 if (sc->sc_txfifo_stall)
2441 return 1;
2442
2443 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2444 /* Stall only occurs in half-duplex mode. */
2445 goto send_packet;
2446 }
2447
2448 if (len >= WM_82547_PAD_LEN + space) {
2449 sc->sc_txfifo_stall = 1;
2450 callout_schedule(&sc->sc_txfifo_ch, 1);
2451 return 1;
2452 }
2453
2454 send_packet:
2455 sc->sc_txfifo_head += len;
2456 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2457 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2458
2459 return 0;
2460 }
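
/*
 * A minimal standalone sketch (illustrative, not driver code) of the
 * accounting above; the 8 KB FIFO size is a hypothetical example.
 * With head == 0x1f00 in an 0x2000-byte FIFO, space is 0x100; a
 * 1514-byte frame rounds up to 0x600, and 0x600 >= 0x3e0 + 0x100, so
 * it would stall, while a 64-byte frame (0x50 after rounding) would
 * be sent and head would advance to 0x1f50.
 */
#if 0
static int
wm_fifo_would_stall(u_int head, u_int size, u_int pktlen)
{
	u_int space = size - head;	/* room before the wrap point */
	u_int len = roundup(pktlen + WM_FIFO_HDR, WM_FIFO_HDR);

	return len >= WM_82547_PAD_LEN + space;
}
#endif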
2461
2462 /*
2463 * wm_start: [ifnet interface function]
2464 *
2465 * Start packet transmission on the interface.
2466 */
2467 static void
2468 wm_start(struct ifnet *ifp)
2469 {
2470 struct wm_softc *sc = ifp->if_softc;
2471 struct mbuf *m0;
2472 struct m_tag *mtag;
2473 struct wm_txsoft *txs;
2474 bus_dmamap_t dmamap;
2475 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2476 bus_addr_t curaddr;
2477 bus_size_t seglen, curlen;
2478 uint32_t cksumcmd;
2479 uint8_t cksumfields;
2480
2481 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2482 return;
2483
2484 /*
2485 * Remember the previous number of free descriptors.
2486 */
2487 ofree = sc->sc_txfree;
2488
2489 /*
2490 * Loop through the send queue, setting up transmit descriptors
2491 * until we drain the queue, or use up all available transmit
2492 * descriptors.
2493 */
2494 for (;;) {
2495 /* Grab a packet off the queue. */
2496 IFQ_POLL(&ifp->if_snd, m0);
2497 if (m0 == NULL)
2498 break;
2499
2500 DPRINTF(WM_DEBUG_TX,
2501 ("%s: TX: have packet to transmit: %p\n",
2502 device_xname(sc->sc_dev), m0));
2503
2504 /* Get a work queue entry. */
2505 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2506 wm_txintr(sc);
2507 if (sc->sc_txsfree == 0) {
2508 DPRINTF(WM_DEBUG_TX,
2509 ("%s: TX: no free job descriptors\n",
2510 device_xname(sc->sc_dev)));
2511 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2512 break;
2513 }
2514 }
2515
2516 txs = &sc->sc_txsoft[sc->sc_txsnext];
2517 dmamap = txs->txs_dmamap;
2518
2519 use_tso = (m0->m_pkthdr.csum_flags &
2520 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2521
2522 /*
2523 * So says the Linux driver:
2524 * The controller does a simple calculation to make sure
2525 * there is enough room in the FIFO before initiating the
2526 * DMA for each buffer. The calc is:
2527 * 4 = ceil(buffer len / MSS)
2528 * To make sure we don't overrun the FIFO, adjust the max
2529 * buffer len if the MSS drops.
2530 */
2531 dmamap->dm_maxsegsz =
2532 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2533 ? m0->m_pkthdr.segsz << 2
2534 : WTX_MAX_LEN;
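		/*
		 * Worked example (illustrative): with an MSS of 1460 the
		 * per-buffer cap becomes 1460 << 2 == 5840 bytes; only
		 * when segsz << 2 would reach WTX_MAX_LEN does the
		 * default WTX_MAX_LEN cap apply.
		 */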
2535
2536 /*
2537 * Load the DMA map. If this fails, the packet either
2538 * didn't fit in the allotted number of segments, or we
2539 * were short on resources. For the too-many-segments
2540 * case, we simply report an error and drop the packet,
2541 * since we can't sanely copy a jumbo packet to a single
2542 * buffer.
2543 */
2544 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2545 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2546 if (error) {
2547 if (error == EFBIG) {
2548 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2549 log(LOG_ERR, "%s: Tx packet consumes too many "
2550 "DMA segments, dropping...\n",
2551 device_xname(sc->sc_dev));
2552 IFQ_DEQUEUE(&ifp->if_snd, m0);
2553 wm_dump_mbuf_chain(sc, m0);
2554 m_freem(m0);
2555 continue;
2556 }
2557 /*
2558 * Short on resources, just stop for now.
2559 */
2560 DPRINTF(WM_DEBUG_TX,
2561 ("%s: TX: dmamap load failed: %d\n",
2562 device_xname(sc->sc_dev), error));
2563 break;
2564 }
2565
2566 segs_needed = dmamap->dm_nsegs;
2567 if (use_tso) {
2568 /* For sentinel descriptor; see below. */
2569 segs_needed++;
2570 }
2571
2572 /*
2573 * Ensure we have enough descriptors free to describe
2574 * the packet. Note, we always reserve one descriptor
2575 * at the end of the ring due to the semantics of the
2576 * TDT register, plus one more in the event we need
2577 * to load offload context.
2578 */
2579 if (segs_needed > sc->sc_txfree - 2) {
2580 /*
2581 * Not enough free descriptors to transmit this
2582 * packet. We haven't committed anything yet,
2583 * so just unload the DMA map, put the packet
2584 			 * back on the queue, and punt. Notify the upper
2585 * layer that there are no more slots left.
2586 */
2587 DPRINTF(WM_DEBUG_TX,
2588 ("%s: TX: need %d (%d) descriptors, have %d\n",
2589 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2590 segs_needed, sc->sc_txfree - 1));
2591 ifp->if_flags |= IFF_OACTIVE;
2592 bus_dmamap_unload(sc->sc_dmat, dmamap);
2593 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2594 break;
2595 }
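		/*
		 * Worked example (illustrative): with sc_txfree == 10 a
		 * packet may use at most 8 descriptors here: one slot is
		 * always kept free so TDT never catches up with TDH, and
		 * one is held in reserve for a possible offload context
		 * descriptor.
		 */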
2596
2597 /*
2598 * Check for 82547 Tx FIFO bug. We need to do this
2599 * once we know we can transmit the packet, since we
2600 * do some internal FIFO space accounting here.
2601 */
2602 if (sc->sc_type == WM_T_82547 &&
2603 wm_82547_txfifo_bugchk(sc, m0)) {
2604 DPRINTF(WM_DEBUG_TX,
2605 ("%s: TX: 82547 Tx FIFO bug detected\n",
2606 device_xname(sc->sc_dev)));
2607 ifp->if_flags |= IFF_OACTIVE;
2608 bus_dmamap_unload(sc->sc_dmat, dmamap);
2609 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2610 break;
2611 }
2612
2613 IFQ_DEQUEUE(&ifp->if_snd, m0);
2614
2615 /*
2616 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2617 */
2618
2619 DPRINTF(WM_DEBUG_TX,
2620 ("%s: TX: packet has %d (%d) DMA segments\n",
2621 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2622
2623 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2624
2625 /*
2626 * Store a pointer to the packet so that we can free it
2627 * later.
2628 *
2629 * Initially, we consider the number of descriptors the
2630 		 * packet uses to be the number of DMA segments. This may be
2631 * incremented by 1 if we do checksum offload (a descriptor
2632 * is used to set the checksum context).
2633 */
2634 txs->txs_mbuf = m0;
2635 txs->txs_firstdesc = sc->sc_txnext;
2636 txs->txs_ndesc = segs_needed;
2637
2638 /* Set up offload parameters for this packet. */
2639 if (m0->m_pkthdr.csum_flags &
2640 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2641 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2642 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2643 if (wm_tx_offload(sc, txs, &cksumcmd,
2644 &cksumfields) != 0) {
2645 /* Error message already displayed. */
2646 bus_dmamap_unload(sc->sc_dmat, dmamap);
2647 continue;
2648 }
2649 } else {
2650 cksumcmd = 0;
2651 cksumfields = 0;
2652 }
2653
2654 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2655
2656 /* Sync the DMA map. */
2657 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2658 BUS_DMASYNC_PREWRITE);
2659
2660 /*
2661 * Initialize the transmit descriptor.
2662 */
2663 for (nexttx = sc->sc_txnext, seg = 0;
2664 seg < dmamap->dm_nsegs; seg++) {
2665 for (seglen = dmamap->dm_segs[seg].ds_len,
2666 curaddr = dmamap->dm_segs[seg].ds_addr;
2667 seglen != 0;
2668 curaddr += curlen, seglen -= curlen,
2669 nexttx = WM_NEXTTX(sc, nexttx)) {
2670 curlen = seglen;
2671
2672 /*
2673 * So says the Linux driver:
2674 * Work around for premature descriptor
2675 * write-backs in TSO mode. Append a
2676 * 4-byte sentinel descriptor.
2677 */
2678 if (use_tso &&
2679 seg == dmamap->dm_nsegs - 1 &&
2680 curlen > 8)
2681 curlen -= 4;
2682
2683 wm_set_dma_addr(
2684 &sc->sc_txdescs[nexttx].wtx_addr,
2685 curaddr);
2686 sc->sc_txdescs[nexttx].wtx_cmdlen =
2687 htole32(cksumcmd | curlen);
2688 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2689 0;
2690 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2691 cksumfields;
2692 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2693 lasttx = nexttx;
2694
2695 DPRINTF(WM_DEBUG_TX,
2696 ("%s: TX: desc %d: low %#" PRIx64 ", "
2697 "len %#04zx\n",
2698 device_xname(sc->sc_dev), nexttx,
2699 (uint64_t)curaddr, curlen));
2700 }
2701 }
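		/*
		 * Worked example (illustrative): if the final DMA segment
		 * of a TSO packet is 1024 bytes, the first pass writes a
		 * 1020-byte descriptor and leaves seglen == 4, so the
		 * loop runs once more and emits the 4-byte sentinel
		 * descriptor that segs_needed reserved above.
		 */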
2702
2703 KASSERT(lasttx != -1);
2704
2705 /*
2706 * Set up the command byte on the last descriptor of
2707 * the packet. If we're in the interrupt delay window,
2708 * delay the interrupt.
2709 */
2710 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2711 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2712
2713 /*
2714 * If VLANs are enabled and the packet has a VLAN tag, set
2715 * up the descriptor to encapsulate the packet for us.
2716 *
2717 * This is only valid on the last descriptor of the packet.
2718 */
2719 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2720 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2721 htole32(WTX_CMD_VLE);
2722 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2723 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2724 }
2725
2726 txs->txs_lastdesc = lasttx;
2727
2728 DPRINTF(WM_DEBUG_TX,
2729 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2730 device_xname(sc->sc_dev),
2731 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2732
2733 /* Sync the descriptors we're using. */
2734 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2735 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2736
2737 /* Give the packet to the chip. */
2738 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2739
2740 DPRINTF(WM_DEBUG_TX,
2741 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2742
2743 DPRINTF(WM_DEBUG_TX,
2744 ("%s: TX: finished transmitting packet, job %d\n",
2745 device_xname(sc->sc_dev), sc->sc_txsnext));
2746
2747 /* Advance the tx pointer. */
2748 sc->sc_txfree -= txs->txs_ndesc;
2749 sc->sc_txnext = nexttx;
2750
2751 sc->sc_txsfree--;
2752 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2753
2754 /* Pass the packet to any BPF listeners. */
2755 bpf_mtap(ifp, m0);
2756 }
2757
2758 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2759 /* No more slots; notify upper layer. */
2760 ifp->if_flags |= IFF_OACTIVE;
2761 }
2762
2763 if (sc->sc_txfree != ofree) {
2764 /* Set a watchdog timer in case the chip flakes out. */
2765 ifp->if_timer = 5;
2766 }
2767 }
2768
2769 /*
2770 * wm_nq_tx_offload:
2771 *
2772 * Set up TCP/IP checksumming parameters for the
2773  *	specified packet, for NEWQUEUE devices.
2774 */
2775 static int
2776 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2777 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2778 {
2779 struct mbuf *m0 = txs->txs_mbuf;
2780 struct m_tag *mtag;
2781 uint32_t vl_len, mssidx, cmdc;
2782 struct ether_header *eh;
2783 int offset, iphl;
2784
2785 /*
2786 * XXX It would be nice if the mbuf pkthdr had offset
2787 * fields for the protocol headers.
2788 */
2789 *cmdlenp = 0;
2790 *fieldsp = 0;
2791
2792 eh = mtod(m0, struct ether_header *);
2793 switch (htons(eh->ether_type)) {
2794 case ETHERTYPE_IP:
2795 case ETHERTYPE_IPV6:
2796 offset = ETHER_HDR_LEN;
2797 break;
2798
2799 case ETHERTYPE_VLAN:
2800 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2801 break;
2802
2803 default:
2804 /*
2805 		 * We don't support this protocol or encapsulation.
2806 */
2807 *do_csum = false;
2808 return 0;
2809 }
2810 *do_csum = true;
2811 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2812 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2813
2814 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2815 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2816
2817 if ((m0->m_pkthdr.csum_flags &
2818 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2819 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2820 } else {
2821 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2822 }
2823 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2824 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
2825
2826 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2827 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2828 << NQTXC_VLLEN_VLAN_SHIFT);
2829 *cmdlenp |= NQTX_CMD_VLE;
2830 }
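	/*
	 * Worked example (illustrative): for an IPv4 TCP packet
	 * carrying an mbuf VLAN tag of, say, 100 (so the wire header is
	 * untagged), offset == ETHER_HDR_LEN (14) and iphl == 20, so
	 *
	 *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
	 *		 (20 << NQTXC_VLLEN_IPLEN_SHIFT) |
	 *		 (100 << NQTXC_VLLEN_VLAN_SHIFT)
	 *
	 * packs the MAC header length, IP header length and VLAN tag
	 * into the single 32-bit context field.
	 */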
2831
2832 mssidx = 0;
2833
2834 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2835 int hlen = offset + iphl;
2836 int tcp_hlen;
2837 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2838
2839 if (__predict_false(m0->m_len <
2840 (hlen + sizeof(struct tcphdr)))) {
2841 /*
2842 * TCP/IP headers are not in the first mbuf; we need
2843 * to do this the slow and painful way. Let's just
2844 * hope this doesn't happen very often.
2845 */
2846 struct tcphdr th;
2847
2848 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2849
2850 m_copydata(m0, hlen, sizeof(th), &th);
2851 if (v4) {
2852 struct ip ip;
2853
2854 m_copydata(m0, offset, sizeof(ip), &ip);
2855 ip.ip_len = 0;
2856 m_copyback(m0,
2857 offset + offsetof(struct ip, ip_len),
2858 sizeof(ip.ip_len), &ip.ip_len);
2859 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2860 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2861 } else {
2862 struct ip6_hdr ip6;
2863
2864 m_copydata(m0, offset, sizeof(ip6), &ip6);
2865 ip6.ip6_plen = 0;
2866 m_copyback(m0,
2867 offset + offsetof(struct ip6_hdr, ip6_plen),
2868 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2869 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2870 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2871 }
2872 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2873 sizeof(th.th_sum), &th.th_sum);
2874
2875 tcp_hlen = th.th_off << 2;
2876 } else {
2877 /*
2878 * TCP/IP headers are in the first mbuf; we can do
2879 * this the easy way.
2880 */
2881 struct tcphdr *th;
2882
2883 if (v4) {
2884 struct ip *ip =
2885 (void *)(mtod(m0, char *) + offset);
2886 th = (void *)(mtod(m0, char *) + hlen);
2887
2888 ip->ip_len = 0;
2889 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2890 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2891 } else {
2892 struct ip6_hdr *ip6 =
2893 (void *)(mtod(m0, char *) + offset);
2894 th = (void *)(mtod(m0, char *) + hlen);
2895
2896 ip6->ip6_plen = 0;
2897 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2898 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2899 }
2900 tcp_hlen = th->th_off << 2;
2901 }
2902 hlen += tcp_hlen;
2903 *cmdlenp |= NQTX_CMD_TSE;
2904
2905 if (v4) {
2906 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2907 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2908 } else {
2909 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2910 *fieldsp |= NQTXD_FIELDS_TUXSM;
2911 }
2912 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2913 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2914 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2915 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2916 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2917 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2918 } else {
2919 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2920 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2921 }
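	/*
	 * Worked example (illustrative): for a TSO packet with 54 bytes
	 * of headers, an MSS of 1448 and a 20-byte TCP header, PAYLEN
	 * covers the TCP payload only (m_pkthdr.len - 54) while mssidx
	 * packs MSS(1448) and L4LEN(20); in the non-TSO case PAYLEN is
	 * simply the whole frame length.
	 */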
2922
2923 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2924 *fieldsp |= NQTXD_FIELDS_IXSM;
2925 cmdc |= NQTXC_CMD_IP4;
2926 }
2927
2928 if (m0->m_pkthdr.csum_flags &
2929 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2930 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2931 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2932 cmdc |= NQTXC_CMD_TCP;
2933 } else {
2934 cmdc |= NQTXC_CMD_UDP;
2935 }
2936 cmdc |= NQTXC_CMD_IP4;
2937 *fieldsp |= NQTXD_FIELDS_TUXSM;
2938 }
2939 if (m0->m_pkthdr.csum_flags &
2940 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2941 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2942 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2943 cmdc |= NQTXC_CMD_TCP;
2944 } else {
2945 cmdc |= NQTXC_CMD_UDP;
2946 }
2947 cmdc |= NQTXC_CMD_IP6;
2948 *fieldsp |= NQTXD_FIELDS_TUXSM;
2949 }
2950
2951 /* Fill in the context descriptor. */
2952 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
2953 htole32(vl_len);
2954 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
2955 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
2956 htole32(cmdc);
2957 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
2958 htole32(mssidx);
2959 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2960 DPRINTF(WM_DEBUG_TX,
2961 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
2962 sc->sc_txnext, 0, vl_len));
2963 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
2964 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2965 txs->txs_ndesc++;
2966 return 0;
2967 }
2968
2969 /*
2970 * wm_nq_start: [ifnet interface function]
2971 *
2972  *	Start packet transmission on the interface for NEWQUEUE devices.
2973 */
2974 static void
2975 wm_nq_start(struct ifnet *ifp)
2976 {
2977 struct wm_softc *sc = ifp->if_softc;
2978 struct mbuf *m0;
2979 struct m_tag *mtag;
2980 struct wm_txsoft *txs;
2981 bus_dmamap_t dmamap;
2982 int error, nexttx, lasttx = -1, seg, segs_needed;
2983 bool do_csum, sent;
2984
2985 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2986 return;
2987
2988 sent = false;
2989
2990 /*
2991 * Loop through the send queue, setting up transmit descriptors
2992 * until we drain the queue, or use up all available transmit
2993 * descriptors.
2994 */
2995 for (;;) {
2996 /* Grab a packet off the queue. */
2997 IFQ_POLL(&ifp->if_snd, m0);
2998 if (m0 == NULL)
2999 break;
3000
3001 DPRINTF(WM_DEBUG_TX,
3002 ("%s: TX: have packet to transmit: %p\n",
3003 device_xname(sc->sc_dev), m0));
3004
3005 /* Get a work queue entry. */
3006 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3007 wm_txintr(sc);
3008 if (sc->sc_txsfree == 0) {
3009 DPRINTF(WM_DEBUG_TX,
3010 ("%s: TX: no free job descriptors\n",
3011 device_xname(sc->sc_dev)));
3012 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3013 break;
3014 }
3015 }
3016
3017 txs = &sc->sc_txsoft[sc->sc_txsnext];
3018 dmamap = txs->txs_dmamap;
3019
3020 /*
3021 * Load the DMA map. If this fails, the packet either
3022 * didn't fit in the allotted number of segments, or we
3023 * were short on resources. For the too-many-segments
3024 * case, we simply report an error and drop the packet,
3025 * since we can't sanely copy a jumbo packet to a single
3026 * buffer.
3027 */
3028 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3029 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3030 if (error) {
3031 if (error == EFBIG) {
3032 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3033 log(LOG_ERR, "%s: Tx packet consumes too many "
3034 "DMA segments, dropping...\n",
3035 device_xname(sc->sc_dev));
3036 IFQ_DEQUEUE(&ifp->if_snd, m0);
3037 wm_dump_mbuf_chain(sc, m0);
3038 m_freem(m0);
3039 continue;
3040 }
3041 /*
3042 * Short on resources, just stop for now.
3043 */
3044 DPRINTF(WM_DEBUG_TX,
3045 ("%s: TX: dmamap load failed: %d\n",
3046 device_xname(sc->sc_dev), error));
3047 break;
3048 }
3049
3050 segs_needed = dmamap->dm_nsegs;
3051
3052 /*
3053 * Ensure we have enough descriptors free to describe
3054 * the packet. Note, we always reserve one descriptor
3055 * at the end of the ring due to the semantics of the
3056 * TDT register, plus one more in the event we need
3057 * to load offload context.
3058 */
3059 if (segs_needed > sc->sc_txfree - 2) {
3060 /*
3061 * Not enough free descriptors to transmit this
3062 * packet. We haven't committed anything yet,
3063 * so just unload the DMA map, put the packet
3064 			 * back on the queue, and punt. Notify the upper
3065 * layer that there are no more slots left.
3066 */
3067 DPRINTF(WM_DEBUG_TX,
3068 ("%s: TX: need %d (%d) descriptors, have %d\n",
3069 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3070 segs_needed, sc->sc_txfree - 1));
3071 ifp->if_flags |= IFF_OACTIVE;
3072 bus_dmamap_unload(sc->sc_dmat, dmamap);
3073 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3074 break;
3075 }
3076
3077 IFQ_DEQUEUE(&ifp->if_snd, m0);
3078
3079 /*
3080 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3081 */
3082
3083 DPRINTF(WM_DEBUG_TX,
3084 ("%s: TX: packet has %d (%d) DMA segments\n",
3085 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3086
3087 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3088
3089 /*
3090 * Store a pointer to the packet so that we can free it
3091 * later.
3092 *
3093 * Initially, we consider the number of descriptors the
3094 		 * packet uses to be the number of DMA segments. This may be
3095 * incremented by 1 if we do checksum offload (a descriptor
3096 * is used to set the checksum context).
3097 */
3098 txs->txs_mbuf = m0;
3099 txs->txs_firstdesc = sc->sc_txnext;
3100 txs->txs_ndesc = segs_needed;
3101
3102 /* Set up offload parameters for this packet. */
3103 uint32_t cmdlen, fields, dcmdlen;
3104 if (m0->m_pkthdr.csum_flags &
3105 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3106 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3107 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3108 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3109 &do_csum) != 0) {
3110 /* Error message already displayed. */
3111 bus_dmamap_unload(sc->sc_dmat, dmamap);
3112 continue;
3113 }
3114 } else {
3115 do_csum = false;
3116 cmdlen = 0;
3117 fields = 0;
3118 }
3119
3120 /* Sync the DMA map. */
3121 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3122 BUS_DMASYNC_PREWRITE);
3123
3124 /*
3125 * Initialize the first transmit descriptor.
3126 */
3127 nexttx = sc->sc_txnext;
3128 if (!do_csum) {
3129 /* setup a legacy descriptor */
3130 wm_set_dma_addr(
3131 &sc->sc_txdescs[nexttx].wtx_addr,
3132 dmamap->dm_segs[0].ds_addr);
3133 sc->sc_txdescs[nexttx].wtx_cmdlen =
3134 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3135 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3136 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3137 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3138 NULL) {
3139 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3140 htole32(WTX_CMD_VLE);
3141 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3142 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3143 } else {
3144 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3145 }
3146 dcmdlen = 0;
3147 } else {
3148 /* setup an advanced data descriptor */
3149 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3150 htole64(dmamap->dm_segs[0].ds_addr);
3151 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3152 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3153 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
3154 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3155 htole32(fields);
3156 DPRINTF(WM_DEBUG_TX,
3157 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3158 device_xname(sc->sc_dev), nexttx,
3159 (uint64_t)dmamap->dm_segs[0].ds_addr));
3160 DPRINTF(WM_DEBUG_TX,
3161 ("\t 0x%08x%08x\n", fields,
3162 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3163 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3164 }
3165
3166 lasttx = nexttx;
3167 nexttx = WM_NEXTTX(sc, nexttx);
3168 /*
3169 		 * Fill in the next descriptors. The legacy and advanced
3170 		 * formats are the same from here on.
3171 */
3172 for (seg = 1; seg < dmamap->dm_nsegs;
3173 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3174 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3175 htole64(dmamap->dm_segs[seg].ds_addr);
3176 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3177 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3178 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3179 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3180 lasttx = nexttx;
3181
3182 DPRINTF(WM_DEBUG_TX,
3183 ("%s: TX: desc %d: %#" PRIx64 ", "
3184 "len %#04zx\n",
3185 device_xname(sc->sc_dev), nexttx,
3186 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3187 dmamap->dm_segs[seg].ds_len));
3188 }
3189
3190 KASSERT(lasttx != -1);
3191
3192 /*
3193 * Set up the command byte on the last descriptor of
3194 * the packet. If we're in the interrupt delay window,
3195 * delay the interrupt.
3196 */
3197 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3198 (NQTX_CMD_EOP | NQTX_CMD_RS));
3199 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3200 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3201
3202 txs->txs_lastdesc = lasttx;
3203
3204 DPRINTF(WM_DEBUG_TX,
3205 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3206 device_xname(sc->sc_dev),
3207 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3208
3209 /* Sync the descriptors we're using. */
3210 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3211 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3212
3213 /* Give the packet to the chip. */
3214 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3215 sent = true;
3216
3217 DPRINTF(WM_DEBUG_TX,
3218 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3219
3220 DPRINTF(WM_DEBUG_TX,
3221 ("%s: TX: finished transmitting packet, job %d\n",
3222 device_xname(sc->sc_dev), sc->sc_txsnext));
3223
3224 /* Advance the tx pointer. */
3225 sc->sc_txfree -= txs->txs_ndesc;
3226 sc->sc_txnext = nexttx;
3227
3228 sc->sc_txsfree--;
3229 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3230
3231 /* Pass the packet to any BPF listeners. */
3232 bpf_mtap(ifp, m0);
3233 }
3234
3235 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3236 /* No more slots; notify upper layer. */
3237 ifp->if_flags |= IFF_OACTIVE;
3238 }
3239
3240 if (sent) {
3241 /* Set a watchdog timer in case the chip flakes out. */
3242 ifp->if_timer = 5;
3243 }
3244 }
3245
3246 /*
3247 * wm_watchdog: [ifnet interface function]
3248 *
3249 * Watchdog timer handler.
3250 */
3251 static void
3252 wm_watchdog(struct ifnet *ifp)
3253 {
3254 struct wm_softc *sc = ifp->if_softc;
3255
3256 /*
3257 * Since we're using delayed interrupts, sweep up
3258 * before we report an error.
3259 */
3260 wm_txintr(sc);
3261
3262 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3263 #ifdef WM_DEBUG
3264 int i, j;
3265 struct wm_txsoft *txs;
3266 #endif
3267 log(LOG_ERR,
3268 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3269 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3270 sc->sc_txnext);
3271 ifp->if_oerrors++;
3272 #ifdef WM_DEBUG
3273 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3274 i = WM_NEXTTXS(sc, i)) {
3275 txs = &sc->sc_txsoft[i];
3276 printf("txs %d tx %d -> %d\n",
3277 i, txs->txs_firstdesc, txs->txs_lastdesc);
3278 for (j = txs->txs_firstdesc; ;
3279 j = WM_NEXTTX(sc, j)) {
3280 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3281 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3282 printf("\t %#08x%08x\n",
3283 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3284 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3285 if (j == txs->txs_lastdesc)
3286 break;
3287 }
3288 }
3289 #endif
3290 /* Reset the interface. */
3291 (void) wm_init(ifp);
3292 }
3293
3294 /* Try to get more packets going. */
3295 ifp->if_start(ifp);
3296 }
3297
3298 static int
3299 wm_ifflags_cb(struct ethercom *ec)
3300 {
3301 struct ifnet *ifp = &ec->ec_if;
3302 struct wm_softc *sc = ifp->if_softc;
3303 int change = ifp->if_flags ^ sc->sc_if_flags;
3304
3305 if (change != 0)
3306 sc->sc_if_flags = ifp->if_flags;
3307
3308 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3309 return ENETRESET;
3310
3311 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3312 wm_set_filter(sc);
3313
3314 wm_set_vlan(sc);
3315
3316 return 0;
3317 }
3318
3319 /*
3320 * wm_ioctl: [ifnet interface function]
3321 *
3322 * Handle control requests from the operator.
3323 */
3324 static int
3325 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3326 {
3327 struct wm_softc *sc = ifp->if_softc;
3328 struct ifreq *ifr = (struct ifreq *) data;
3329 struct ifaddr *ifa = (struct ifaddr *)data;
3330 struct sockaddr_dl *sdl;
3331 int s, error;
3332
3333 s = splnet();
3334
3335 switch (cmd) {
3336 case SIOCSIFMEDIA:
3337 case SIOCGIFMEDIA:
3338 /* Flow control requires full-duplex mode. */
3339 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3340 (ifr->ifr_media & IFM_FDX) == 0)
3341 ifr->ifr_media &= ~IFM_ETH_FMASK;
3342 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3343 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3344 /* We can do both TXPAUSE and RXPAUSE. */
3345 ifr->ifr_media |=
3346 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3347 }
3348 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3349 }
3350 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3351 break;
3352 case SIOCINITIFADDR:
3353 if (ifa->ifa_addr->sa_family == AF_LINK) {
3354 sdl = satosdl(ifp->if_dl->ifa_addr);
3355 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3356 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3357 /* unicast address is first multicast entry */
3358 wm_set_filter(sc);
3359 error = 0;
3360 break;
3361 }
3362 /*FALLTHROUGH*/
3363 default:
3364 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3365 break;
3366
3367 error = 0;
3368
3369 if (cmd == SIOCSIFCAP)
3370 error = (*ifp->if_init)(ifp);
3371 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3372 ;
3373 else if (ifp->if_flags & IFF_RUNNING) {
3374 /*
3375 * Multicast list has changed; set the hardware filter
3376 * accordingly.
3377 */
3378 wm_set_filter(sc);
3379 }
3380 break;
3381 }
3382
3383 /* Try to get more packets going. */
3384 ifp->if_start(ifp);
3385
3386 splx(s);
3387 return error;
3388 }
3389
3390 /*
3391 * wm_intr:
3392 *
3393 * Interrupt service routine.
3394 */
3395 static int
3396 wm_intr(void *arg)
3397 {
3398 struct wm_softc *sc = arg;
3399 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3400 uint32_t icr;
3401 int handled = 0;
3402
3403 while (1 /* CONSTCOND */) {
3404 icr = CSR_READ(sc, WMREG_ICR);
3405 if ((icr & sc->sc_icr) == 0)
3406 break;
3407 rnd_add_uint32(&sc->rnd_source, icr);
3408
3409 handled = 1;
3410
3411 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3412 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3413 DPRINTF(WM_DEBUG_RX,
3414 ("%s: RX: got Rx intr 0x%08x\n",
3415 device_xname(sc->sc_dev),
3416 icr & (ICR_RXDMT0|ICR_RXT0)));
3417 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3418 }
3419 #endif
3420 wm_rxintr(sc);
3421
3422 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3423 if (icr & ICR_TXDW) {
3424 DPRINTF(WM_DEBUG_TX,
3425 ("%s: TX: got TXDW interrupt\n",
3426 device_xname(sc->sc_dev)));
3427 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3428 }
3429 #endif
3430 wm_txintr(sc);
3431
3432 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3433 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3434 wm_linkintr(sc, icr);
3435 }
3436
3437 if (icr & ICR_RXO) {
3438 #if defined(WM_DEBUG)
3439 log(LOG_WARNING, "%s: Receive overrun\n",
3440 device_xname(sc->sc_dev));
3441 #endif /* defined(WM_DEBUG) */
3442 }
3443 }
3444
3445 if (handled) {
3446 /* Try to get more packets going. */
3447 ifp->if_start(ifp);
3448 }
3449
3450 return handled;
3451 }
3452
3453 /*
3454 * wm_txintr:
3455 *
3456 * Helper; handle transmit interrupts.
3457 */
3458 static void
3459 wm_txintr(struct wm_softc *sc)
3460 {
3461 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3462 struct wm_txsoft *txs;
3463 uint8_t status;
3464 int i;
3465
3466 ifp->if_flags &= ~IFF_OACTIVE;
3467
3468 /*
3469 * Go through the Tx list and free mbufs for those
3470 * frames which have been transmitted.
3471 */
3472 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3473 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3474 txs = &sc->sc_txsoft[i];
3475
3476 DPRINTF(WM_DEBUG_TX,
3477 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3478
3479 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3480 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3481
3482 status =
3483 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3484 if ((status & WTX_ST_DD) == 0) {
3485 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3486 BUS_DMASYNC_PREREAD);
3487 break;
3488 }
3489
3490 DPRINTF(WM_DEBUG_TX,
3491 ("%s: TX: job %d done: descs %d..%d\n",
3492 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3493 txs->txs_lastdesc));
3494
3495 /*
3496 * XXX We should probably be using the statistics
3497 * XXX registers, but I don't know if they exist
3498 * XXX on chips before the i82544.
3499 */
3500
3501 #ifdef WM_EVENT_COUNTERS
3502 if (status & WTX_ST_TU)
3503 WM_EVCNT_INCR(&sc->sc_ev_tu);
3504 #endif /* WM_EVENT_COUNTERS */
3505
3506 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3507 ifp->if_oerrors++;
3508 if (status & WTX_ST_LC)
3509 log(LOG_WARNING, "%s: late collision\n",
3510 device_xname(sc->sc_dev));
3511 else if (status & WTX_ST_EC) {
3512 ifp->if_collisions += 16;
3513 log(LOG_WARNING, "%s: excessive collisions\n",
3514 device_xname(sc->sc_dev));
3515 }
3516 } else
3517 ifp->if_opackets++;
3518
3519 sc->sc_txfree += txs->txs_ndesc;
3520 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3521 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3522 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3523 m_freem(txs->txs_mbuf);
3524 txs->txs_mbuf = NULL;
3525 }
3526
3527 /* Update the dirty transmit buffer pointer. */
3528 sc->sc_txsdirty = i;
3529 DPRINTF(WM_DEBUG_TX,
3530 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3531
3532 /*
3533 * If there are no more pending transmissions, cancel the watchdog
3534 * timer.
3535 */
3536 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3537 ifp->if_timer = 0;
3538 }
3539
3540 /*
3541 * wm_rxintr:
3542 *
3543 * Helper; handle receive interrupts.
3544 */
3545 static void
3546 wm_rxintr(struct wm_softc *sc)
3547 {
3548 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3549 struct wm_rxsoft *rxs;
3550 struct mbuf *m;
3551 int i, len;
3552 uint8_t status, errors;
3553 uint16_t vlantag;
3554
3555 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3556 rxs = &sc->sc_rxsoft[i];
3557
3558 DPRINTF(WM_DEBUG_RX,
3559 ("%s: RX: checking descriptor %d\n",
3560 device_xname(sc->sc_dev), i));
3561
3562 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3563
3564 status = sc->sc_rxdescs[i].wrx_status;
3565 errors = sc->sc_rxdescs[i].wrx_errors;
3566 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3567 vlantag = sc->sc_rxdescs[i].wrx_special;
3568
3569 if ((status & WRX_ST_DD) == 0) {
3570 /*
3571 * We have processed all of the receive descriptors.
3572 */
3573 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3574 break;
3575 }
3576
3577 if (__predict_false(sc->sc_rxdiscard)) {
3578 DPRINTF(WM_DEBUG_RX,
3579 ("%s: RX: discarding contents of descriptor %d\n",
3580 device_xname(sc->sc_dev), i));
3581 WM_INIT_RXDESC(sc, i);
3582 if (status & WRX_ST_EOP) {
3583 /* Reset our state. */
3584 DPRINTF(WM_DEBUG_RX,
3585 ("%s: RX: resetting rxdiscard -> 0\n",
3586 device_xname(sc->sc_dev)));
3587 sc->sc_rxdiscard = 0;
3588 }
3589 continue;
3590 }
3591
3592 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3593 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3594
3595 m = rxs->rxs_mbuf;
3596
3597 /*
3598 * Add a new receive buffer to the ring, unless of
3599 * course the length is zero. Treat the latter as a
3600 * failed mapping.
3601 */
3602 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3603 /*
3604 * Failed, throw away what we've done so
3605 * far, and discard the rest of the packet.
3606 */
3607 ifp->if_ierrors++;
3608 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3609 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3610 WM_INIT_RXDESC(sc, i);
3611 if ((status & WRX_ST_EOP) == 0)
3612 sc->sc_rxdiscard = 1;
3613 if (sc->sc_rxhead != NULL)
3614 m_freem(sc->sc_rxhead);
3615 WM_RXCHAIN_RESET(sc);
3616 DPRINTF(WM_DEBUG_RX,
3617 ("%s: RX: Rx buffer allocation failed, "
3618 "dropping packet%s\n", device_xname(sc->sc_dev),
3619 sc->sc_rxdiscard ? " (discard)" : ""));
3620 continue;
3621 }
3622
3623 m->m_len = len;
3624 sc->sc_rxlen += len;
3625 DPRINTF(WM_DEBUG_RX,
3626 ("%s: RX: buffer at %p len %d\n",
3627 device_xname(sc->sc_dev), m->m_data, len));
3628
3629 /*
3630 * If this is not the end of the packet, keep
3631 * looking.
3632 */
3633 if ((status & WRX_ST_EOP) == 0) {
3634 WM_RXCHAIN_LINK(sc, m);
3635 DPRINTF(WM_DEBUG_RX,
3636 ("%s: RX: not yet EOP, rxlen -> %d\n",
3637 device_xname(sc->sc_dev), sc->sc_rxlen));
3638 continue;
3639 }
3640
3641 /*
3642 		 * Okay, we have the entire packet now.  The chip is
3643 		 * configured to include the FCS (not all chips can be
3644 		 * configured to strip it), so we need to trim it off,
3645 		 * possibly also shortening the previous mbuf in the
3646 		 * chain when the current mbuf is too short to hold the
3647 		 * whole FCS.  Due to an erratum, the RCTL_SECRC bit in
3648 		 * the RCTL register is always set on the I350, so that
3649 		 * chip strips the FCS itself and we must not trim it.
3650 */
3651 if (sc->sc_type != WM_T_I350) {
3652 if (m->m_len < ETHER_CRC_LEN) {
3653 sc->sc_rxtail->m_len
3654 -= (ETHER_CRC_LEN - m->m_len);
3655 m->m_len = 0;
3656 } else
3657 m->m_len -= ETHER_CRC_LEN;
3658 len = sc->sc_rxlen - ETHER_CRC_LEN;
3659 } else
3660 len = sc->sc_rxlen;
3661
3662 WM_RXCHAIN_LINK(sc, m);
3663
3664 *sc->sc_rxtailp = NULL;
3665 m = sc->sc_rxhead;
3666
3667 WM_RXCHAIN_RESET(sc);
3668
3669 DPRINTF(WM_DEBUG_RX,
3670 ("%s: RX: have entire packet, len -> %d\n",
3671 device_xname(sc->sc_dev), len));
3672
3673 /*
3674 * If an error occurred, update stats and drop the packet.
3675 */
3676 if (errors &
3677 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3678 if (errors & WRX_ER_SE)
3679 log(LOG_WARNING, "%s: symbol error\n",
3680 device_xname(sc->sc_dev));
3681 else if (errors & WRX_ER_SEQ)
3682 log(LOG_WARNING, "%s: receive sequence error\n",
3683 device_xname(sc->sc_dev));
3684 else if (errors & WRX_ER_CE)
3685 log(LOG_WARNING, "%s: CRC error\n",
3686 device_xname(sc->sc_dev));
3687 m_freem(m);
3688 continue;
3689 }
3690
3691 /*
3692 * No errors. Receive the packet.
3693 */
3694 m->m_pkthdr.rcvif = ifp;
3695 m->m_pkthdr.len = len;
3696
3697 /*
3698 * If VLANs are enabled, VLAN packets have been unwrapped
3699 * for us. Associate the tag with the packet.
3700 */
3701 if ((status & WRX_ST_VP) != 0) {
3702 VLAN_INPUT_TAG(ifp, m,
3703 le16toh(vlantag),
3704 continue);
3705 }
3706
3707 /*
3708 * Set up checksum info for this packet.
3709 */
3710 if ((status & WRX_ST_IXSM) == 0) {
3711 if (status & WRX_ST_IPCS) {
3712 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3713 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3714 if (errors & WRX_ER_IPE)
3715 m->m_pkthdr.csum_flags |=
3716 M_CSUM_IPv4_BAD;
3717 }
3718 if (status & WRX_ST_TCPCS) {
3719 /*
3720 * Note: we don't know if this was TCP or UDP,
3721 * so we just set both bits, and expect the
3722 * upper layers to deal.
3723 */
3724 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3725 m->m_pkthdr.csum_flags |=
3726 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3727 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3728 if (errors & WRX_ER_TCPE)
3729 m->m_pkthdr.csum_flags |=
3730 M_CSUM_TCP_UDP_BAD;
3731 }
3732 }
3733
3734 ifp->if_ipackets++;
3735
3736 /* Pass this up to any BPF listeners. */
3737 bpf_mtap(ifp, m);
3738
3739 /* Pass it on. */
3740 (*ifp->if_input)(ifp, m);
3741 }
3742
3743 /* Update the receive pointer. */
3744 sc->sc_rxptr = i;
3745
3746 DPRINTF(WM_DEBUG_RX,
3747 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3748 }
3749
3750 /*
3751 * wm_linkintr_gmii:
3752 *
3753 * Helper; handle link interrupts for GMII.
3754 */
3755 static void
3756 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3757 {
3758
3759 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3760 __func__));
3761
3762 if (icr & ICR_LSC) {
3763 DPRINTF(WM_DEBUG_LINK,
3764 ("%s: LINK: LSC -> mii_tick\n",
3765 device_xname(sc->sc_dev)));
3766 mii_tick(&sc->sc_mii);
3767 if (sc->sc_type == WM_T_82543) {
3768 int miistatus, active;
3769
3770 /*
3771 			 * On the 82543, we must force the MAC's speed and
3772 			 * duplex to match the PHY's current speed and
3773 			 * duplex configuration.
3774 */
3775 miistatus = sc->sc_mii.mii_media_status;
3776
3777 if (miistatus & IFM_ACTIVE) {
3778 active = sc->sc_mii.mii_media_active;
3779 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3780 switch (IFM_SUBTYPE(active)) {
3781 case IFM_10_T:
3782 sc->sc_ctrl |= CTRL_SPEED_10;
3783 break;
3784 case IFM_100_TX:
3785 sc->sc_ctrl |= CTRL_SPEED_100;
3786 break;
3787 case IFM_1000_T:
3788 sc->sc_ctrl |= CTRL_SPEED_1000;
3789 break;
3790 default:
3791 /*
3792 * fiber?
3793 					 * Should not enter here.
3794 */
3795 printf("unknown media (%x)\n",
3796 active);
3797 break;
3798 }
3799 if (active & IFM_FDX)
3800 sc->sc_ctrl |= CTRL_FD;
3801 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3802 }
3803 } else if ((sc->sc_type == WM_T_ICH8)
3804 && (sc->sc_phytype == WMPHY_IGP_3)) {
3805 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3806 } else if (sc->sc_type == WM_T_PCH) {
3807 wm_k1_gig_workaround_hv(sc,
3808 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3809 }
3810
3811 if ((sc->sc_phytype == WMPHY_82578)
3812 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3813 == IFM_1000_T)) {
3814
3815 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3816 delay(200*1000); /* XXX too big */
3817
3818 /* Link stall fix for link up */
3819 wm_gmii_hv_writereg(sc->sc_dev, 1,
3820 HV_MUX_DATA_CTRL,
3821 HV_MUX_DATA_CTRL_GEN_TO_MAC
3822 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3823 wm_gmii_hv_writereg(sc->sc_dev, 1,
3824 HV_MUX_DATA_CTRL,
3825 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3826 }
3827 }
3828 } else if (icr & ICR_RXSEQ) {
3829 DPRINTF(WM_DEBUG_LINK,
3830 ("%s: LINK Receive sequence error\n",
3831 device_xname(sc->sc_dev)));
3832 }
3833 }
3834
3835 /*
3836 * wm_linkintr_tbi:
3837 *
3838 * Helper; handle link interrupts for TBI mode.
3839 */
3840 static void
3841 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3842 {
3843 uint32_t status;
3844
3845 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3846 __func__));
3847
3848 status = CSR_READ(sc, WMREG_STATUS);
3849 if (icr & ICR_LSC) {
3850 if (status & STATUS_LU) {
3851 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3852 device_xname(sc->sc_dev),
3853 (status & STATUS_FD) ? "FDX" : "HDX"));
3854 /*
3855 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
3856 			 * automatically, so we must re-read sc->sc_ctrl here.
3857 */
3858
3859 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3860 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3861 sc->sc_fcrtl &= ~FCRTL_XONE;
3862 if (status & STATUS_FD)
3863 sc->sc_tctl |=
3864 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3865 else
3866 sc->sc_tctl |=
3867 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3868 if (sc->sc_ctrl & CTRL_TFCE)
3869 sc->sc_fcrtl |= FCRTL_XONE;
3870 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3871 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3872 WMREG_OLD_FCRTL : WMREG_FCRTL,
3873 sc->sc_fcrtl);
3874 sc->sc_tbi_linkup = 1;
3875 } else {
3876 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3877 device_xname(sc->sc_dev)));
3878 sc->sc_tbi_linkup = 0;
3879 }
3880 wm_tbi_set_linkled(sc);
3881 } else if (icr & ICR_RXCFG) {
3882 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3883 device_xname(sc->sc_dev)));
3884 sc->sc_tbi_nrxcfg++;
3885 wm_check_for_link(sc);
3886 } else if (icr & ICR_RXSEQ) {
3887 DPRINTF(WM_DEBUG_LINK,
3888 ("%s: LINK: Receive sequence error\n",
3889 device_xname(sc->sc_dev)));
3890 }
3891 }
3892
3893 /*
3894 * wm_linkintr:
3895 *
3896 * Helper; handle link interrupts.
3897 */
3898 static void
3899 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3900 {
3901
3902 if (sc->sc_flags & WM_F_HAS_MII)
3903 wm_linkintr_gmii(sc, icr);
3904 else
3905 wm_linkintr_tbi(sc, icr);
3906 }
3907
3908 /*
3909 * wm_tick:
3910 *
3911 * One second timer, used to check link status, sweep up
3912 * completed transmit jobs, etc.
3913 */
3914 static void
3915 wm_tick(void *arg)
3916 {
3917 struct wm_softc *sc = arg;
3918 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3919 int s;
3920
3921 s = splnet();
3922
3923 if (sc->sc_type >= WM_T_82542_2_1) {
3924 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3925 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3926 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3927 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3928 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3929 }
3930
3931 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3932 	ifp->if_ierrors += 0ULL	/* ensure quad_t */
3933 	    + CSR_READ(sc, WMREG_CRCERRS)
3934 + CSR_READ(sc, WMREG_ALGNERRC)
3935 + CSR_READ(sc, WMREG_SYMERRC)
3936 + CSR_READ(sc, WMREG_RXERRC)
3937 + CSR_READ(sc, WMREG_SEC)
3938 + CSR_READ(sc, WMREG_CEXTERR)
3939 + CSR_READ(sc, WMREG_RLEC);
3940 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3941
3942 if (sc->sc_flags & WM_F_HAS_MII)
3943 mii_tick(&sc->sc_mii);
3944 else
3945 wm_tbi_check_link(sc);
3946
3947 splx(s);
3948
3949 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3950 }
3951
3952 /*
3953 * wm_reset:
3954 *
3955 * Reset the i82542 chip.
3956 */
3957 static void
3958 wm_reset(struct wm_softc *sc)
3959 {
3960 int phy_reset = 0;
3961 uint32_t reg, mask;
3962 int i;
3963
3964 /*
3965 * Allocate on-chip memory according to the MTU size.
3966 * The Packet Buffer Allocation register must be written
3967 * before the chip is reset.
3968 */
3969 switch (sc->sc_type) {
3970 case WM_T_82547:
3971 case WM_T_82547_2:
3972 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3973 PBA_22K : PBA_30K;
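		/*
		 * Whatever is not assigned to Rx above (22K or 30K of
		 * the 40KB packet buffer) is left to the Tx FIFO, as
		 * computed from PBA_40K below.
		 */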
3974 sc->sc_txfifo_head = 0;
3975 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3976 sc->sc_txfifo_size =
3977 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3978 sc->sc_txfifo_stall = 0;
3979 break;
3980 case WM_T_82571:
3981 case WM_T_82572:
3982 	case WM_T_82571:
3983 case WM_T_I350:
3984 case WM_T_80003:
3985 sc->sc_pba = PBA_32K;
3986 break;
3987 case WM_T_82580:
3988 case WM_T_82580ER:
3989 sc->sc_pba = PBA_35K;
3990 break;
3991 case WM_T_82576:
3992 sc->sc_pba = PBA_64K;
3993 break;
3994 case WM_T_82573:
3995 sc->sc_pba = PBA_12K;
3996 break;
3997 case WM_T_82574:
3998 case WM_T_82583:
3999 sc->sc_pba = PBA_20K;
4000 break;
4001 case WM_T_ICH8:
4002 sc->sc_pba = PBA_8K;
4003 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4004 break;
4005 case WM_T_ICH9:
4006 case WM_T_ICH10:
4007 sc->sc_pba = PBA_10K;
4008 break;
4009 case WM_T_PCH:
4010 case WM_T_PCH2:
4011 sc->sc_pba = PBA_26K;
4012 break;
4013 default:
4014 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4015 PBA_40K : PBA_48K;
4016 break;
4017 }
4018 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4019
4020 /* Prevent the PCI-E bus from sticking */
4021 if (sc->sc_flags & WM_F_PCIE) {
4022 int timeout = 800;
4023
4024 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4025 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4026
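		/*
		 * Poll for the GIO Master Enable status bit to clear;
		 * at 100us per iteration this waits up to 800 * 100us
		 * = 80ms before giving up.
		 */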
4027 while (timeout--) {
4028 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4029 == 0)
4030 break;
4031 delay(100);
4032 }
4033 }
4034
4035 /* Set the completion timeout for interface */
4036 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4037 || (sc->sc_type == WM_T_I350))
4038 wm_set_pcie_completion_timeout(sc);
4039
4040 /* Clear interrupt */
4041 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4042
4043 /* Stop the transmit and receive processes. */
4044 CSR_WRITE(sc, WMREG_RCTL, 0);
4045 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4046 sc->sc_rctl &= ~RCTL_EN;
4047
4048 /* XXX set_tbi_sbp_82543() */
4049
4050 delay(10*1000);
4051
4052 /* Must acquire the MDIO ownership before MAC reset */
4053 switch (sc->sc_type) {
4054 case WM_T_82573:
4055 case WM_T_82574:
4056 case WM_T_82583:
4057 i = 0;
4058 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4059 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4060 do {
4061 CSR_WRITE(sc, WMREG_EXTCNFCTR,
4062 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4063 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4064 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4065 break;
4066 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4067 delay(2*1000);
4068 i++;
4069 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4070 break;
4071 default:
4072 break;
4073 }
4074
4075 /*
4076 * 82541 Errata 29? & 82547 Errata 28?
4077 * See also the description about PHY_RST bit in CTRL register
4078 * in 8254x_GBe_SDM.pdf.
4079 */
4080 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4081 CSR_WRITE(sc, WMREG_CTRL,
4082 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4083 delay(5000);
4084 }
4085
4086 switch (sc->sc_type) {
4087 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4088 case WM_T_82541:
4089 case WM_T_82541_2:
4090 case WM_T_82547:
4091 case WM_T_82547_2:
4092 /*
4093 * On some chipsets, a reset through a memory-mapped write
4094 * cycle can cause the chip to reset before completing the
4095 		 * write cycle. This causes major headaches that can be
4096 * avoided by issuing the reset via indirect register writes
4097 * through I/O space.
4098 *
4099 * So, if we successfully mapped the I/O BAR at attach time,
4100 * use that. Otherwise, try our luck with a memory-mapped
4101 * reset.
4102 */
4103 if (sc->sc_flags & WM_F_IOH_VALID)
4104 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4105 else
4106 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4107 break;
4108 case WM_T_82545_3:
4109 case WM_T_82546_3:
4110 /* Use the shadow control register on these chips. */
4111 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4112 break;
4113 case WM_T_80003:
4114 mask = swfwphysem[sc->sc_funcid];
4115 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4116 wm_get_swfw_semaphore(sc, mask);
4117 CSR_WRITE(sc, WMREG_CTRL, reg);
4118 wm_put_swfw_semaphore(sc, mask);
4119 break;
4120 case WM_T_ICH8:
4121 case WM_T_ICH9:
4122 case WM_T_ICH10:
4123 case WM_T_PCH:
4124 case WM_T_PCH2:
4125 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4126 if (wm_check_reset_block(sc) == 0) {
4127 /*
4128 * Gate automatic PHY configuration by hardware on
4129 * non-managed 82579
4130 */
4131 if ((sc->sc_type == WM_T_PCH2)
4132 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4133 != 0))
4134 wm_gate_hw_phy_config_ich8lan(sc, 1);
4135 
4137 reg |= CTRL_PHY_RESET;
4138 phy_reset = 1;
4139 }
4140 wm_get_swfwhw_semaphore(sc);
4141 CSR_WRITE(sc, WMREG_CTRL, reg);
4142 delay(20*1000);
4143 wm_put_swfwhw_semaphore(sc);
4144 break;
4145 case WM_T_82542_2_0:
4146 case WM_T_82542_2_1:
4147 case WM_T_82543:
4148 case WM_T_82540:
4149 case WM_T_82545:
4150 case WM_T_82546:
4151 case WM_T_82571:
4152 case WM_T_82572:
4153 case WM_T_82573:
4154 case WM_T_82574:
4155 case WM_T_82575:
4156 case WM_T_82576:
4157 case WM_T_82580:
4158 case WM_T_82580ER:
4159 case WM_T_82583:
4160 case WM_T_I350:
4161 default:
4162 /* Everything else can safely use the documented method. */
4163 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4164 break;
4165 }
4166
4167 if (phy_reset != 0)
4168 wm_get_cfg_done(sc);
4169
4170 /* reload EEPROM */
4171 switch (sc->sc_type) {
4172 case WM_T_82542_2_0:
4173 case WM_T_82542_2_1:
4174 case WM_T_82543:
4175 case WM_T_82544:
4176 delay(10);
4177 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4178 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4179 delay(2000);
4180 break;
4181 case WM_T_82540:
4182 case WM_T_82545:
4183 case WM_T_82545_3:
4184 case WM_T_82546:
4185 case WM_T_82546_3:
4186 delay(5*1000);
4187 /* XXX Disable HW ARPs on ASF enabled adapters */
4188 break;
4189 case WM_T_82541:
4190 case WM_T_82541_2:
4191 case WM_T_82547:
4192 case WM_T_82547_2:
4193 delay(20000);
4194 /* XXX Disable HW ARPs on ASF enabled adapters */
4195 break;
4196 case WM_T_82571:
4197 case WM_T_82572:
4198 case WM_T_82573:
4199 case WM_T_82574:
4200 case WM_T_82583:
4201 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4202 delay(10);
4203 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4204 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4205 }
4206 /* check EECD_EE_AUTORD */
4207 wm_get_auto_rd_done(sc);
4208 /*
4209 		 * PHY configuration from the NVM starts only after
4210 		 * EECD_AUTO_RD is set.
4211 */
4212 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4213 || (sc->sc_type == WM_T_82583))
4214 delay(25*1000);
4215 break;
4216 case WM_T_82575:
4217 case WM_T_82576:
4218 case WM_T_82580:
4219 case WM_T_82580ER:
4220 case WM_T_I350:
4221 case WM_T_80003:
4222 case WM_T_ICH8:
4223 case WM_T_ICH9:
4224 /* check EECD_EE_AUTORD */
4225 wm_get_auto_rd_done(sc);
4226 break;
4227 case WM_T_ICH10:
4228 case WM_T_PCH:
4229 case WM_T_PCH2:
4230 wm_lan_init_done(sc);
4231 break;
4232 default:
4233 panic("%s: unknown type\n", __func__);
4234 }
4235
4236 /* Check whether EEPROM is present or not */
4237 switch (sc->sc_type) {
4238 case WM_T_82575:
4239 case WM_T_82576:
4240 #if 0 /* XXX */
4241 case WM_T_82580:
4242 case WM_T_82580ER:
4243 #endif
4244 case WM_T_I350:
4245 case WM_T_ICH8:
4246 case WM_T_ICH9:
4247 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4248 /* Not found */
4249 sc->sc_flags |= WM_F_EEPROM_INVALID;
4250 if ((sc->sc_type == WM_T_82575)
4251 || (sc->sc_type == WM_T_82576)
4252 || (sc->sc_type == WM_T_82580)
4253 || (sc->sc_type == WM_T_82580ER)
4254 || (sc->sc_type == WM_T_I350))
4255 wm_reset_init_script_82575(sc);
4256 }
4257 break;
4258 default:
4259 break;
4260 }
4261
4262 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4263 || (sc->sc_type == WM_T_I350)) {
4264 /* clear global device reset status bit */
4265 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4266 }
4267
4268 /* Clear any pending interrupt events. */
4269 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4270 reg = CSR_READ(sc, WMREG_ICR);
4271
4272 /* reload sc_ctrl */
4273 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4274
4275 if (sc->sc_type == WM_T_I350)
4276 wm_set_eee_i350(sc);
4277
4278 /* dummy read from WUC */
4279 if (sc->sc_type == WM_T_PCH)
4280 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4281 /*
4282 	 * For PCH, this write ensures that any noise is detected as a
4283 	 * CRC error and dropped, rather than showing up as a bad packet
4284 	 * to the DMA engine.
4285 */
4286 if (sc->sc_type == WM_T_PCH)
4287 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4288
4289 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4290 CSR_WRITE(sc, WMREG_WUC, 0);
4291
4292 /* XXX need special handling for 82580 */
4293 }
4294
4295 static void
4296 wm_set_vlan(struct wm_softc *sc)
4297 {
4298 /* Deal with VLAN enables. */
4299 if (VLAN_ATTACHED(&sc->sc_ethercom))
4300 sc->sc_ctrl |= CTRL_VME;
4301 else
4302 sc->sc_ctrl &= ~CTRL_VME;
4303
4304 /* Write the control registers. */
4305 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4306 }
4307
4308 /*
4309 * wm_init: [ifnet interface function]
4310 *
4311 * Initialize the interface. Must be called at splnet().
4312 */
4313 static int
4314 wm_init(struct ifnet *ifp)
4315 {
4316 struct wm_softc *sc = ifp->if_softc;
4317 struct wm_rxsoft *rxs;
4318 int i, j, trynum, error = 0;
4319 uint32_t reg;
4320
4321 /*
4322 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4323 	 * There is a small but measurable benefit to avoiding the adjustment
4324 	 * of the descriptor so that the headers are aligned, for normal MTU,
4325 * on such platforms. One possibility is that the DMA itself is
4326 * slightly more efficient if the front of the entire packet (instead
4327 * of the front of the headers) is aligned.
4328 *
4329 * Note we must always set align_tweak to 0 if we are using
4330 * jumbo frames.
4331 */
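	/*
	 * Example: with sc_align_tweak = 2 the 14-byte Ethernet header
	 * ends on a 4-byte boundary, leaving the IP header that follows
	 * 32-bit aligned -- the usual 2-byte offset trick used on
	 * strict-alignment platforms.
	 */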
4332 #ifdef __NO_STRICT_ALIGNMENT
4333 sc->sc_align_tweak = 0;
4334 #else
4335 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4336 sc->sc_align_tweak = 0;
4337 else
4338 sc->sc_align_tweak = 2;
4339 #endif /* __NO_STRICT_ALIGNMENT */
4340
4341 /* Cancel any pending I/O. */
4342 wm_stop(ifp, 0);
4343
4344 /* update statistics before reset */
4345 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4346 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4347
4348 /* Reset the chip to a known state. */
4349 wm_reset(sc);
4350
4351 switch (sc->sc_type) {
4352 case WM_T_82571:
4353 case WM_T_82572:
4354 case WM_T_82573:
4355 case WM_T_82574:
4356 case WM_T_82583:
4357 case WM_T_80003:
4358 case WM_T_ICH8:
4359 case WM_T_ICH9:
4360 case WM_T_ICH10:
4361 case WM_T_PCH:
4362 case WM_T_PCH2:
4363 if (wm_check_mng_mode(sc) != 0)
4364 wm_get_hw_control(sc);
4365 break;
4366 default:
4367 break;
4368 }
4369
4370 /* Reset the PHY. */
4371 if (sc->sc_flags & WM_F_HAS_MII)
4372 wm_gmii_reset(sc);
4373
4374 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4375 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4376 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
4377 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4378
4379 /* Initialize the transmit descriptor ring. */
4380 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4381 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4382 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4383 sc->sc_txfree = WM_NTXDESC(sc);
4384 sc->sc_txnext = 0;
4385
4386 if (sc->sc_type < WM_T_82543) {
4387 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4388 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4389 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4390 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4391 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4392 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4393 } else {
4394 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4395 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4396 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4397 CSR_WRITE(sc, WMREG_TDH, 0);
4398 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4399 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4400
4401 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4402 /*
4403 * Don't write TDT before TCTL.EN is set.
4404 			 * See the documentation.
4405 */
4406 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4407 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4408 | TXDCTL_WTHRESH(0));
4409 else {
4410 CSR_WRITE(sc, WMREG_TDT, 0);
4411 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4412 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4413 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4414 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4415 }
4416 }
4417 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4418 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4419
4420 /* Initialize the transmit job descriptors. */
4421 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4422 sc->sc_txsoft[i].txs_mbuf = NULL;
4423 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4424 sc->sc_txsnext = 0;
4425 sc->sc_txsdirty = 0;
4426
4427 /*
4428 * Initialize the receive descriptor and receive job
4429 * descriptor rings.
4430 */
4431 if (sc->sc_type < WM_T_82543) {
4432 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4433 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4434 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4435 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4436 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4437 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4438
4439 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4440 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4441 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4442 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4443 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4444 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4445 } else {
4446 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4447 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4448 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4449 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4450 CSR_WRITE(sc, WMREG_EITR(0), 450);
4451 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4452 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
4453 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4454 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4455 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4456 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4457 | RXDCTL_WTHRESH(1));
4458 } else {
4459 CSR_WRITE(sc, WMREG_RDH, 0);
4460 CSR_WRITE(sc, WMREG_RDT, 0);
4461 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4462 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4463 }
4464 }
4465 for (i = 0; i < WM_NRXDESC; i++) {
4466 rxs = &sc->sc_rxsoft[i];
4467 if (rxs->rxs_mbuf == NULL) {
4468 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4469 log(LOG_ERR, "%s: unable to allocate or map "
4470 "rx buffer %d, error = %d\n",
4471 device_xname(sc->sc_dev), i, error);
4472 /*
4473 * XXX Should attempt to run with fewer receive
4474 * XXX buffers instead of just failing.
4475 */
4476 wm_rxdrain(sc);
4477 goto out;
4478 }
4479 } else {
4480 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4481 WM_INIT_RXDESC(sc, i);
4482 /*
4483 			 * For 82575 and newer devices, the RX descriptors
4484 			 * must be initialized after RCTL.EN is set in
4485 			 * wm_set_filter().
4486 */
4487 }
4488 }
4489 sc->sc_rxptr = 0;
4490 sc->sc_rxdiscard = 0;
4491 WM_RXCHAIN_RESET(sc);
4492
4493 /*
4494 * Clear out the VLAN table -- we don't use it (yet).
4495 */
4496 CSR_WRITE(sc, WMREG_VET, 0);
4497 if (sc->sc_type == WM_T_I350)
4498 trynum = 10; /* Due to hw errata */
4499 else
4500 trynum = 1;
4501 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4502 for (j = 0; j < trynum; j++)
4503 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4504
4505 /*
4506 * Set up flow-control parameters.
4507 *
4508 * XXX Values could probably stand some tuning.
4509 */
4510 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4511 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4512 && (sc->sc_type != WM_T_PCH2)) {
4513 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4514 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4515 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4516 }
4517
4518 sc->sc_fcrtl = FCRTL_DFLT;
4519 if (sc->sc_type < WM_T_82543) {
4520 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4521 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4522 } else {
4523 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4524 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4525 }
4526
4527 if (sc->sc_type == WM_T_80003)
4528 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4529 else
4530 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4531
4532 /* Writes the control register. */
4533 wm_set_vlan(sc);
4534
4535 if (sc->sc_flags & WM_F_HAS_MII) {
4536 int val;
4537
4538 switch (sc->sc_type) {
4539 case WM_T_80003:
4540 case WM_T_ICH8:
4541 case WM_T_ICH9:
4542 case WM_T_ICH10:
4543 case WM_T_PCH:
4544 case WM_T_PCH2:
4545 /*
4546 * Set the mac to wait the maximum time between each
4547 * iteration and increase the max iterations when
4548 * polling the phy; this fixes erroneous timeouts at
4549 * 10Mbps.
4550 */
4551 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4552 0xFFFF);
4553 val = wm_kmrn_readreg(sc,
4554 KUMCTRLSTA_OFFSET_INB_PARAM);
4555 val |= 0x3F;
4556 wm_kmrn_writereg(sc,
4557 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4558 break;
4559 default:
4560 break;
4561 }
4562
4563 if (sc->sc_type == WM_T_80003) {
4564 val = CSR_READ(sc, WMREG_CTRL_EXT);
4565 val &= ~CTRL_EXT_LINK_MODE_MASK;
4566 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4567
4568 			/* Bypass the RX and TX FIFOs */
4569 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4570 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4571 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4572 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4573 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4574 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4575 }
4576 }
4577 #if 0
4578 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4579 #endif
4580
4581 /*
4582 * Set up checksum offload parameters.
4583 */
4584 reg = CSR_READ(sc, WMREG_RXCSUM);
4585 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4586 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4587 reg |= RXCSUM_IPOFL;
4588 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4589 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4590 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4591 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4592 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4593
4594 /* Reset TBI's RXCFG count */
4595 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4596
4597 /*
4598 * Set up the interrupt registers.
4599 */
4600 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4601 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4602 ICR_RXO | ICR_RXT0;
4603 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4604 sc->sc_icr |= ICR_RXCFG;
4605 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4606
4607 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4608 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4609 || (sc->sc_type == WM_T_PCH2)) {
4610 reg = CSR_READ(sc, WMREG_KABGTXD);
4611 reg |= KABGTXD_BGSQLBIAS;
4612 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4613 }
4614
4615 /* Set up the inter-packet gap. */
4616 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4617
4618 if (sc->sc_type >= WM_T_82543) {
4619 /*
4620 * Set up the interrupt throttling register (units of 256ns)
4621 * Note that a footnote in Intel's documentation says this
4622 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4623 * or 10Mbit mode. Empirically, it appears to be the case
4624 * that that is also true for the 1024ns units of the other
4625 * interrupt-related timer registers -- so, really, we ought
4626 * to divide this value by 4 when the link speed is low.
4627 *
4628 * XXX implement this division at link speed change!
4629 */
4630
4631 /*
4632 * For N interrupts/sec, set this value to:
4633 * 1000000000 / (N * 256). Note that we set the
4634 * absolute and packet timer values to this value
4635 * divided by 4 to get "simple timer" behavior.
4636 */
4637
4638 sc->sc_itr = 1500; /* 2604 ints/sec */
4639 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
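		/*
		 * Worked example: 1500 * 256ns = 384us between
		 * interrupts, i.e. 1000000000 / (1500 * 256) ~= 2604
		 * interrupts/sec, matching the comment above.
		 */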
4640 }
4641
4642 /* Set the VLAN ethernetype. */
4643 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4644
4645 /*
4646 * Set up the transmit control register; we start out with
4647 	 * a collision distance suitable for FDX, but update it when
4648 * we resolve the media type.
4649 */
4650 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4651 | TCTL_CT(TX_COLLISION_THRESHOLD)
4652 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4653 if (sc->sc_type >= WM_T_82571)
4654 sc->sc_tctl |= TCTL_MULR;
4655 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4656
4657 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4658 /*
4659 * Write TDT after TCTL.EN is set.
4660 		 * See the documentation.
4661 */
4662 CSR_WRITE(sc, WMREG_TDT, 0);
4663 }
4664
4665 if (sc->sc_type == WM_T_80003) {
4666 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4667 reg &= ~TCTL_EXT_GCEX_MASK;
4668 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4669 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4670 }
4671
4672 /* Set the media. */
4673 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4674 goto out;
4675
4676 /* Configure for OS presence */
4677 wm_init_manageability(sc);
4678
4679 /*
4680 * Set up the receive control register; we actually program
4681 * the register when we set the receive filter. Use multicast
4682 * address offset type 0.
4683 *
4684 	 * Only the i82544 can strip the incoming CRC, so to keep the
4685 	 * behavior uniform across chips we don't enable that feature.
4686 */
4687 sc->sc_mchash_type = 0;
4688 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4689 | RCTL_MO(sc->sc_mchash_type);
4690
4691 /*
4692 * The I350 has a bug where it always strips the CRC whether
4693 	 * asked to or not, so ask for a stripped CRC here and cope in wm_rxintr().
4694 */
4695 if (sc->sc_type == WM_T_I350)
4696 sc->sc_rctl |= RCTL_SECRC;
4697
4698 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4699 && (ifp->if_mtu > ETHERMTU)) {
4700 sc->sc_rctl |= RCTL_LPE;
4701 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4702 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4703 }
4704
4705 if (MCLBYTES == 2048) {
4706 sc->sc_rctl |= RCTL_2k;
4707 } else {
4708 if (sc->sc_type >= WM_T_82543) {
4709 switch (MCLBYTES) {
4710 case 4096:
4711 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4712 break;
4713 case 8192:
4714 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4715 break;
4716 case 16384:
4717 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4718 break;
4719 default:
4720 panic("wm_init: MCLBYTES %d unsupported",
4721 MCLBYTES);
4722 break;
4723 }
4724 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4725 }
4726
4727 /* Set the receive filter. */
4728 wm_set_filter(sc);
4729
4730 	/* On 82575 and later, set RDT only if RX is enabled */
4731 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4732 for (i = 0; i < WM_NRXDESC; i++)
4733 WM_INIT_RXDESC(sc, i);
4734
4735 /* Start the one second link check clock. */
4736 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4737
4738 /* ...all done! */
4739 ifp->if_flags |= IFF_RUNNING;
4740 ifp->if_flags &= ~IFF_OACTIVE;
4741
4742 out:
4743 sc->sc_if_flags = ifp->if_flags;
4744 if (error)
4745 log(LOG_ERR, "%s: interface not running\n",
4746 device_xname(sc->sc_dev));
4747 return error;
4748 }
4749
4750 /*
4751 * wm_rxdrain:
4752 *
4753 * Drain the receive queue.
4754 */
4755 static void
4756 wm_rxdrain(struct wm_softc *sc)
4757 {
4758 struct wm_rxsoft *rxs;
4759 int i;
4760
4761 for (i = 0; i < WM_NRXDESC; i++) {
4762 rxs = &sc->sc_rxsoft[i];
4763 if (rxs->rxs_mbuf != NULL) {
4764 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4765 m_freem(rxs->rxs_mbuf);
4766 rxs->rxs_mbuf = NULL;
4767 }
4768 }
4769 }
4770
4771 /*
4772 * wm_stop: [ifnet interface function]
4773 *
4774 * Stop transmission on the interface.
4775 */
4776 static void
4777 wm_stop(struct ifnet *ifp, int disable)
4778 {
4779 struct wm_softc *sc = ifp->if_softc;
4780 struct wm_txsoft *txs;
4781 int i;
4782
4783 /* Stop the one second clock. */
4784 callout_stop(&sc->sc_tick_ch);
4785
4786 /* Stop the 82547 Tx FIFO stall check timer. */
4787 if (sc->sc_type == WM_T_82547)
4788 callout_stop(&sc->sc_txfifo_ch);
4789
4790 if (sc->sc_flags & WM_F_HAS_MII) {
4791 /* Down the MII. */
4792 mii_down(&sc->sc_mii);
4793 } else {
4794 #if 0
4795 /* Should we clear PHY's status properly? */
4796 wm_reset(sc);
4797 #endif
4798 }
4799
4800 /* Stop the transmit and receive processes. */
4801 CSR_WRITE(sc, WMREG_TCTL, 0);
4802 CSR_WRITE(sc, WMREG_RCTL, 0);
4803 sc->sc_rctl &= ~RCTL_EN;
4804
4805 /*
4806 * Clear the interrupt mask to ensure the device cannot assert its
4807 * interrupt line.
4808 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4809 * any currently pending or shared interrupt.
4810 */
4811 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4812 sc->sc_icr = 0;
4813
4814 /* Release any queued transmit buffers. */
4815 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4816 txs = &sc->sc_txsoft[i];
4817 if (txs->txs_mbuf != NULL) {
4818 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4819 m_freem(txs->txs_mbuf);
4820 txs->txs_mbuf = NULL;
4821 }
4822 }
4823
4824 /* Mark the interface as down and cancel the watchdog timer. */
4825 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4826 ifp->if_timer = 0;
4827
4828 if (disable)
4829 wm_rxdrain(sc);
4830
4831 #if 0 /* notyet */
4832 if (sc->sc_type >= WM_T_82544)
4833 CSR_WRITE(sc, WMREG_WUC, 0);
4834 #endif
4835 }
4836
4837 void
4838 wm_get_auto_rd_done(struct wm_softc *sc)
4839 {
4840 int i;
4841
4842 /* wait for eeprom to reload */
4843 switch (sc->sc_type) {
4844 case WM_T_82571:
4845 case WM_T_82572:
4846 case WM_T_82573:
4847 case WM_T_82574:
4848 case WM_T_82583:
4849 case WM_T_82575:
4850 case WM_T_82576:
4851 case WM_T_82580:
4852 case WM_T_82580ER:
4853 case WM_T_I350:
4854 case WM_T_80003:
4855 case WM_T_ICH8:
4856 case WM_T_ICH9:
4857 for (i = 0; i < 10; i++) {
4858 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4859 break;
4860 delay(1000);
4861 }
4862 if (i == 10) {
4863 log(LOG_ERR, "%s: auto read from eeprom failed to "
4864 "complete\n", device_xname(sc->sc_dev));
4865 }
4866 break;
4867 default:
4868 break;
4869 }
4870 }
4871
4872 void
4873 wm_lan_init_done(struct wm_softc *sc)
4874 {
4875 uint32_t reg = 0;
4876 int i;
4877
4878 	/* Wait for the hardware to finish LAN initialization */
4879 switch (sc->sc_type) {
4880 case WM_T_ICH10:
4881 case WM_T_PCH:
4882 case WM_T_PCH2:
4883 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4884 reg = CSR_READ(sc, WMREG_STATUS);
4885 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4886 break;
4887 delay(100);
4888 }
4889 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4890 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4891 "complete\n", device_xname(sc->sc_dev), __func__);
4892 }
4893 break;
4894 default:
4895 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4896 __func__);
4897 break;
4898 }
4899
4900 reg &= ~STATUS_LAN_INIT_DONE;
4901 CSR_WRITE(sc, WMREG_STATUS, reg);
4902 }
4903
4904 void
4905 wm_get_cfg_done(struct wm_softc *sc)
4906 {
4907 int mask;
4908 uint32_t reg;
4909 int i;
4910
4911 	/* Wait for the PHY configuration to complete */
4912 switch (sc->sc_type) {
4913 case WM_T_82542_2_0:
4914 case WM_T_82542_2_1:
4915 /* null */
4916 break;
4917 case WM_T_82543:
4918 case WM_T_82544:
4919 case WM_T_82540:
4920 case WM_T_82545:
4921 case WM_T_82545_3:
4922 case WM_T_82546:
4923 case WM_T_82546_3:
4924 case WM_T_82541:
4925 case WM_T_82541_2:
4926 case WM_T_82547:
4927 case WM_T_82547_2:
4928 case WM_T_82573:
4929 case WM_T_82574:
4930 case WM_T_82583:
4931 /* generic */
4932 delay(10*1000);
4933 break;
4934 case WM_T_80003:
4935 case WM_T_82571:
4936 case WM_T_82572:
4937 case WM_T_82575:
4938 case WM_T_82576:
4939 case WM_T_82580:
4940 case WM_T_82580ER:
4941 case WM_T_I350:
4942 if (sc->sc_type == WM_T_82571) {
4943 /* Only 82571 shares port 0 */
4944 mask = EEMNGCTL_CFGDONE_0;
4945 } else
4946 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4947 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4948 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4949 break;
4950 delay(1000);
4951 }
4952 if (i >= WM_PHY_CFG_TIMEOUT) {
4953 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4954 device_xname(sc->sc_dev), __func__));
4955 }
4956 break;
4957 case WM_T_ICH8:
4958 case WM_T_ICH9:
4959 case WM_T_ICH10:
4960 case WM_T_PCH:
4961 case WM_T_PCH2:
4962 if (sc->sc_type >= WM_T_PCH) {
4963 reg = CSR_READ(sc, WMREG_STATUS);
4964 if ((reg & STATUS_PHYRA) != 0)
4965 CSR_WRITE(sc, WMREG_STATUS,
4966 reg & ~STATUS_PHYRA);
4967 }
4968 delay(10*1000);
4969 break;
4970 default:
4971 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4972 __func__);
4973 break;
4974 }
4975 }
4976
4977 /*
4978 * wm_acquire_eeprom:
4979 *
4980 * Perform the EEPROM handshake required on some chips.
4981 */
4982 static int
4983 wm_acquire_eeprom(struct wm_softc *sc)
4984 {
4985 uint32_t reg;
4986 int x;
4987 int ret = 0;
4988
4989 	/* Flash-type EEPROM needs no handshake; always succeeds */
4990 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4991 return 0;
4992
4993 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4994 ret = wm_get_swfwhw_semaphore(sc);
4995 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4996 /* this will also do wm_get_swsm_semaphore() if needed */
4997 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4998 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4999 ret = wm_get_swsm_semaphore(sc);
5000 }
5001
5002 if (ret) {
5003 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5004 __func__);
5005 return 1;
5006 }
5007
5008 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5009 reg = CSR_READ(sc, WMREG_EECD);
5010
5011 /* Request EEPROM access. */
5012 reg |= EECD_EE_REQ;
5013 CSR_WRITE(sc, WMREG_EECD, reg);
5014
5015 		/* ...and wait for it to be granted. */
5016 for (x = 0; x < 1000; x++) {
5017 reg = CSR_READ(sc, WMREG_EECD);
5018 if (reg & EECD_EE_GNT)
5019 break;
5020 delay(5);
5021 }
5022 if ((reg & EECD_EE_GNT) == 0) {
5023 aprint_error_dev(sc->sc_dev,
5024 "could not acquire EEPROM GNT\n");
5025 reg &= ~EECD_EE_REQ;
5026 CSR_WRITE(sc, WMREG_EECD, reg);
5027 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5028 wm_put_swfwhw_semaphore(sc);
5029 if (sc->sc_flags & WM_F_SWFW_SYNC)
5030 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5031 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5032 wm_put_swsm_semaphore(sc);
5033 return 1;
5034 }
5035 }
5036
5037 return 0;
5038 }
5039
5040 /*
5041 * wm_release_eeprom:
5042 *
5043 * Release the EEPROM mutex.
5044 */
5045 static void
5046 wm_release_eeprom(struct wm_softc *sc)
5047 {
5048 uint32_t reg;
5049
5050 	/* Flash-type EEPROM needs no handshake; nothing to release */
5051 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5052 return;
5053
5054 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5055 reg = CSR_READ(sc, WMREG_EECD);
5056 reg &= ~EECD_EE_REQ;
5057 CSR_WRITE(sc, WMREG_EECD, reg);
5058 }
5059
5060 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5061 wm_put_swfwhw_semaphore(sc);
5062 if (sc->sc_flags & WM_F_SWFW_SYNC)
5063 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5064 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5065 wm_put_swsm_semaphore(sc);
5066 }
5067
5068 /*
5069 * wm_eeprom_sendbits:
5070 *
5071 * Send a series of bits to the EEPROM.
5072 */
5073 static void
5074 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5075 {
5076 uint32_t reg;
5077 int x;
5078
5079 reg = CSR_READ(sc, WMREG_EECD);
5080
5081 for (x = nbits; x > 0; x--) {
5082 if (bits & (1U << (x - 1)))
5083 reg |= EECD_DI;
5084 else
5085 reg &= ~EECD_DI;
5086 CSR_WRITE(sc, WMREG_EECD, reg);
5087 delay(2);
5088 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5089 delay(2);
5090 CSR_WRITE(sc, WMREG_EECD, reg);
5091 delay(2);
5092 }
5093 }
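
/*
 * Per bit (MSB first), the routine above drives DI, waits 2us, raises
 * SK for 2us, then drops SK for another 2us; the serial EEPROM latches
 * DI on the rising edge of SK (the usual MicroWire/SPI convention).
 */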
5094
5095 /*
5096 * wm_eeprom_recvbits:
5097 *
5098 * Receive a series of bits from the EEPROM.
5099 */
5100 static void
5101 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5102 {
5103 uint32_t reg, val;
5104 int x;
5105
5106 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5107
5108 val = 0;
5109 for (x = nbits; x > 0; x--) {
5110 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5111 delay(2);
5112 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5113 val |= (1U << (x - 1));
5114 CSR_WRITE(sc, WMREG_EECD, reg);
5115 delay(2);
5116 }
5117 *valp = val;
5118 }
5119
5120 /*
5121 * wm_read_eeprom_uwire:
5122 *
5123 * Read a word from the EEPROM using the MicroWire protocol.
5124 */
5125 static int
5126 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5127 {
5128 uint32_t reg, val;
5129 int i;
5130
5131 for (i = 0; i < wordcnt; i++) {
5132 /* Clear SK and DI. */
5133 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5134 CSR_WRITE(sc, WMREG_EECD, reg);
5135
5136 /*
5137 * XXX: workaround for a bug in qemu-0.12.x and prior
5138 * and Xen.
5139 *
5140 * We use this workaround only for 82540 because qemu's
5141 		 * e1000 acts as an 82540.
5142 */
5143 if (sc->sc_type == WM_T_82540) {
5144 reg |= EECD_SK;
5145 CSR_WRITE(sc, WMREG_EECD, reg);
5146 reg &= ~EECD_SK;
5147 CSR_WRITE(sc, WMREG_EECD, reg);
5148 delay(2);
5149 }
5150 /* XXX: end of workaround */
5151
5152 /* Set CHIP SELECT. */
5153 reg |= EECD_CS;
5154 CSR_WRITE(sc, WMREG_EECD, reg);
5155 delay(2);
5156
5157 /* Shift in the READ command. */
5158 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5159
5160 /* Shift in address. */
5161 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5162
5163 /* Shift out the data. */
5164 wm_eeprom_recvbits(sc, &val, 16);
5165 data[i] = val & 0xffff;
5166
5167 /* Clear CHIP SELECT. */
5168 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5169 CSR_WRITE(sc, WMREG_EECD, reg);
5170 delay(2);
5171 }
5172
5173 return 0;
5174 }
5175
5176 /*
5177 * wm_spi_eeprom_ready:
5178 *
5179 * Wait for a SPI EEPROM to be ready for commands.
5180 */
5181 static int
5182 wm_spi_eeprom_ready(struct wm_softc *sc)
5183 {
5184 uint32_t val;
5185 int usec;
5186
5187 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5188 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5189 wm_eeprom_recvbits(sc, &val, 8);
5190 if ((val & SPI_SR_RDY) == 0)
5191 break;
5192 }
5193 if (usec >= SPI_MAX_RETRIES) {
5194 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5195 return 1;
5196 }
5197 return 0;
5198 }
5199
5200 /*
5201 * wm_read_eeprom_spi:
5202 *
5203  *	Read a word from the EEPROM using the SPI protocol.
5204 */
5205 static int
5206 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5207 {
5208 uint32_t reg, val;
5209 int i;
5210 uint8_t opc;
5211
5212 /* Clear SK and CS. */
5213 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5214 CSR_WRITE(sc, WMREG_EECD, reg);
5215 delay(2);
5216
5217 if (wm_spi_eeprom_ready(sc))
5218 return 1;
5219
5220 /* Toggle CS to flush commands. */
5221 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5222 delay(2);
5223 CSR_WRITE(sc, WMREG_EECD, reg);
5224 delay(2);
5225
5226 opc = SPI_OPC_READ;
5227 if (sc->sc_ee_addrbits == 8 && word >= 128)
5228 opc |= SPI_OPC_A8;
5229
5230 wm_eeprom_sendbits(sc, opc, 8);
5231 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5232
5233 for (i = 0; i < wordcnt; i++) {
5234 wm_eeprom_recvbits(sc, &val, 16);
5235 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5236 }
5237
5238 /* Raise CS and clear SK. */
5239 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5240 CSR_WRITE(sc, WMREG_EECD, reg);
5241 delay(2);
5242
5243 return 0;
5244 }
5245
5246 #define EEPROM_CHECKSUM 0xBABA
5247 #define EEPROM_SIZE 0x0040
5248
5249 /*
5250 * wm_validate_eeprom_checksum
5251 *
5252 * The checksum is defined as the sum of the first 64 (16 bit) words.
5253 */
5254 static int
5255 wm_validate_eeprom_checksum(struct wm_softc *sc)
5256 {
5257 uint16_t checksum;
5258 uint16_t eeprom_data;
5259 int i;
5260
5261 checksum = 0;
5262
5263 for (i = 0; i < EEPROM_SIZE; i++) {
5264 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5265 return 1;
5266 checksum += eeprom_data;
5267 }
5268
5269 if (checksum != (uint16_t) EEPROM_CHECKSUM)
5270 return 1;
5271
5272 return 0;
5273 }
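
#if 0
/*
 * Illustration only (not used by the driver): a tool writing the NVM
 * chooses the checksum word so that the 64-word sum above comes out
 * to EEPROM_CHECKSUM.  A minimal sketch, assuming the checksum word
 * is the last of the EEPROM_SIZE words; the function name is made up
 * for this example.
 */
static uint16_t
wm_eeprom_checksum_word(const uint16_t *words)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < EEPROM_SIZE - 1; i++)
		sum += words[i];
	return (uint16_t)(EEPROM_CHECKSUM - sum);
}
#endif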
5274
5275 /*
5276 * wm_read_eeprom:
5277 *
5278 * Read data from the serial EEPROM.
5279 */
5280 static int
5281 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5282 {
5283 int rv;
5284
5285 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5286 return 1;
5287
5288 if (wm_acquire_eeprom(sc))
5289 return 1;
5290
5291 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5292 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5293 || (sc->sc_type == WM_T_PCH2))
5294 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5295 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5296 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5297 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5298 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5299 else
5300 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5301
5302 wm_release_eeprom(sc);
5303 return rv;
5304 }
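
/*
 * A minimal usage sketch (essentially what wm_read_mac_addr() below
 * does): read the three 16-bit words holding the station address.
 *
 *	uint16_t myea[ETHER_ADDR_LEN / 2];
 *
 *	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
 *	    sizeof(myea) / sizeof(myea[0]), myea) == 0)
 *		... myea[0..2] hold the address, low byte first.
 */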
5305
5306 static int
5307 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5308 uint16_t *data)
5309 {
5310 int i, eerd = 0;
5311 int error = 0;
5312
5313 for (i = 0; i < wordcnt; i++) {
5314 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5315
5316 CSR_WRITE(sc, WMREG_EERD, eerd);
5317 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5318 if (error != 0)
5319 break;
5320
5321 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5322 }
5323
5324 return error;
5325 }
5326
5327 static int
5328 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5329 {
5330 uint32_t attempts = 100000;
5331 uint32_t i, reg = 0;
5332 int32_t done = -1;
5333
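	/* Poll up to 100000 * 5us ~= 0.5s for the DONE bit. */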
5334 for (i = 0; i < attempts; i++) {
5335 reg = CSR_READ(sc, rw);
5336
5337 if (reg & EERD_DONE) {
5338 done = 0;
5339 break;
5340 }
5341 delay(5);
5342 }
5343
5344 return done;
5345 }
5346
5347 static int
5348 wm_check_alt_mac_addr(struct wm_softc *sc)
5349 {
5350 uint16_t myea[ETHER_ADDR_LEN / 2];
5351 uint16_t offset = EEPROM_OFF_MACADDR;
5352
5353 /* Try to read alternative MAC address pointer */
5354 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5355 return -1;
5356
5357 /* Check pointer */
5358 if (offset == 0xffff)
5359 return -1;
5360
5361 /*
5362 	 * Check whether the alternative MAC address is valid or not.
5363 	 * Some cards have a non-0xffff pointer but don't actually use
5364 	 * an alternative MAC address.
5365 	 *
5366 	 * A valid (unicast) address must have the multicast (I/G) bit clear.
5367 */
5368 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5369 if (((myea[0] & 0xff) & 0x01) == 0)
5370 return 0; /* found! */
5371
5372 /* not found */
5373 return -1;
5374 }
5375
5376 static int
5377 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5378 {
5379 uint16_t myea[ETHER_ADDR_LEN / 2];
5380 uint16_t offset = EEPROM_OFF_MACADDR;
5381 int do_invert = 0;
5382
5383 switch (sc->sc_type) {
5384 case WM_T_82580:
5385 case WM_T_82580ER:
5386 case WM_T_I350:
5387 switch (sc->sc_funcid) {
5388 case 0:
5389 /* default value (== EEPROM_OFF_MACADDR) */
5390 break;
5391 case 1:
5392 offset = EEPROM_OFF_LAN1;
5393 break;
5394 case 2:
5395 offset = EEPROM_OFF_LAN2;
5396 break;
5397 case 3:
5398 offset = EEPROM_OFF_LAN3;
5399 break;
5400 default:
5401 goto bad;
5402 /* NOTREACHED */
5403 break;
5404 }
5405 break;
5406 case WM_T_82571:
5407 case WM_T_82575:
5408 case WM_T_82576:
5409 case WM_T_80003:
5410 if (wm_check_alt_mac_addr(sc) != 0) {
5411 /* reset the offset to LAN0 */
5412 offset = EEPROM_OFF_MACADDR;
5413 if ((sc->sc_funcid & 0x01) == 1)
5414 do_invert = 1;
5415 goto do_read;
5416 }
5417 switch (sc->sc_funcid) {
5418 case 0:
5419 /*
5420 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5421 * itself.
5422 */
5423 break;
5424 case 1:
5425 offset += EEPROM_OFF_MACADDR_LAN1;
5426 break;
5427 case 2:
5428 offset += EEPROM_OFF_MACADDR_LAN2;
5429 break;
5430 case 3:
5431 offset += EEPROM_OFF_MACADDR_LAN3;
5432 break;
5433 default:
5434 goto bad;
5435 /* NOTREACHED */
5436 break;
5437 }
5438 break;
5439 default:
5440 if ((sc->sc_funcid & 0x01) == 1)
5441 do_invert = 1;
5442 break;
5443 }
5444
5445 do_read:
5446 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5447 myea) != 0) {
5448 goto bad;
5449 }
5450
5451 enaddr[0] = myea[0] & 0xff;
5452 enaddr[1] = myea[0] >> 8;
5453 enaddr[2] = myea[1] & 0xff;
5454 enaddr[3] = myea[1] >> 8;
5455 enaddr[4] = myea[2] & 0xff;
5456 enaddr[5] = myea[2] >> 8;
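
	/*
	 * Example: myea = { 0x0e00, 0x4c0c, 0xf1ab } unpacks, low byte
	 * of each word first, to 00:0e:0c:4c:ab:f1.
	 */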
5457
5458 /*
5459 * Toggle the LSB of the MAC address on the second port
5460 * of some dual port cards.
5461 */
5462 if (do_invert != 0)
5463 enaddr[5] ^= 1;
5464
5465 return 0;
5466
5467 bad:
5468 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5469
5470 return -1;
5471 }
5472
5473 /*
5474 * wm_add_rxbuf:
5475 *
5476  *	Add a receive buffer to the indicated descriptor.
5477 */
5478 static int
5479 wm_add_rxbuf(struct wm_softc *sc, int idx)
5480 {
5481 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5482 struct mbuf *m;
5483 int error;
5484
5485 MGETHDR(m, M_DONTWAIT, MT_DATA);
5486 if (m == NULL)
5487 return ENOBUFS;
5488
5489 MCLGET(m, M_DONTWAIT);
5490 if ((m->m_flags & M_EXT) == 0) {
5491 m_freem(m);
5492 return ENOBUFS;
5493 }
5494
5495 if (rxs->rxs_mbuf != NULL)
5496 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5497
5498 rxs->rxs_mbuf = m;
5499
5500 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5501 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5502 BUS_DMA_READ|BUS_DMA_NOWAIT);
5503 if (error) {
5504 /* XXX XXX XXX */
5505 aprint_error_dev(sc->sc_dev,
5506 "unable to load rx DMA map %d, error = %d\n",
5507 idx, error);
5508 panic("wm_add_rxbuf");
5509 }
5510
5511 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5512 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5513
5514 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5515 if ((sc->sc_rctl & RCTL_EN) != 0)
5516 WM_INIT_RXDESC(sc, idx);
5517 } else
5518 WM_INIT_RXDESC(sc, idx);
5519
5520 return 0;
5521 }
5522
5523 /*
5524 * wm_set_ral:
5525 *
5526  *	Set an entry in the receive address list.
5527 */
5528 static void
5529 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5530 {
5531 uint32_t ral_lo, ral_hi;
5532
5533 if (enaddr != NULL) {
5534 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5535 (enaddr[3] << 24);
5536 ral_hi = enaddr[4] | (enaddr[5] << 8);
5537 ral_hi |= RAL_AV;
5538 } else {
5539 ral_lo = 0;
5540 ral_hi = 0;
5541 }
5542
5543 if (sc->sc_type >= WM_T_82544) {
5544 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5545 ral_lo);
5546 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5547 ral_hi);
5548 } else {
5549 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5550 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5551 }
5552 }
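
/*
 * Packing example: for station address aa:bb:cc:dd:ee:ff the code above
 * yields ral_lo = 0xddccbbaa and ral_hi = 0x0000ffee | RAL_AV.
 */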
5553
5554 /*
5555 * wm_mchash:
5556 *
5557 * Compute the hash of the multicast address for the 4096-bit
5558  *	multicast filter (1024 bits on the ICH and PCH variants).
5559 */
5560 static uint32_t
5561 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5562 {
5563 static const int lo_shift[4] = { 4, 3, 2, 0 };
5564 static const int hi_shift[4] = { 4, 5, 6, 8 };
5565 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5566 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5567 uint32_t hash;
5568
5569 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5570 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5571 || (sc->sc_type == WM_T_PCH2)) {
5572 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5573 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5574 return (hash & 0x3ff);
5575 }
5576 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5577 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5578
5579 return (hash & 0xfff);
5580 }
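
/*
 * Worked example, mchash_type 0 on a non-ICH chip: for the multicast
 * address 01:00:5e:00:00:01, enaddr[4] = 0x00 and enaddr[5] = 0x01, so
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010; wm_set_filter() below then
 * sets bit 0x10 of MTA register 0.
 */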
5581
5582 /*
5583 * wm_set_filter:
5584 *
5585 * Set up the receive filter.
5586 */
5587 static void
5588 wm_set_filter(struct wm_softc *sc)
5589 {
5590 struct ethercom *ec = &sc->sc_ethercom;
5591 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5592 struct ether_multi *enm;
5593 struct ether_multistep step;
5594 bus_addr_t mta_reg;
5595 uint32_t hash, reg, bit;
5596 int i, size;
5597
5598 if (sc->sc_type >= WM_T_82544)
5599 mta_reg = WMREG_CORDOVA_MTA;
5600 else
5601 mta_reg = WMREG_MTA;
5602
5603 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5604
5605 if (ifp->if_flags & IFF_BROADCAST)
5606 sc->sc_rctl |= RCTL_BAM;
5607 if (ifp->if_flags & IFF_PROMISC) {
5608 sc->sc_rctl |= RCTL_UPE;
5609 goto allmulti;
5610 }
5611
5612 /*
5613 * Set the station address in the first RAL slot, and
5614 * clear the remaining slots.
5615 */
5616 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5617 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5618 || (sc->sc_type == WM_T_PCH2))
5619 size = WM_ICH8_RAL_TABSIZE;
5620 else
5621 size = WM_RAL_TABSIZE;
5622 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5623 for (i = 1; i < size; i++)
5624 wm_set_ral(sc, NULL, i);
5625
5626 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5627 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5628 || (sc->sc_type == WM_T_PCH2))
5629 size = WM_ICH8_MC_TABSIZE;
5630 else
5631 size = WM_MC_TABSIZE;
5632 /* Clear out the multicast table. */
5633 for (i = 0; i < size; i++)
5634 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5635
5636 ETHER_FIRST_MULTI(step, ec, enm);
5637 while (enm != NULL) {
5638 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5639 /*
5640 * We must listen to a range of multicast addresses.
5641 * For now, just accept all multicasts, rather than
5642 * trying to set only those filter bits needed to match
5643 * the range. (At this time, the only use of address
5644 * ranges is for IP multicast routing, for which the
5645 * range is big enough to require all bits set.)
5646 */
5647 goto allmulti;
5648 }
5649
5650 hash = wm_mchash(sc, enm->enm_addrlo);
5651
5652 reg = (hash >> 5);
5653 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5654 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5655 || (sc->sc_type == WM_T_PCH2))
5656 reg &= 0x1f;
5657 else
5658 reg &= 0x7f;
5659 bit = hash & 0x1f;
5660
5661 hash = CSR_READ(sc, mta_reg + (reg << 2));
5662 hash |= 1U << bit;
5663
		/* XXX 82544 hardware bug: rewrite the adjacent MTA word too */
		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
5666 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5667 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5668 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5669 } else
5670 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5671
5672 ETHER_NEXT_MULTI(step, enm);
5673 }
5674
5675 ifp->if_flags &= ~IFF_ALLMULTI;
5676 goto setit;
5677
5678 allmulti:
5679 ifp->if_flags |= IFF_ALLMULTI;
5680 sc->sc_rctl |= RCTL_MPE;
5681
5682 setit:
5683 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5684 }
5685
5686 /*
5687 * wm_tbi_mediainit:
5688 *
5689 * Initialize media for use on 1000BASE-X devices.
5690 */
5691 static void
5692 wm_tbi_mediainit(struct wm_softc *sc)
5693 {
5694 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5695 const char *sep = "";
5696
5697 if (sc->sc_type < WM_T_82543)
5698 sc->sc_tipg = TIPG_WM_DFLT;
5699 else
5700 sc->sc_tipg = TIPG_LG_DFLT;
5701
5702 sc->sc_tbi_anegticks = 5;
5703
5704 /* Initialize our media structures */
5705 sc->sc_mii.mii_ifp = ifp;
5706
5707 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5708 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5709 wm_tbi_mediastatus);
5710
5711 /*
5712 * SWD Pins:
5713 *
5714 * 0 = Link LED (output)
5715 * 1 = Loss Of Signal (input)
5716 */
5717 sc->sc_ctrl |= CTRL_SWDPIO(0);
5718 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5719
5720 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5721
5722 #define ADD(ss, mm, dd) \
5723 do { \
5724 aprint_normal("%s%s", sep, ss); \
5725 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5726 sep = ", "; \
5727 } while (/*CONSTCOND*/0)
5728
5729 aprint_normal_dev(sc->sc_dev, "");
5730 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5731 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5732 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5733 aprint_normal("\n");
5734
5735 #undef ADD
5736
5737 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5738 }
5739
5740 /*
5741 * wm_tbi_mediastatus: [ifmedia interface function]
5742 *
5743 * Get the current interface media status on a 1000BASE-X device.
5744 */
5745 static void
5746 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5747 {
5748 struct wm_softc *sc = ifp->if_softc;
5749 uint32_t ctrl, status;
5750
5751 ifmr->ifm_status = IFM_AVALID;
5752 ifmr->ifm_active = IFM_ETHER;
5753
5754 status = CSR_READ(sc, WMREG_STATUS);
5755 if ((status & STATUS_LU) == 0) {
5756 ifmr->ifm_active |= IFM_NONE;
5757 return;
5758 }
5759
5760 ifmr->ifm_status |= IFM_ACTIVE;
5761 ifmr->ifm_active |= IFM_1000_SX;
	if (status & STATUS_FD)
5763 ifmr->ifm_active |= IFM_FDX;
5764 ctrl = CSR_READ(sc, WMREG_CTRL);
5765 if (ctrl & CTRL_RFCE)
5766 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5767 if (ctrl & CTRL_TFCE)
5768 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5769 }
5770
5771 /*
5772 * wm_tbi_mediachange: [ifmedia interface function]
5773 *
5774 * Set hardware to newly-selected media on a 1000BASE-X device.
5775 */
5776 static int
5777 wm_tbi_mediachange(struct ifnet *ifp)
5778 {
5779 struct wm_softc *sc = ifp->if_softc;
5780 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5781 uint32_t status;
5782 int i;
5783
5784 sc->sc_txcw = 0;
5785 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5786 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5787 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5788 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5789 sc->sc_txcw |= TXCW_ANE;
5790 } else {
5791 /*
5792 * If autonegotiation is turned off, force link up and turn on
5793 * full duplex
5794 */
5795 sc->sc_txcw &= ~TXCW_ANE;
5796 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5797 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5798 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5799 delay(1000);
5800 }
5801
5802 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5803 device_xname(sc->sc_dev),sc->sc_txcw));
5804 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5805 delay(10000);
5806
5807 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5808 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5809
	/*
	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
	 * optics detect a signal; on the 82544 and older chips the pin
	 * carries Loss Of Signal, so a clear pin means we have signal.
	 */
5814 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5815 /* Have signal; wait for the link to come up. */
5816
5817 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5818 /*
5819 * Reset the link, and let autonegotiation do its thing
5820 */
5821 sc->sc_ctrl |= CTRL_LRST;
5822 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5823 delay(1000);
5824 sc->sc_ctrl &= ~CTRL_LRST;
5825 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5826 delay(1000);
5827 }
5828
5829 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5830 delay(10000);
5831 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5832 break;
5833 }
5834
5835 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5836 device_xname(sc->sc_dev),i));
5837
5838 status = CSR_READ(sc, WMREG_STATUS);
5839 DPRINTF(WM_DEBUG_LINK,
5840 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5841 device_xname(sc->sc_dev),status, STATUS_LU));
5842 if (status & STATUS_LU) {
5843 /* Link is up. */
5844 DPRINTF(WM_DEBUG_LINK,
5845 ("%s: LINK: set media -> link up %s\n",
5846 device_xname(sc->sc_dev),
5847 (status & STATUS_FD) ? "FDX" : "HDX"));
5848
5849 /*
5850 * NOTE: CTRL will update TFCE and RFCE automatically,
5851 * so we should update sc->sc_ctrl
5852 */
5853 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5854 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5855 sc->sc_fcrtl &= ~FCRTL_XONE;
5856 if (status & STATUS_FD)
5857 sc->sc_tctl |=
5858 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5859 else
5860 sc->sc_tctl |=
5861 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5862 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5863 sc->sc_fcrtl |= FCRTL_XONE;
5864 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5865 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5866 WMREG_OLD_FCRTL : WMREG_FCRTL,
5867 sc->sc_fcrtl);
5868 sc->sc_tbi_linkup = 1;
5869 } else {
5870 if (i == WM_LINKUP_TIMEOUT)
5871 wm_check_for_link(sc);
5872 /* Link is down. */
5873 DPRINTF(WM_DEBUG_LINK,
5874 ("%s: LINK: set media -> link down\n",
5875 device_xname(sc->sc_dev)));
5876 sc->sc_tbi_linkup = 0;
5877 }
5878 } else {
5879 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5880 device_xname(sc->sc_dev)));
5881 sc->sc_tbi_linkup = 0;
5882 }
5883
5884 wm_tbi_set_linkled(sc);
5885
5886 return 0;
5887 }
5888
5889 /*
5890 * wm_tbi_set_linkled:
5891 *
5892 * Update the link LED on 1000BASE-X devices.
5893 */
5894 static void
5895 wm_tbi_set_linkled(struct wm_softc *sc)
5896 {
5897
5898 if (sc->sc_tbi_linkup)
5899 sc->sc_ctrl |= CTRL_SWDPIN(0);
5900 else
5901 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5902
5903 /* 82540 or newer devices are active low */
5904 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5905
5906 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5907 }
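
/*
 * Example of the polarity handling above (hypothetical trace): with
 * link up on an 82540 or newer chip, CTRL_SWDPIN(0) is first set and
 * the XOR then clears it again, driving the pin low to light the
 * active-low LED.  On older chips the XOR is a no-op and the pin is
 * driven high instead.
 */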
5908
5909 /*
5910 * wm_tbi_check_link:
5911 *
5912 * Check the link on 1000BASE-X devices.
5913 */
5914 static void
5915 wm_tbi_check_link(struct wm_softc *sc)
5916 {
5917 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5918 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5919 uint32_t rxcw, ctrl, status;
5920
5921 status = CSR_READ(sc, WMREG_STATUS);
5922
5923 rxcw = CSR_READ(sc, WMREG_RXCW);
5924 ctrl = CSR_READ(sc, WMREG_CTRL);
5925
5926 /* set link status */
5927 if ((status & STATUS_LU) == 0) {
5928 DPRINTF(WM_DEBUG_LINK,
5929 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5930 sc->sc_tbi_linkup = 0;
5931 } else if (sc->sc_tbi_linkup == 0) {
5932 DPRINTF(WM_DEBUG_LINK,
5933 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5934 (status & STATUS_FD) ? "FDX" : "HDX"));
5935 sc->sc_tbi_linkup = 1;
5936 }
5937
5938 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5939 && ((status & STATUS_LU) == 0)) {
5940 sc->sc_tbi_linkup = 0;
5941 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5942 /* RXCFG storm! */
5943 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5944 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5945 wm_init(ifp);
5946 ifp->if_start(ifp);
5947 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5948 /* If the timer expired, retry autonegotiation */
5949 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5950 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5951 sc->sc_tbi_ticks = 0;
5952 /*
5953 * Reset the link, and let autonegotiation do
5954 * its thing
5955 */
5956 sc->sc_ctrl |= CTRL_LRST;
5957 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5958 delay(1000);
5959 sc->sc_ctrl &= ~CTRL_LRST;
5960 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5961 delay(1000);
5962 CSR_WRITE(sc, WMREG_TXCW,
5963 sc->sc_txcw & ~TXCW_ANE);
5964 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5965 }
5966 }
5967 }
5968
5969 wm_tbi_set_linkled(sc);
5970 }
5971
5972 /*
5973 * wm_gmii_reset:
5974 *
5975 * Reset the PHY.
5976 */
5977 static void
5978 wm_gmii_reset(struct wm_softc *sc)
5979 {
5980 uint32_t reg;
5981 int rv;
5982
5983 /* get phy semaphore */
5984 switch (sc->sc_type) {
5985 case WM_T_82571:
5986 case WM_T_82572:
5987 case WM_T_82573:
5988 case WM_T_82574:
5989 case WM_T_82583:
5990 /* XXX should get sw semaphore, too */
5991 rv = wm_get_swsm_semaphore(sc);
5992 break;
5993 case WM_T_82575:
5994 case WM_T_82576:
5995 case WM_T_82580:
5996 case WM_T_82580ER:
5997 case WM_T_I350:
5998 case WM_T_80003:
5999 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6000 break;
6001 case WM_T_ICH8:
6002 case WM_T_ICH9:
6003 case WM_T_ICH10:
6004 case WM_T_PCH:
6005 case WM_T_PCH2:
6006 rv = wm_get_swfwhw_semaphore(sc);
6007 break;
6008 default:
		/* nothing to do */
6010 rv = 0;
6011 break;
6012 }
6013 if (rv != 0) {
6014 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6015 __func__);
6016 return;
6017 }
6018
6019 switch (sc->sc_type) {
6020 case WM_T_82542_2_0:
6021 case WM_T_82542_2_1:
6022 /* null */
6023 break;
6024 case WM_T_82543:
6025 /*
6026 * With 82543, we need to force speed and duplex on the MAC
6027 * equal to what the PHY speed and duplex configuration is.
6028 * In addition, we need to perform a hardware reset on the PHY
6029 * to take it out of reset.
6030 */
6031 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6032 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6033
6034 /* The PHY reset pin is active-low. */
6035 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6036 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6037 CTRL_EXT_SWDPIN(4));
6038 reg |= CTRL_EXT_SWDPIO(4);
6039
6040 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6041 delay(10*1000);
6042
6043 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6044 delay(150);
6045 #if 0
6046 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6047 #endif
6048 delay(20*1000); /* XXX extra delay to get PHY ID? */
6049 break;
6050 case WM_T_82544: /* reset 10000us */
6051 case WM_T_82540:
6052 case WM_T_82545:
6053 case WM_T_82545_3:
6054 case WM_T_82546:
6055 case WM_T_82546_3:
6056 case WM_T_82541:
6057 case WM_T_82541_2:
6058 case WM_T_82547:
6059 case WM_T_82547_2:
6060 case WM_T_82571: /* reset 100us */
6061 case WM_T_82572:
6062 case WM_T_82573:
6063 case WM_T_82574:
6064 case WM_T_82575:
6065 case WM_T_82576:
6066 case WM_T_82580:
6067 case WM_T_82580ER:
6068 case WM_T_I350:
6069 case WM_T_82583:
6070 case WM_T_80003:
6071 /* generic reset */
6072 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6073 delay(20000);
6074 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6075 delay(20000);
6076
6077 if ((sc->sc_type == WM_T_82541)
6078 || (sc->sc_type == WM_T_82541_2)
6079 || (sc->sc_type == WM_T_82547)
6080 || (sc->sc_type == WM_T_82547_2)) {
			/* workarounds for the IGP PHYs are done in igp_reset() */
6082 /* XXX add code to set LED after phy reset */
6083 }
6084 break;
6085 case WM_T_ICH8:
6086 case WM_T_ICH9:
6087 case WM_T_ICH10:
6088 case WM_T_PCH:
6089 case WM_T_PCH2:
6090 /* generic reset */
6091 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6092 delay(100);
6093 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6094 delay(150);
6095 break;
6096 default:
6097 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6098 __func__);
6099 break;
6100 }
6101
6102 /* release PHY semaphore */
6103 switch (sc->sc_type) {
6104 case WM_T_82571:
6105 case WM_T_82572:
6106 case WM_T_82573:
6107 case WM_T_82574:
6108 case WM_T_82583:
6109 /* XXX should put sw semaphore, too */
6110 wm_put_swsm_semaphore(sc);
6111 break;
6112 case WM_T_82575:
6113 case WM_T_82576:
6114 case WM_T_82580:
6115 case WM_T_82580ER:
6116 case WM_T_I350:
6117 case WM_T_80003:
6118 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6119 break;
6120 case WM_T_ICH8:
6121 case WM_T_ICH9:
6122 case WM_T_ICH10:
6123 case WM_T_PCH:
6124 case WM_T_PCH2:
6125 wm_put_swfwhw_semaphore(sc);
6126 break;
	default:
		/* nothing to do */
		break;
6131 }
6132
6133 /* get_cfg_done */
6134 wm_get_cfg_done(sc);
6135
6136 /* extra setup */
6137 switch (sc->sc_type) {
6138 case WM_T_82542_2_0:
6139 case WM_T_82542_2_1:
6140 case WM_T_82543:
6141 case WM_T_82544:
6142 case WM_T_82540:
6143 case WM_T_82545:
6144 case WM_T_82545_3:
6145 case WM_T_82546:
6146 case WM_T_82546_3:
6147 case WM_T_82541_2:
6148 case WM_T_82547_2:
6149 case WM_T_82571:
6150 case WM_T_82572:
6151 case WM_T_82573:
6152 case WM_T_82574:
6153 case WM_T_82575:
6154 case WM_T_82576:
6155 case WM_T_82580:
6156 case WM_T_82580ER:
6157 case WM_T_I350:
6158 case WM_T_82583:
6159 case WM_T_80003:
6160 /* null */
6161 break;
6162 case WM_T_82541:
6163 case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
6165 break;
6166 case WM_T_ICH8:
6167 case WM_T_ICH9:
6168 case WM_T_ICH10:
6169 case WM_T_PCH:
6170 case WM_T_PCH2:
		/* Allow time for h/w to get to a quiescent state after reset */
6172 delay(10*1000);
6173
6174 if (sc->sc_type == WM_T_PCH)
6175 wm_hv_phy_workaround_ich8lan(sc);
6176
6177 if (sc->sc_type == WM_T_PCH2)
6178 wm_lv_phy_workaround_ich8lan(sc);
6179
6180 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6181 /*
6182 * dummy read to clear the phy wakeup bit after lcd
6183 * reset
6184 */
6185 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6186 }
6187
6188 /*
		 * XXX Configure the LCD with the extended configuration region
6190 * in NVM
6191 */
6192
6193 /* Configure the LCD with the OEM bits in NVM */
6194 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6195 /*
6196 * Disable LPLU.
6197 * XXX It seems that 82567 has LPLU, too.
6198 */
6199 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6201 reg |= HV_OEM_BITS_ANEGNOW;
6202 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6203 }
6204 break;
6205 default:
6206 panic("%s: unknown type\n", __func__);
6207 break;
6208 }
6209 }
6210
6211 /*
6212 * wm_gmii_mediainit:
6213 *
6214 * Initialize media for use on 1000BASE-T devices.
6215 */
6216 static void
6217 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6218 {
6219 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6220
6221 /* We have MII. */
6222 sc->sc_flags |= WM_F_HAS_MII;
6223
6224 if (sc->sc_type == WM_T_80003)
6225 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6226 else
6227 sc->sc_tipg = TIPG_1000T_DFLT;
6228
6229 /*
6230 * Let the chip set speed/duplex on its own based on
6231 * signals from the PHY.
6232 * XXXbouyer - I'm not sure this is right for the 80003,
6233 * the em driver only sets CTRL_SLU here - but it seems to work.
6234 */
6235 sc->sc_ctrl |= CTRL_SLU;
6236 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6237
6238 /* Initialize our media structures and probe the GMII. */
6239 sc->sc_mii.mii_ifp = ifp;
6240
6241 switch (prodid) {
6242 case PCI_PRODUCT_INTEL_PCH_M_LM:
6243 case PCI_PRODUCT_INTEL_PCH_M_LC:
6244 /* 82577 */
6245 sc->sc_phytype = WMPHY_82577;
6246 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6247 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6248 break;
6249 case PCI_PRODUCT_INTEL_PCH_D_DM:
6250 case PCI_PRODUCT_INTEL_PCH_D_DC:
6251 /* 82578 */
6252 sc->sc_phytype = WMPHY_82578;
6253 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6254 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6255 break;
6256 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6257 case PCI_PRODUCT_INTEL_PCH2_LV_V:
		/* 82579 */
6259 sc->sc_phytype = WMPHY_82579;
6260 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6261 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6262 break;
6263 case PCI_PRODUCT_INTEL_82801I_BM:
6264 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6265 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6266 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6267 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6268 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6269 /* 82567 */
6270 sc->sc_phytype = WMPHY_BM;
6271 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
6272 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
6273 break;
6274 default:
6275 if ((sc->sc_flags & WM_F_SGMII) != 0) {
6276 sc->sc_mii.mii_readreg = wm_sgmii_readreg;
6277 sc->sc_mii.mii_writereg = wm_sgmii_writereg;
6278 } else if (sc->sc_type >= WM_T_80003) {
6279 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
6280 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
6281 } else if (sc->sc_type >= WM_T_82544) {
6282 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
6283 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
6284 } else {
6285 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
6286 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
6287 }
6288 break;
6289 }
6290 sc->sc_mii.mii_statchg = wm_gmii_statchg;
6291
6292 wm_gmii_reset(sc);
6293
6294 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6295 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
6296 wm_gmii_mediastatus);
6297
6298 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6299 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6300 || (sc->sc_type == WM_T_I350)) {
6301 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6302 /* Attach only one port */
6303 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6304 MII_OFFSET_ANY, MIIF_DOPAUSE);
6305 } else {
6306 int i;
6307 uint32_t ctrl_ext;
6308
6309 /* Power on sgmii phy if it is disabled */
6310 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6311 CSR_WRITE(sc, WMREG_CTRL_EXT,
6312 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6313 CSR_WRITE_FLUSH(sc);
6314 delay(300*1000); /* XXX too long */
6315
			/* try PHY addresses 1 through 7 */
6317 for (i = 1; i < 8; i++)
6318 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6319 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6320
6321 /* restore previous sfp cage power state */
6322 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6323 }
6324 } else {
6325 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6326 MII_OFFSET_ANY, MIIF_DOPAUSE);
6327 }
6328
6329 if ((sc->sc_type == WM_T_PCH2) &&
6330 (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL)) {
6331 wm_set_mdio_slow_mode_hv(sc);
6332 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6333 MII_OFFSET_ANY, MIIF_DOPAUSE);
6334 }
6335
6336 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
6337 /* if failed, retry with *_bm_* */
6338 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
6339 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
6340
6341 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6342 MII_OFFSET_ANY, MIIF_DOPAUSE);
6343 }
6344 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
6345 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6346 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
6347 sc->sc_phytype = WMPHY_NONE;
6348 } else {
6349 /* Check PHY type */
6350 uint32_t model;
6351 struct mii_softc *child;
6352
6353 child = LIST_FIRST(&sc->sc_mii.mii_phys);
6354 if (device_is_a(child->mii_dev, "igphy")) {
6355 struct igphy_softc *isc = (struct igphy_softc *)child;
6356
6357 model = isc->sc_mii.mii_mpd_model;
6358 if (model == MII_MODEL_yyINTEL_I82566)
6359 sc->sc_phytype = WMPHY_IGP_3;
6360 }
6361
6362 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
6363 }
6364 }
6365
6366 /*
6367 * wm_gmii_mediastatus: [ifmedia interface function]
6368 *
6369 * Get the current interface media status on a 1000BASE-T device.
6370 */
6371 static void
6372 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6373 {
6374 struct wm_softc *sc = ifp->if_softc;
6375
6376 ether_mediastatus(ifp, ifmr);
6377 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6378 | sc->sc_flowflags;
6379 }
6380
6381 /*
6382 * wm_gmii_mediachange: [ifmedia interface function]
6383 *
6384 * Set hardware to newly-selected media on a 1000BASE-T device.
6385 */
6386 static int
6387 wm_gmii_mediachange(struct ifnet *ifp)
6388 {
6389 struct wm_softc *sc = ifp->if_softc;
6390 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6391 int rc;
6392
6393 if ((ifp->if_flags & IFF_UP) == 0)
6394 return 0;
6395
6396 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6397 sc->sc_ctrl |= CTRL_SLU;
6398 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6399 || (sc->sc_type > WM_T_82543)) {
6400 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6401 } else {
6402 sc->sc_ctrl &= ~CTRL_ASDE;
6403 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6404 if (ife->ifm_media & IFM_FDX)
6405 sc->sc_ctrl |= CTRL_FD;
6406 switch (IFM_SUBTYPE(ife->ifm_media)) {
6407 case IFM_10_T:
6408 sc->sc_ctrl |= CTRL_SPEED_10;
6409 break;
6410 case IFM_100_TX:
6411 sc->sc_ctrl |= CTRL_SPEED_100;
6412 break;
6413 case IFM_1000_T:
6414 sc->sc_ctrl |= CTRL_SPEED_1000;
6415 break;
6416 default:
6417 panic("wm_gmii_mediachange: bad media 0x%x",
6418 ife->ifm_media);
6419 }
6420 }
6421 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6422 if (sc->sc_type <= WM_T_82543)
6423 wm_gmii_reset(sc);
6424
6425 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6426 return 0;
6427 return rc;
6428 }
6429
6430 #define MDI_IO CTRL_SWDPIN(2)
6431 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6432 #define MDI_CLK CTRL_SWDPIN(3)
6433
6434 static void
6435 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6436 {
6437 uint32_t i, v;
6438
6439 v = CSR_READ(sc, WMREG_CTRL);
6440 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6441 v |= MDI_DIR | CTRL_SWDPIO(3);
6442
6443 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6444 if (data & i)
6445 v |= MDI_IO;
6446 else
6447 v &= ~MDI_IO;
6448 CSR_WRITE(sc, WMREG_CTRL, v);
6449 delay(10);
6450 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6451 delay(10);
6452 CSR_WRITE(sc, WMREG_CTRL, v);
6453 delay(10);
6454 }
6455 }
6456
6457 static uint32_t
6458 i82543_mii_recvbits(struct wm_softc *sc)
6459 {
6460 uint32_t v, i, data = 0;
6461
6462 v = CSR_READ(sc, WMREG_CTRL);
6463 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6464 v |= CTRL_SWDPIO(3);
6465
6466 CSR_WRITE(sc, WMREG_CTRL, v);
6467 delay(10);
6468 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6469 delay(10);
6470 CSR_WRITE(sc, WMREG_CTRL, v);
6471 delay(10);
6472
6473 for (i = 0; i < 16; i++) {
6474 data <<= 1;
6475 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6476 delay(10);
6477 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6478 data |= 1;
6479 CSR_WRITE(sc, WMREG_CTRL, v);
6480 delay(10);
6481 }
6482
6483 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6484 delay(10);
6485 CSR_WRITE(sc, WMREG_CTRL, v);
6486 delay(10);
6487
6488 return data;
6489 }
6490
6491 #undef MDI_IO
6492 #undef MDI_DIR
6493 #undef MDI_CLK
6494
6495 /*
6496 * wm_gmii_i82543_readreg: [mii interface function]
6497 *
6498 * Read a PHY register on the GMII (i82543 version).
6499 */
6500 static int
6501 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6502 {
6503 struct wm_softc *sc = device_private(self);
6504 int rv;
6505
6506 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6507 i82543_mii_sendbits(sc, reg | (phy << 5) |
6508 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6509 rv = i82543_mii_recvbits(sc) & 0xffff;
6510
6511 DPRINTF(WM_DEBUG_GMII,
6512 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6513 device_xname(sc->sc_dev), phy, reg, rv));
6514
6515 return rv;
6516 }
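
/*
 * Worked example (hypothetical values) of the frame built above:
 * reading register 1 of PHY 1 shifts out 32 preamble ones, then the
 * 14-bit word
 *
 *	reg | (phy << 5) | (MII_COMMAND_READ << 10) |
 *	    (MII_COMMAND_START << 12) = 0x1821
 *
 * MSB-first: 01 (start), 10 (read opcode), 00001 (PHY address),
 * 00001 (register address), assuming the usual MII_COMMAND_* values
 * from mii.h (start = 0x1, read = 0x2).  i82543_mii_recvbits() then
 * clocks in the turnaround and 16 data bits.
 */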
6517
6518 /*
6519 * wm_gmii_i82543_writereg: [mii interface function]
6520 *
6521 * Write a PHY register on the GMII (i82543 version).
6522 */
6523 static void
6524 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6525 {
6526 struct wm_softc *sc = device_private(self);
6527
6528 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6529 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6530 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6531 (MII_COMMAND_START << 30), 32);
6532 }
6533
6534 /*
6535 * wm_gmii_i82544_readreg: [mii interface function]
6536 *
6537 * Read a PHY register on the GMII.
6538 */
6539 static int
6540 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6541 {
6542 struct wm_softc *sc = device_private(self);
6543 uint32_t mdic = 0;
6544 int i, rv;
6545
6546 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6547 MDIC_REGADD(reg));
6548
6549 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6550 mdic = CSR_READ(sc, WMREG_MDIC);
6551 if (mdic & MDIC_READY)
6552 break;
6553 delay(50);
6554 }
6555
6556 if ((mdic & MDIC_READY) == 0) {
6557 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6558 device_xname(sc->sc_dev), phy, reg);
6559 rv = 0;
6560 } else if (mdic & MDIC_E) {
6561 #if 0 /* This is normal if no PHY is present. */
6562 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6563 device_xname(sc->sc_dev), phy, reg);
6564 #endif
6565 rv = 0;
6566 } else {
6567 rv = MDIC_DATA(mdic);
6568 if (rv == 0xffff)
6569 rv = 0;
6570 }
6571
6572 return rv;
6573 }
6574
6575 /*
6576 * wm_gmii_i82544_writereg: [mii interface function]
6577 *
6578 * Write a PHY register on the GMII.
6579 */
6580 static void
6581 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6582 {
6583 struct wm_softc *sc = device_private(self);
6584 uint32_t mdic = 0;
6585 int i;
6586
6587 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6588 MDIC_REGADD(reg) | MDIC_DATA(val));
6589
6590 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6591 mdic = CSR_READ(sc, WMREG_MDIC);
6592 if (mdic & MDIC_READY)
6593 break;
6594 delay(50);
6595 }
6596
6597 if ((mdic & MDIC_READY) == 0)
6598 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6599 device_xname(sc->sc_dev), phy, reg);
6600 else if (mdic & MDIC_E)
6601 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6602 device_xname(sc->sc_dev), phy, reg);
6603 }
6604
6605 /*
6606 * wm_gmii_i80003_readreg: [mii interface function]
6607 *
 *	Read a PHY register on the kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6611 */
6612 static int
6613 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6614 {
6615 struct wm_softc *sc = device_private(self);
6616 int sem;
6617 int rv;
6618
6619 if (phy != 1) /* only one PHY on kumeran bus */
6620 return 0;
6621
6622 sem = swfwphysem[sc->sc_funcid];
6623 if (wm_get_swfw_semaphore(sc, sem)) {
6624 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6625 __func__);
6626 return 0;
6627 }
6628
6629 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6630 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6631 reg >> GG82563_PAGE_SHIFT);
6632 } else {
6633 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6634 reg >> GG82563_PAGE_SHIFT);
6635 }
	/* Wait another 200us to work around a bug with the MDIC ready bit */
6637 delay(200);
6638 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6639 delay(200);
6640
6641 wm_put_swfw_semaphore(sc, sem);
6642 return rv;
6643 }
6644
6645 /*
6646 * wm_gmii_i80003_writereg: [mii interface function]
6647 *
 *	Write a PHY register on the kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6651 */
6652 static void
6653 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6654 {
6655 struct wm_softc *sc = device_private(self);
6656 int sem;
6657
6658 if (phy != 1) /* only one PHY on kumeran bus */
6659 return;
6660
6661 sem = swfwphysem[sc->sc_funcid];
6662 if (wm_get_swfw_semaphore(sc, sem)) {
6663 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6664 __func__);
6665 return;
6666 }
6667
6668 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6669 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6670 reg >> GG82563_PAGE_SHIFT);
6671 } else {
6672 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6673 reg >> GG82563_PAGE_SHIFT);
6674 }
	/* Wait another 200us to work around a bug with the MDIC ready bit */
6676 delay(200);
6677 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6678 delay(200);
6679
6680 wm_put_swfw_semaphore(sc, sem);
6681 }
6682
6683 /*
6684 * wm_gmii_bm_readreg: [mii interface function]
6685 *
 *	Read a PHY register on the BM (82567) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6689 */
6690 static int
6691 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6692 {
6693 struct wm_softc *sc = device_private(self);
6694 int sem;
6695 int rv;
6696
6697 sem = swfwphysem[sc->sc_funcid];
6698 if (wm_get_swfw_semaphore(sc, sem)) {
6699 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6700 __func__);
6701 return 0;
6702 }
6703
6704 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6705 if (phy == 1)
6706 wm_gmii_i82544_writereg(self, phy, 0x1f,
6707 reg);
6708 else
6709 wm_gmii_i82544_writereg(self, phy,
6710 GG82563_PHY_PAGE_SELECT,
6711 reg >> GG82563_PAGE_SHIFT);
6712 }
6713
6714 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6715 wm_put_swfw_semaphore(sc, sem);
6716 return rv;
6717 }
6718
6719 /*
6720 * wm_gmii_bm_writereg: [mii interface function]
6721 *
 *	Write a PHY register on the BM (82567) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6725 */
6726 static void
6727 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6728 {
6729 struct wm_softc *sc = device_private(self);
6730 int sem;
6731
6732 sem = swfwphysem[sc->sc_funcid];
6733 if (wm_get_swfw_semaphore(sc, sem)) {
6734 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6735 __func__);
6736 return;
6737 }
6738
6739 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6740 if (phy == 1)
6741 wm_gmii_i82544_writereg(self, phy, 0x1f,
6742 reg);
6743 else
6744 wm_gmii_i82544_writereg(self, phy,
6745 GG82563_PHY_PAGE_SELECT,
6746 reg >> GG82563_PAGE_SHIFT);
6747 }
6748
6749 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6750 wm_put_swfw_semaphore(sc, sem);
6751 }
6752
6753 static void
6754 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6755 {
6756 struct wm_softc *sc = device_private(self);
6757 uint16_t regnum = BM_PHY_REG_NUM(offset);
6758 uint16_t wuce;
6759
6760 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6761 if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
6763 }
6764
6765 /* Set page 769 */
6766 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6767 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6768
6769 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6770
6771 wuce &= ~BM_WUC_HOST_WU_BIT;
6772 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6773 wuce | BM_WUC_ENABLE_BIT);
6774
6775 /* Select page 800 */
6776 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6777 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6778
6779 /* Write page 800 */
6780 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6781
6782 if (rd)
6783 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6784 else
6785 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6786
6787 /* Set page 769 */
6788 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6789 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6790
6791 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6792 }
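
/*
 * Minimal usage sketch of the helper above (disabled; assumes the
 * surrounding driver context with a valid softc "sc"): reading the
 * page-800 BM_WUC register, as wm_gmii_hv_readreg() does.
 */
#if 0
	int16_t val;

	wm_access_phy_wakeup_reg_bm(sc->sc_dev, BM_WUC, &val, 1);
#endif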
6793
6794 /*
6795 * wm_gmii_hv_readreg: [mii interface function]
6796 *
 *	Read a PHY register on the HV (82577/82578/82579) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6800 */
6801 static int
6802 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6803 {
6804 struct wm_softc *sc = device_private(self);
6805 uint16_t page = BM_PHY_REG_PAGE(reg);
6806 uint16_t regnum = BM_PHY_REG_NUM(reg);
6807 uint16_t val;
6808 int rv;
6809
6810 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6811 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6812 __func__);
6813 return 0;
6814 }
6815
6816 /* XXX Workaround failure in MDIO access while cable is disconnected */
6817 if (sc->sc_phytype == WMPHY_82577) {
6818 /* XXX must write */
6819 }
6820
6821 /* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return val;
	}
6826
6827 /*
	 * Pages above 0 and below HV_INTC_FC_PAGE_START work differently
	 * than the rest; they are not handled here.
6830 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return 0;
	}
6835
6836 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6837 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6838 page << BME1000_PAGE_SHIFT);
6839 }
6840
6841 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6842 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6843 return rv;
6844 }
6845
6846 /*
6847 * wm_gmii_hv_writereg: [mii interface function]
6848 *
 *	Write a PHY register on the HV (82577/82578/82579) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6852 */
6853 static void
6854 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6855 {
6856 struct wm_softc *sc = device_private(self);
6857 uint16_t page = BM_PHY_REG_PAGE(reg);
6858 uint16_t regnum = BM_PHY_REG_NUM(reg);
6859
6860 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6861 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6862 __func__);
6863 return;
6864 }
6865
6866 /* XXX Workaround failure in MDIO access while cable is disconnected */
6867
6868 /* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6876
6877 /*
	 * Pages above 0 and below HV_INTC_FC_PAGE_START work differently
	 * than the rest; they are not handled here.
6880 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6885
6886 /*
6887 * XXX Workaround MDIO accesses being disabled after entering IEEE
6888 * Power Down (whenever bit 11 of the PHY control register is set)
6889 */
6890
6891 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6892 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6893 page << BME1000_PAGE_SHIFT);
6894 }
6895
6896 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6897 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6898 }
6899
6900 /*
 * wm_sgmii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the SGMII, via the I2CCMD interface.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6906 */
6907 static int
6908 wm_sgmii_readreg(device_t self, int phy, int reg)
6909 {
6910 struct wm_softc *sc = device_private(self);
6911 uint32_t i2ccmd;
6912 int i, rv;
6913
6914 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6915 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6916 __func__);
6917 return 0;
6918 }
6919
6920 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6921 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6922 | I2CCMD_OPCODE_READ;
6923 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6924
6925 /* Poll the ready bit */
6926 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6927 delay(50);
6928 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6929 if (i2ccmd & I2CCMD_READY)
6930 break;
6931 }
6932 if ((i2ccmd & I2CCMD_READY) == 0)
6933 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6934 if ((i2ccmd & I2CCMD_ERROR) != 0)
6935 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6936
6937 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6938
6939 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6940 return rv;
6941 }
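
/*
 * Note on the byte swap above (hypothetical value): the I2CCMD data
 * field comes back byte-swapped, so if the low 16 bits of I2CCMD read
 * as 0x3412, rv becomes 0x1234.
 */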
6942
6943 /*
 * wm_sgmii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the SGMII, via the I2CCMD interface.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6949 */
6950 static void
6951 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6952 {
6953 struct wm_softc *sc = device_private(self);
6954 uint32_t i2ccmd;
6955 int i;
6956
6957 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6958 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6959 __func__);
6960 return;
6961 }
6962
6963 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6964 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6965 | I2CCMD_OPCODE_WRITE;
6966 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6967
6968 /* Poll the ready bit */
6969 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6970 delay(50);
6971 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6972 if (i2ccmd & I2CCMD_READY)
6973 break;
6974 }
6975 if ((i2ccmd & I2CCMD_READY) == 0)
6976 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6977 if ((i2ccmd & I2CCMD_ERROR) != 0)
6978 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6979
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6981 }
6982
6983 /*
6984 * wm_gmii_statchg: [mii interface function]
6985 *
6986 * Callback from MII layer when media changes.
6987 */
6988 static void
6989 wm_gmii_statchg(struct ifnet *ifp)
6990 {
6991 struct wm_softc *sc = ifp->if_softc;
6992 struct mii_data *mii = &sc->sc_mii;
6993
6994 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6995 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6996 sc->sc_fcrtl &= ~FCRTL_XONE;
6997
6998 /*
6999 * Get flow control negotiation result.
7000 */
7001 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7002 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7003 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7004 mii->mii_media_active &= ~IFM_ETH_FMASK;
7005 }
7006
7007 if (sc->sc_flowflags & IFM_FLOW) {
7008 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7009 sc->sc_ctrl |= CTRL_TFCE;
7010 sc->sc_fcrtl |= FCRTL_XONE;
7011 }
7012 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7013 sc->sc_ctrl |= CTRL_RFCE;
7014 }
7015
7016 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7017 DPRINTF(WM_DEBUG_LINK,
7018 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7019 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7020 } else {
7021 DPRINTF(WM_DEBUG_LINK,
7022 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7023 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7024 }
7025
7026 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7027 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7028 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7029 : WMREG_FCRTL, sc->sc_fcrtl);
7030 if (sc->sc_type == WM_T_80003) {
7031 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7032 case IFM_1000_T:
7033 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7034 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7035 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7036 break;
7037 default:
7038 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7039 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7040 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7041 break;
7042 }
7043 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7044 }
7045 }
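
/*
 * Example of the flow-control mapping above (hypothetical outcome): if
 * autonegotiation resolved to symmetric flow control, sc_flowflags
 * holds IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, so CTRL gets
 * both CTRL_TFCE and CTRL_RFCE and FCRTL gets FCRTL_XONE.
 */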
7046
7047 /*
7048 * wm_kmrn_readreg:
7049 *
7050 * Read a kumeran register
7051 */
7052 static int
7053 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7054 {
7055 int rv;
7056
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7058 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7059 aprint_error_dev(sc->sc_dev,
7060 "%s: failed to get semaphore\n", __func__);
7061 return 0;
7062 }
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7064 if (wm_get_swfwhw_semaphore(sc)) {
7065 aprint_error_dev(sc->sc_dev,
7066 "%s: failed to get semaphore\n", __func__);
7067 return 0;
7068 }
7069 }
7070
7071 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7072 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7073 KUMCTRLSTA_REN);
7074 delay(2);
7075
7076 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7077
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
		wm_put_swfwhw_semaphore(sc);
7082
7083 return rv;
7084 }
7085
7086 /*
7087 * wm_kmrn_writereg:
7088 *
7089 * Write a kumeran register
7090 */
7091 static void
7092 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7093 {
7094
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7096 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7097 aprint_error_dev(sc->sc_dev,
7098 "%s: failed to get semaphore\n", __func__);
7099 return;
7100 }
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7102 if (wm_get_swfwhw_semaphore(sc)) {
7103 aprint_error_dev(sc->sc_dev,
7104 "%s: failed to get semaphore\n", __func__);
7105 return;
7106 }
7107 }
7108
7109 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7110 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7111 (val & KUMCTRLSTA_MASK));
7112
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
		wm_put_swfwhw_semaphore(sc);
7117 }
7118
7119 static int
7120 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7121 {
7122 uint32_t eecd = 0;
7123
7124 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7125 || sc->sc_type == WM_T_82583) {
7126 eecd = CSR_READ(sc, WMREG_EECD);
7127
7128 /* Isolate bits 15 & 16 */
7129 eecd = ((eecd >> 15) & 0x03);
7130
7131 /* If both bits are set, device is Flash type */
7132 if (eecd == 0x03)
7133 return 0;
7134 }
7135 return 1;
7136 }
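
/*
 * Worked example (hypothetical value): with EECD = 0x00018000 on an
 * 82573, bits 15 and 16 are both set, ((eecd >> 15) & 0x03) == 0x03,
 * and the function returns 0 (Flash rather than on-board EEPROM).
 */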
7137
7138 static int
7139 wm_get_swsm_semaphore(struct wm_softc *sc)
7140 {
7141 int32_t timeout;
7142 uint32_t swsm;
7143
7144 /* Get the FW semaphore. */
7145 timeout = 1000 + 1; /* XXX */
7146 while (timeout) {
7147 swsm = CSR_READ(sc, WMREG_SWSM);
7148 swsm |= SWSM_SWESMBI;
7149 CSR_WRITE(sc, WMREG_SWSM, swsm);
7150 /* if we managed to set the bit we got the semaphore. */
7151 swsm = CSR_READ(sc, WMREG_SWSM);
7152 if (swsm & SWSM_SWESMBI)
7153 break;
7154
7155 delay(50);
7156 timeout--;
7157 }
7158
7159 if (timeout == 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not acquire SWSM semaphore\n");
7161 /* Release semaphores */
7162 wm_put_swsm_semaphore(sc);
7163 return 1;
7164 }
7165 return 0;
7166 }
7167
7168 static void
7169 wm_put_swsm_semaphore(struct wm_softc *sc)
7170 {
7171 uint32_t swsm;
7172
7173 swsm = CSR_READ(sc, WMREG_SWSM);
7174 swsm &= ~(SWSM_SWESMBI);
7175 CSR_WRITE(sc, WMREG_SWSM, swsm);
7176 }
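
/*
 * Minimal usage sketch of the pair above (disabled; assumes the
 * surrounding driver context): the get/put calls bracket any access
 * to a resource shared with firmware.
 */
#if 0
	if (wm_get_swsm_semaphore(sc) == 0) {
		/* ... access the shared NVM/PHY resource ... */
		wm_put_swsm_semaphore(sc);
	}
#endif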
7177
7178 static int
7179 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7180 {
7181 uint32_t swfw_sync;
7182 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7183 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7184 int timeout = 200;
7185
7186 for (timeout = 0; timeout < 200; timeout++) {
7187 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7188 if (wm_get_swsm_semaphore(sc)) {
7189 aprint_error_dev(sc->sc_dev,
7190 "%s: failed to get semaphore\n",
7191 __func__);
7192 return 1;
7193 }
7194 }
7195 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7196 if ((swfw_sync & (swmask | fwmask)) == 0) {
7197 swfw_sync |= swmask;
7198 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7199 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7200 wm_put_swsm_semaphore(sc);
7201 return 0;
7202 }
7203 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7204 wm_put_swsm_semaphore(sc);
7205 delay(5000);
7206 }
7207 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7208 device_xname(sc->sc_dev), mask, swfw_sync);
7209 return 1;
7210 }
7211
7212 static void
7213 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7214 {
7215 uint32_t swfw_sync;
7216
7217 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7218 while (wm_get_swsm_semaphore(sc) != 0)
7219 continue;
7220 }
7221 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7222 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7223 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7224 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7225 wm_put_swsm_semaphore(sc);
7226 }
7227
7228 static int
7229 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7230 {
7231 uint32_t ext_ctrl;
7232 int timeout = 200;
7233
7234 for (timeout = 0; timeout < 200; timeout++) {
7235 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7236 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7237 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7238
7239 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7240 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7241 return 0;
7242 delay(5000);
7243 }
7244 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7245 device_xname(sc->sc_dev), ext_ctrl);
7246 return 1;
7247 }
7248
7249 static void
7250 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7251 {
7252 uint32_t ext_ctrl;
7253 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7254 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7255 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7256 }
7257
7258 static int
7259 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7260 {
7261 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7262 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7263
7264 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
7265 /* Value of bit 22 corresponds to the flash bank we're on. */
7266 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
7267 } else {
7268 uint8_t bank_high_byte;
7269 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
7270 if ((bank_high_byte & 0xc0) == 0x80)
7271 *bank = 0;
7272 else {
7273 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7274 &bank_high_byte);
7275 if ((bank_high_byte & 0xc0) == 0x80)
7276 *bank = 1;
7277 else {
7278 aprint_error_dev(sc->sc_dev,
7279 "EEPROM not present\n");
7280 return -1;
7281 }
7282 }
7283 }
7284
7285 return 0;
7286 }
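
/*
 * Worked example (hypothetical value): if the signature byte read at
 * act_offset is 0x9a, then (0x9a & 0xc0) == 0x80, so the bank 0
 * signature is valid and *bank is set to 0.
 */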
7287
7288 /******************************************************************************
7289 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7290 * register.
7291 *
7292 * sc - Struct containing variables accessed by shared code
7293 * offset - offset of word in the EEPROM to read
7294 * data - word read from the EEPROM
7295 * words - number of words to read
7296 *****************************************************************************/
7297 static int
7298 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7299 {
7300 int32_t error = 0;
7301 uint32_t flash_bank = 0;
7302 uint32_t act_offset = 0;
7303 uint32_t bank_offset = 0;
7304 uint16_t word = 0;
7305 uint16_t i = 0;
7306
7307 /* We need to know which is the valid flash bank. In the event
7308 * that we didn't allocate eeprom_shadow_ram, we may not be
7309 * managing flash_bank. So it cannot be trusted and needs
7310 * to be updated with each read.
7311 */
7312 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7313 if (error) {
7314 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7315 __func__);
7316 return error;
7317 }
7318
7319 /*
7320 * Adjust offset appropriately if we're on bank 1 - adjust for word
7321 * size
7322 */
7323 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7324
7325 error = wm_get_swfwhw_semaphore(sc);
7326 if (error) {
7327 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7328 __func__);
7329 return error;
7330 }
7331
7332 for (i = 0; i < words; i++) {
7333 /* The NVM part needs a byte offset, hence * 2 */
7334 act_offset = bank_offset + ((offset + i) * 2);
7335 error = wm_read_ich8_word(sc, act_offset, &word);
7336 if (error) {
7337 aprint_error_dev(sc->sc_dev,
7338 "%s: failed to read NVM\n", __func__);
7339 break;
7340 }
7341 data[i] = word;
7342 }
7343
7344 wm_put_swfwhw_semaphore(sc);
7345 return error;
7346 }
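
/*
 * Minimal usage sketch of the reader above (disabled; assumes the
 * surrounding driver context): fetching one 16-bit word at word
 * offset 0 of the currently valid flash bank.
 */
#if 0
	uint16_t word;

	if (wm_read_eeprom_ich8(sc, 0, 1, &word) == 0) {
		/* "word" now holds NVM word 0 */
	}
#endif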
7347
7348 /******************************************************************************
7349 * This function does initial flash setup so that a new read/write/erase cycle
7350 * can be started.
7351 *
7352 * sc - The pointer to the hw structure
7353 ****************************************************************************/
7354 static int32_t
7355 wm_ich8_cycle_init(struct wm_softc *sc)
7356 {
7357 uint16_t hsfsts;
7358 int32_t error = 1;
7359 int32_t i = 0;
7360
7361 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7362
	/* Maybe check the Flash Descriptor Valid bit in HW status */
7364 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7365 return error;
7366 }
7367
	/* Clear FCERR and DAEL in HW status by writing 1s */
7370 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7371
7372 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7373
7374 /*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads 1 after a
	 * hardware reset, which could then indicate whether a cycle is in
	 * progress or has completed.  We should also have some software
	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit,
	 * so that two threads' accesses to those bits are serialized and
	 * two threads can't start a cycle at the same time.
7383 */
7384
7385 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7386 /*
7387 * There is no cycle running at present, so we can start a
7388 * cycle
7389 */
7390
7391 /* Begin by setting Flash Cycle Done. */
7392 hsfsts |= HSFSTS_DONE;
7393 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7394 error = 0;
7395 } else {
7396 /*
		 * Otherwise poll for a while so the current cycle has a
7398 * chance to end before giving up.
7399 */
7400 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7401 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7402 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7403 error = 0;
7404 break;
7405 }
7406 delay(1);
7407 }
7408 if (error == 0) {
7409 /*
			 * The previous cycle finished within the timeout;
			 * now set the Flash Cycle Done bit.
7412 */
7413 hsfsts |= HSFSTS_DONE;
7414 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7415 }
7416 }
7417 return error;
7418 }
7419
7420 /******************************************************************************
7421 * This function starts a flash cycle and waits for its completion
7422 *
7423 * sc - The pointer to the hw structure
7424 ****************************************************************************/
7425 static int32_t
7426 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7427 {
7428 uint16_t hsflctl;
7429 uint16_t hsfsts;
7430 int32_t error = 1;
7431 uint32_t i = 0;
7432
7433 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7434 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7435 hsflctl |= HSFCTL_GO;
7436 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7437
7438 /* wait till FDONE bit is set to 1 */
7439 do {
7440 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7441 if (hsfsts & HSFSTS_DONE)
7442 break;
7443 delay(1);
7444 i++;
7445 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7447 error = 0;
7448
7449 return error;
7450 }
7451
7452 /******************************************************************************
7453 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7454 *
7455 * sc - The pointer to the hw structure
7456 * index - The index of the byte or word to read.
7457 * size - Size of data to read, 1=byte 2=word
7458 * data - Pointer to the word to store the value read.
7459 *****************************************************************************/
7460 static int32_t
7461 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7462 uint32_t size, uint16_t* data)
7463 {
7464 uint16_t hsfsts;
7465 uint16_t hsflctl;
7466 uint32_t flash_linear_address;
7467 uint32_t flash_data = 0;
7468 int32_t error = 1;
7469 int32_t count = 0;
7470
	if (size < 1 || size > 2 || data == NULL ||
7472 index > ICH_FLASH_LINEAR_ADDR_MASK)
7473 return error;
7474
7475 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7476 sc->sc_ich8_flash_base;
7477
7478 do {
7479 delay(1);
7480 /* Steps */
7481 error = wm_ich8_cycle_init(sc);
7482 if (error)
7483 break;
7484
7485 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7486 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7487 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7488 & HSFCTL_BCOUNT_MASK;
7489 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7490 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7491
7492 /*
7493 * Write the last 24 bits of index into Flash Linear address
7494 * field in Flash Address
7495 */
7496 /* TODO: TBD maybe check the index against the size of flash */
7497
7498 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7499
7500 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7501
7502 /*
		 * If the cycle completed without error, read the result from
		 * Flash Data0 (least significant byte first).  If FCERR is
		 * set, clear it and retry the whole sequence up to
		 * ICH_FLASH_CYCLE_REPEAT_COUNT times before giving up.
7507 */
7508 if (error == 0) {
7509 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7510 if (size == 1)
7511 *data = (uint8_t)(flash_data & 0x000000FF);
7512 else if (size == 2)
7513 *data = (uint16_t)(flash_data & 0x0000FFFF);
7514 break;
7515 } else {
7516 /*
7517 * If we've gotten here, then things are probably
7518 * completely hosed, but if the error condition is
7519 * detected, it won't hurt to give it another try...
7520 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7521 */
7522 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7523 if (hsfsts & HSFSTS_ERR) {
7524 /* Repeat for some time before giving up. */
7525 continue;
7526 } else if ((hsfsts & HSFSTS_DONE) == 0)
7527 break;
7528 }
7529 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7530
7531 return error;
7532 }
7533
7534 /******************************************************************************
7535 * Reads a single byte from the NVM using the ICH8 flash access registers.
7536 *
7537 * sc - pointer to wm_hw structure
7538 * index - The index of the byte to read.
7539 * data - Pointer to a byte to store the value read.
7540 *****************************************************************************/
7541 static int32_t
7542 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7543 {
7544 int32_t status;
7545 uint16_t word = 0;
7546
7547 status = wm_read_ich8_data(sc, index, 1, &word);
7548 if (status == 0)
7549 *data = (uint8_t)word;
7550 else
7551 *data = 0;
7552
7553 return status;
7554 }
7555
7556 /******************************************************************************
7557 * Reads a word from the NVM using the ICH8 flash access registers.
7558 *
7559 * sc - pointer to wm_hw structure
7560 * index - The starting byte index of the word to read.
7561 * data - Pointer to a word to store the value read.
7562 *****************************************************************************/
7563 static int32_t
7564 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7565 {
7566 int32_t status;
7567
7568 status = wm_read_ich8_data(sc, index, 2, data);
7569 return status;
7570 }
7571
7572 static int
7573 wm_check_mng_mode(struct wm_softc *sc)
7574 {
7575 int rv;
7576
7577 switch (sc->sc_type) {
7578 case WM_T_ICH8:
7579 case WM_T_ICH9:
7580 case WM_T_ICH10:
7581 case WM_T_PCH:
7582 case WM_T_PCH2:
7583 rv = wm_check_mng_mode_ich8lan(sc);
7584 break;
7585 case WM_T_82574:
7586 case WM_T_82583:
7587 rv = wm_check_mng_mode_82574(sc);
7588 break;
7589 case WM_T_82571:
7590 case WM_T_82572:
7591 case WM_T_82573:
7592 case WM_T_80003:
7593 rv = wm_check_mng_mode_generic(sc);
7594 break;
7595 default:
		/* nothing to do */
7597 rv = 0;
7598 break;
7599 }
7600
7601 return rv;
7602 }
7603
7604 static int
7605 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7606 {
7607 uint32_t fwsm;
7608
7609 fwsm = CSR_READ(sc, WMREG_FWSM);
7610
7611 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7612 return 1;
7613
7614 return 0;
7615 }
7616
7617 static int
7618 wm_check_mng_mode_82574(struct wm_softc *sc)
7619 {
7620 uint16_t data;
7621
7622 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7623
7624 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7625 return 1;
7626
7627 return 0;
7628 }
7629
7630 static int
7631 wm_check_mng_mode_generic(struct wm_softc *sc)
7632 {
7633 uint32_t fwsm;
7634
7635 fwsm = CSR_READ(sc, WMREG_FWSM);
7636
7637 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7638 return 1;
7639
7640 return 0;
7641 }
7642
7643 static int
7644 wm_enable_mng_pass_thru(struct wm_softc *sc)
7645 {
7646 uint32_t manc, fwsm, factps;
7647
7648 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7649 return 0;
7650
7651 manc = CSR_READ(sc, WMREG_MANC);
7652
7653 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7654 device_xname(sc->sc_dev), manc));
7655 if (((manc & MANC_RECV_TCO_EN) == 0)
7656 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7657 return 0;
7658
7659 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7660 fwsm = CSR_READ(sc, WMREG_FWSM);
7661 factps = CSR_READ(sc, WMREG_FACTPS);
7662 if (((factps & FACTPS_MNGCG) == 0)
7663 && ((fwsm & FWSM_MODE_MASK)
7664 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7665 return 1;
7666 } else if (((manc & MANC_SMBUS_EN) != 0)
7667 && ((manc & MANC_ASF_EN) == 0))
7668 return 1;
7669
7670 return 0;
7671 }
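
/*
 * Summary of the checks above (a descriptive note, not from the Intel
 * documentation): pass-through is reported only when ASF firmware is
 * present, MANC allows TCO receive and MAC address filtering, and either
 * the ARC subsystem is valid with the firmware in ICH IAMT mode and
 * manageability clock gating disabled, or management traffic runs over
 * SMBus with ASF disabled.
 */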
7672
7673 static int
7674 wm_check_reset_block(struct wm_softc *sc)
7675 {
7676 uint32_t reg;
7677
7678 switch (sc->sc_type) {
7679 case WM_T_ICH8:
7680 case WM_T_ICH9:
7681 case WM_T_ICH10:
7682 case WM_T_PCH:
7683 case WM_T_PCH2:
7684 reg = CSR_READ(sc, WMREG_FWSM);
7685 if ((reg & FWSM_RSPCIPHY) != 0)
7686 return 0;
7687 else
7688 return -1;
7689 break;
7690 case WM_T_82571:
7691 case WM_T_82572:
7692 case WM_T_82573:
7693 case WM_T_82574:
7694 case WM_T_82583:
7695 case WM_T_80003:
7696 reg = CSR_READ(sc, WMREG_MANC);
7697 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7698 return -1;
7699 else
7700 return 0;
7701 break;
7702 default:
7703 /* no problem */
7704 break;
7705 }
7706
7707 return 0;
7708 }
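
/*
 * Hedged usage sketch (illustrative only; the real call sites are
 * elsewhere in this file): code about to reset the PHY is expected to
 * honour wm_check_reset_block() so that firmware-owned PHYs are left
 * alone.
 */
#if 0
	if (wm_check_reset_block(sc) != 0)
		return;		/* firmware blocks PHY resets; don't touch */
	wm_gmii_reset(sc);
#endif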
7709
7710 static void
7711 wm_get_hw_control(struct wm_softc *sc)
7712 {
7713 uint32_t reg;
7714
7715 switch (sc->sc_type) {
7716 case WM_T_82573:
7717 reg = CSR_READ(sc, WMREG_SWSM);
7718 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7719 break;
7720 case WM_T_82571:
7721 case WM_T_82572:
7722 case WM_T_82574:
7723 case WM_T_82583:
7724 case WM_T_80003:
7725 case WM_T_ICH8:
7726 case WM_T_ICH9:
7727 case WM_T_ICH10:
7728 case WM_T_PCH:
7729 case WM_T_PCH2:
7730 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7731 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7732 break;
7733 default:
7734 break;
7735 }
7736 }
7737
7738 static void
7739 wm_release_hw_control(struct wm_softc *sc)
7740 {
7741 uint32_t reg;
7742
7743 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7744 return;
7745
7746 if (sc->sc_type == WM_T_82573) {
7747 reg = CSR_READ(sc, WMREG_SWSM);
7748 reg &= ~SWSM_DRV_LOAD;
7749 		CSR_WRITE(sc, WMREG_SWSM, reg);
7750 } else {
7751 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7752 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7753 }
7754 }
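
/*
 * Note (a sketch of the intended pairing, inferred from the code above):
 * wm_get_hw_control() sets the DRV_LOAD bit to tell the firmware that the
 * host driver owns the device; wm_release_hw_control() clears it again on
 * detach or suspend so the firmware can take over, e.g.:
 */
#if 0
	wm_get_hw_control(sc);		/* driver takes ownership */
	/* ... normal operation ... */
	wm_release_hw_control(sc);	/* hand the device back to firmware */
#endif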
7755
7756 /* XXX Currently TBI only */
7757 static int
7758 wm_check_for_link(struct wm_softc *sc)
7759 {
7760 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7761 uint32_t rxcw;
7762 uint32_t ctrl;
7763 uint32_t status;
7764 uint32_t sig;
7765
7766 rxcw = CSR_READ(sc, WMREG_RXCW);
7767 ctrl = CSR_READ(sc, WMREG_CTRL);
7768 status = CSR_READ(sc, WMREG_STATUS);
7769
7770 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7771
7772 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7773 device_xname(sc->sc_dev), __func__,
7774 ((ctrl & CTRL_SWDPIN(1)) == sig),
7775 ((status & STATUS_LU) != 0),
7776 ((rxcw & RXCW_C) != 0)
7777 ));
7778
7779 /*
7780 * SWDPIN LU RXCW
7781 * 0 0 0
7782 * 0 0 1 (should not happen)
7783 * 0 1 0 (should not happen)
7784 * 0 1 1 (should not happen)
7785 * 1 0 0 Disable autonego and force linkup
7786 * 1 0 1 got /C/ but not linkup yet
7787 * 1 1 0 (linkup)
7788 * 1 1 1 If IFM_AUTO, back to autonego
7789 *
7790 */
7791 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7792 && ((status & STATUS_LU) == 0)
7793 && ((rxcw & RXCW_C) == 0)) {
7794 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7795 __func__));
7796 sc->sc_tbi_linkup = 0;
7797 /* Disable auto-negotiation in the TXCW register */
7798 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7799
7800 /*
7801 * Force link-up and also force full-duplex.
7802 *
7803 		 * NOTE: the hardware may update the TFCE and RFCE bits in
7804 		 * CTRL automatically, so keep sc->sc_ctrl in sync with it.
7805 */
7806 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7807 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7808 } else if (((status & STATUS_LU) != 0)
7809 && ((rxcw & RXCW_C) != 0)
7810 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7811 sc->sc_tbi_linkup = 1;
7812 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7813 __func__));
7814 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7815 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7816 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7817 && ((rxcw & RXCW_C) != 0)) {
7818 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7819 } else {
7820 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7821 status));
7822 }
7823
7824 return 0;
7825 }
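
/*
 * Rationale for the forced-link case above (an inferred note): when the
 * signal-detect pin is asserted but neither link-up nor /C/ ordered sets
 * are seen, the link partner is presumably not autonegotiating, so the
 * only way to get a usable link is to disable autonegotiation and force
 * link-up with full-duplex.
 */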
7826
7827 /* Work-around for 82566 Kumeran PCS lock loss */
7828 static void
7829 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7830 {
7831 int miistatus, active, i;
7832 int reg;
7833
7834 miistatus = sc->sc_mii.mii_media_status;
7835
7836 /* If the link is not up, do nothing */
7837 	if ((miistatus & IFM_ACTIVE) == 0)
7838 return;
7839
7840 active = sc->sc_mii.mii_media_active;
7841
7842 	/* Nothing to do if the link speed is not 1Gbps */
7843 if (IFM_SUBTYPE(active) != IFM_1000_T)
7844 return;
7845
7846 for (i = 0; i < 10; i++) {
7847 /* read twice */
7848 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7849 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7850 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7851 goto out; /* GOOD! */
7852
7853 /* Reset the PHY */
7854 wm_gmii_reset(sc);
7855 delay(5*1000);
7856 }
7857
7858 /* Disable GigE link negotiation */
7859 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7860 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7861 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7862
7863 /*
7864 * Call gig speed drop workaround on Gig disable before accessing
7865 * any PHY registers.
7866 */
7867 wm_gig_downshift_workaround_ich8lan(sc);
7868
7869 out:
7870 return;
7871 }
7872
7873 /* WOL from S5 stops working */
7874 static void
7875 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7876 {
7877 uint16_t kmrn_reg;
7878
7879 /* Only for igp3 */
7880 if (sc->sc_phytype == WMPHY_IGP_3) {
7881 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7882 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7883 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7884 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7885 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7886 }
7887 }
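
/*
 * Note on the sequence above (inferred from the register names): setting
 * and then immediately clearing the Kumeran near-end loopback bit
 * (KUMCTRLSTA_DIAG_NELPBK) briefly loops the link back, which is what
 * works around the gig downshift problem that breaks WOL from S5 on
 * IGP3 PHYs.
 */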
7888
7889 #ifdef WM_WOL
7890 /* Power down workaround on D3 */
7891 static void
7892 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7893 {
7894 uint32_t reg;
7895 int i;
7896
7897 for (i = 0; i < 2; i++) {
7898 /* Disable link */
7899 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7900 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7901 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7902
7903 /*
7904 * Call gig speed drop workaround on Gig disable before
7905 * accessing any PHY registers
7906 */
7907 if (sc->sc_type == WM_T_ICH8)
7908 wm_gig_downshift_workaround_ich8lan(sc);
7909
7910 /* Write VR power-down enable */
7911 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7912 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7913 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7914 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7915
7916 /* Read it back and test */
7917 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7918 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7919 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7920 break;
7921
7922 /* Issue PHY reset and repeat at most one more time */
7923 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7924 }
7925 }
7926 #endif /* WM_WOL */
7927
7928 /*
7929 * Workaround for pch's PHYs
7930 * XXX should be moved to new PHY driver?
7931 */
7932 static void
7933 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7934 {
7935 if (sc->sc_phytype == WMPHY_82577)
7936 wm_set_mdio_slow_mode_hv(sc);
7937
7938 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7939
7940 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7941
7942 /* 82578 */
7943 if (sc->sc_phytype == WMPHY_82578) {
7944 /* PCH rev. < 3 */
7945 if (sc->sc_rev < 3) {
7946 /* XXX 6 bit shift? Why? Is it page2? */
7947 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7948 0x66c0);
7949 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7950 0xffff);
7951 }
7952
7953 /* XXX phy rev. < 2 */
7954 }
7955
7956 /* Select page 0 */
7957
7958 /* XXX acquire semaphore */
7959 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7960 /* XXX release semaphore */
7961
7962 /*
7963 * Configure the K1 Si workaround during phy reset assuming there is
7964 * link so that it disables K1 if link is in 1Gbps.
7965 */
7966 wm_k1_gig_workaround_hv(sc, 1);
7967 }
7968
7969 static void
7970 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
7971 {
7972
7973 wm_set_mdio_slow_mode_hv(sc);
7974 }
7975
7976 static void
7977 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7978 {
7979 int k1_enable = sc->sc_nvm_k1_enabled;
7980
7981 /* XXX acquire semaphore */
7982
7983 if (link) {
7984 k1_enable = 0;
7985
7986 /* Link stall fix for link up */
7987 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7988 } else {
7989 /* Link stall fix for link down */
7990 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7991 }
7992
7993 wm_configure_k1_ich8lan(sc, k1_enable);
7994
7995 /* XXX release semaphore */
7996 }
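
/*
 * Note (hedged): the 0x0100/0x4100 values written to IGP3_KMRN_DIAG are
 * magic link-stall fixes carried over from Intel reference code; their
 * bit-level meaning is not documented here.  K1 itself is a Kumeran
 * power-saving state that must not be active while a 1Gbps link is up,
 * which is why k1_enable is forced to 0 in the link-up case.
 */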
7997
7998 static void
7999 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8000 {
8001 uint32_t reg;
8002
8003 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8004 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8005 reg | HV_KMRN_MDIO_SLOW);
8006 }
8007
8008 static void
8009 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8010 {
8011 uint32_t ctrl, ctrl_ext, tmp;
8012 uint16_t kmrn_reg;
8013
8014 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8015
8016 if (k1_enable)
8017 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8018 else
8019 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8020
8021 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8022
8023 delay(20);
8024
8025 ctrl = CSR_READ(sc, WMREG_CTRL);
8026 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8027
8028 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8029 tmp |= CTRL_FRCSPD;
8030
8031 CSR_WRITE(sc, WMREG_CTRL, tmp);
8032 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8033 delay(20);
8034
8035 CSR_WRITE(sc, WMREG_CTRL, ctrl);
8036 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8037 delay(20);
8038 }
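
/*
 * Note on the CTRL dance above (an inferred explanation): after changing
 * the K1 setting, the MAC is briefly forced to a fixed speed with speed
 * bypass enabled and then restored, which makes it re-latch the link
 * configuration so the new K1 state actually takes effect.
 */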
8039
8040 static void
8041 wm_smbustopci(struct wm_softc *sc)
8042 {
8043 uint32_t fwsm;
8044
8045 fwsm = CSR_READ(sc, WMREG_FWSM);
8046 if (((fwsm & FWSM_FW_VALID) == 0)
8047 && ((wm_check_reset_block(sc) == 0))) {
8048 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8049 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8050 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8051 delay(10);
8052 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8053 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8054 delay(50*1000);
8055
8056 /*
8057 * Gate automatic PHY configuration by hardware on non-managed
8058 * 82579
8059 */
8060 if (sc->sc_type == WM_T_PCH2)
8061 wm_gate_hw_phy_config_ich8lan(sc, 1);
8062 }
8063 }
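
/*
 * Note (inferred from the register names): asserting CTRL_LANPHYPC_OVERRIDE
 * with CTRL_LANPHYPC_VALUE cleared drives the LAN PHY power control pin
 * low and then releases it, switching the PHY's management interface from
 * SMBus to PCIe when no valid firmware is managing the device.
 */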
8064
8065 static void
8066 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8067 {
8068 uint32_t gcr;
8069 pcireg_t ctrl2;
8070
8071 gcr = CSR_READ(sc, WMREG_GCR);
8072
8073 /* Only take action if timeout value is defaulted to 0 */
8074 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8075 goto out;
8076
8077 if ((gcr & GCR_CAP_VER2) == 0) {
8078 gcr |= GCR_CMPL_TMOUT_10MS;
8079 goto out;
8080 }
8081
8082 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8083 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
8084 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
8085 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8086 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
8087
8088 out:
8089 /* Disable completion timeout resend */
8090 gcr &= ~GCR_CMPL_TMOUT_RESEND;
8091
8092 CSR_WRITE(sc, WMREG_GCR, gcr);
8093 }
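
/*
 * Summary (descriptive note): if a completion timeout is already
 * configured in GCR, only the resend bit is cleared.  Otherwise devices
 * without GCR_CAP_VER2 get a 10ms timeout via GCR, while newer devices
 * are given a 16ms timeout through the PCIe Device Control 2 register.
 * The resend bit is cleared on every path.
 */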
8094
8095 /* special case - for 82575 - need to do manual init ... */
8096 static void
8097 wm_reset_init_script_82575(struct wm_softc *sc)
8098 {
8099 /*
8100 	 * Remark: this is untested code - we have no board without EEPROM.
8101 	 * Same setup as described in the FreeBSD driver for the i82575.
8102 */
8103
8104 /* SerDes configuration via SERDESCTRL */
8105 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8106 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8107 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8108 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8109
8110 /* CCM configuration via CCMCTL register */
8111 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8112 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8113
8114 /* PCIe lanes configuration */
8115 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8116 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8117 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8118 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8119
8120 /* PCIe PLL Configuration */
8121 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8122 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8123 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8124 }
8125
8126 static void
8127 wm_init_manageability(struct wm_softc *sc)
8128 {
8129
8130 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8131 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8132 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8133
8134 		/* Disable hardware interception of ARP */
8135 manc &= ~MANC_ARP_EN;
8136
8137 		/* Enable receiving management packets to the host */
8138 if (sc->sc_type >= WM_T_82571) {
8139 manc |= MANC_EN_MNG2HOST;
8140 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8141 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8142
8143 }
8144
8145 CSR_WRITE(sc, WMREG_MANC, manc);
8146 }
8147 }
8148
8149 static void
8150 wm_release_manageability(struct wm_softc *sc)
8151 {
8152
8153 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8154 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8155
8156 if (sc->sc_type >= WM_T_82571)
8157 manc &= ~MANC_EN_MNG2HOST;
8158
8159 CSR_WRITE(sc, WMREG_MANC, manc);
8160 }
8161 }
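
/*
 * Note (inferred pairing): wm_init_manageability() and
 * wm_release_manageability() bracket the periods in which management
 * packets are diverted to the host; see wm_suspend()/wm_resume() below
 * for the matching calls.
 */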
8162
8163 static void
8164 wm_get_wakeup(struct wm_softc *sc)
8165 {
8166
8167 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8168 switch (sc->sc_type) {
8169 case WM_T_82573:
8170 case WM_T_82583:
8171 sc->sc_flags |= WM_F_HAS_AMT;
8172 /* FALLTHROUGH */
8173 case WM_T_80003:
8174 case WM_T_82541:
8175 case WM_T_82547:
8176 case WM_T_82571:
8177 case WM_T_82572:
8178 case WM_T_82574:
8179 case WM_T_82575:
8180 case WM_T_82576:
8181 #if 0 /* XXX */
8182 case WM_T_82580:
8183 case WM_T_82580ER:
8184 case WM_T_I350:
8185 #endif
8186 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8187 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8188 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8189 break;
8190 case WM_T_ICH8:
8191 case WM_T_ICH9:
8192 case WM_T_ICH10:
8193 case WM_T_PCH:
8194 case WM_T_PCH2:
8195 sc->sc_flags |= WM_F_HAS_AMT;
8196 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8197 break;
8198 default:
8199 break;
8200 }
8201
8202 /* 1: HAS_MANAGE */
8203 if (wm_enable_mng_pass_thru(sc) != 0)
8204 sc->sc_flags |= WM_F_HAS_MANAGE;
8205
8206 #ifdef WM_DEBUG
8207 printf("\n");
8208 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8209 printf("HAS_AMT,");
8210 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8211 printf("ARC_SUBSYS_VALID,");
8212 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8213 printf("ASF_FIRMWARE_PRES,");
8214 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8215 printf("HAS_MANAGE,");
8216 printf("\n");
8217 #endif
8218 /*
8219 	 * Note that the WOL flags are set after the EEPROM-related reset
8220 	 * code has run.
8221 */
8222 }
8223
8224 #ifdef WM_WOL
8225 /* WOL in the newer chipset interfaces (pchlan) */
8226 static void
8227 wm_enable_phy_wakeup(struct wm_softc *sc)
8228 {
8229 #if 0
8230 uint16_t preg;
8231
8232 /* Copy MAC RARs to PHY RARs */
8233
8234 /* Copy MAC MTA to PHY MTA */
8235
8236 /* Configure PHY Rx Control register */
8237
8238 /* Enable PHY wakeup in MAC register */
8239
8240 /* Configure and enable PHY wakeup in PHY registers */
8241
8242 /* Activate PHY wakeup */
8243
8244 /* XXX */
8245 #endif
8246 }
8247
8248 static void
8249 wm_enable_wakeup(struct wm_softc *sc)
8250 {
8251 uint32_t reg, pmreg;
8252 pcireg_t pmode;
8253
8254 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8255 &pmreg, NULL) == 0)
8256 return;
8257
8258 /* Advertise the wakeup capability */
8259 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8260 | CTRL_SWDPIN(3));
8261 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8262
8263 /* ICH workaround */
8264 switch (sc->sc_type) {
8265 case WM_T_ICH8:
8266 case WM_T_ICH9:
8267 case WM_T_ICH10:
8268 case WM_T_PCH:
8269 case WM_T_PCH2:
8270 /* Disable gig during WOL */
8271 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8272 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8273 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8274 if (sc->sc_type == WM_T_PCH)
8275 wm_gmii_reset(sc);
8276
8277 /* Power down workaround */
8278 if (sc->sc_phytype == WMPHY_82577) {
8279 struct mii_softc *child;
8280
8281 /* Assume that the PHY is copper */
8282 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8283 if (child->mii_mpd_rev <= 2)
8284 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8285 (768 << 5) | 25, 0x0444); /* magic num */
8286 }
8287 break;
8288 default:
8289 break;
8290 }
8291
8292 /* Keep the laser running on fiber adapters */
8293 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8294 	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
8295 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8296 reg |= CTRL_EXT_SWDPIN(3);
8297 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8298 }
8299
8300 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8301 #if 0 /* for the multicast packet */
8302 reg |= WUFC_MC;
8303 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8304 #endif
8305
8306 if (sc->sc_type == WM_T_PCH) {
8307 wm_enable_phy_wakeup(sc);
8308 } else {
8309 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8310 CSR_WRITE(sc, WMREG_WUFC, reg);
8311 }
8312
8313 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8314 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8315 || (sc->sc_type == WM_T_PCH2))
8316 && (sc->sc_phytype == WMPHY_IGP_3))
8317 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8318
8319 /* Request PME */
8320 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8321 #if 0
8322 /* Disable WOL */
8323 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8324 #else
8325 /* For WOL */
8326 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8327 #endif
8328 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8329 }
8330 #endif /* WM_WOL */
8331
8332 static bool
8333 wm_suspend(device_t self, const pmf_qual_t *qual)
8334 {
8335 struct wm_softc *sc = device_private(self);
8336
8337 wm_release_manageability(sc);
8338 wm_release_hw_control(sc);
8339 #ifdef WM_WOL
8340 wm_enable_wakeup(sc);
8341 #endif
8342
8343 return true;
8344 }
8345
8346 static bool
8347 wm_resume(device_t self, const pmf_qual_t *qual)
8348 {
8349 struct wm_softc *sc = device_private(self);
8350
8351 wm_init_manageability(sc);
8352
8353 return true;
8354 }
8355
8356 static void
8357 wm_set_eee_i350(struct wm_softc *sc)
8358 {
8359 uint32_t ipcnfg, eeer;
8360
8361 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8362 eeer = CSR_READ(sc, WMREG_EEER);
8363
8364 if ((sc->sc_flags & WM_F_EEE) != 0) {
8365 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8366 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8367 | EEER_LPI_FC);
8368 } else {
8369 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8370 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8371 | EEER_LPI_FC);
8372 }
8373
8374 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8375 CSR_WRITE(sc, WMREG_EEER, eeer);
8376 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8377 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8378 }
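
/*
 * Hedged usage sketch (hypothetical caller; WM_F_EEE is normally set up
 * at attach time): toggling Energy Efficient Ethernet at runtime would
 * amount to flipping the flag and re-running the function.
 */
#if 0
	sc->sc_flags |= WM_F_EEE;	/* or &= ~WM_F_EEE to disable EEE */
	wm_set_eee_i350(sc);
#endif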
8379