/*	$NetBSD: if_wm.c,v 1.241 2013/02/07 02:10:18 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.241 2013/02/07 02:10:18 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size. Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet. Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size. We have one Rx buffer for normal
 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
 * packet. We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip. We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors. Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

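/*
 * The software/firmware semaphore bit corresponding to each of the
 * (up to four) PHYs.
 */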
static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

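/*
 * Helper macros for the Rx mbuf chain: packets larger than one 2k
 * buffer are reassembled by linking successive Rx buffers together.
 */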
#define	WM_RXCHAIN_RESET(sc) \
do { \
	(sc)->sc_rxtailp = &(sc)->sc_rxhead; \
	*(sc)->sc_rxtailp = NULL; \
	(sc)->sc_rxlen = 0; \
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m) \
do { \
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
	(sc)->sc_rxtailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

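/*
 * Memory-mapped CSR access. CSR_WRITE_FLUSH forces posted writes out
 * to the chip by reading back the STATUS register.
 */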
#define	CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc) \
	(void) CSR_READ((sc), WMREG_STATUS)

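/*
 * Accessors for the flash register window mapped on ICH/PCH chips,
 * used by the NVM read path (wm_read_eeprom_ich8() and friends).
 */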
#define	ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
 \
	__x = (x); \
	__n = (n); \
 \
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > WM_NTXDESC(sc)) { \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
		    (WM_NTXDESC(sc) - __x), (ops)); \
		__n -= (WM_NTXDESC(sc) - __x); \
		__x = 0; \
	} \
 \
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x) \
do { \
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
	struct mbuf *__m = __rxs->rxs_mbuf; \
 \
	/* \
	 * Note: We scoot the packet forward 2 bytes in the buffer \
	 * so that the payload after the Ethernet header is aligned \
	 * to a 4-byte boundary. \
	 * \
	 * XXX BRAINDAMAGE ALERT! \
	 * The stupid chip uses the same size for every buffer, which \
	 * is set in the Receive Control register. We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)! For this \
	 * reason, we can't "scoot" packets longer than the standard \
	 * Ethernet MTU. On strict-alignment platforms, if the total \
	 * size exceeds (2K - 2) we set align_tweak to 0 and let \
	 * the upper layer copy the headers. \
	 */ \
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
 \
	wm_set_dma_addr(&__rxd->wrx_addr, \
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0; \
	__rxd->wrx_cksum = 0; \
	__rxd->wrx_status = 0; \
	__rxd->wrx_errors = 0; \
	__rxd->wrx_special = 0; \
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
 \
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

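/*
 * I/O-space indirect register access: the register offset is written
 * at offset 0 of the I/O BAR and the data at offset 4. Only used to
 * work around bugs in some chip versions; see wm_attach().
 */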
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

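/*
 * Write one byte to an 8-bit controller register behind an indirect
 * access register (e.g. SCTL) on 82575-class chips, then poll until
 * the chip sets the READY bit or the poll times out.
 */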
static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

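/*
 * Split a DMA address into the low and high 32-bit words of a
 * (little-endian) descriptor address; the high word is zero on
 * 32-bit platforms.
 */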
static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

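/*
 * Mark the NVM as an SPI EEPROM and derive its address width (16 or
 * 8 bits) from the EECD register.
 */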
static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

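/*
 * Return the wm_products[] entry matching the device's PCI vendor and
 * product IDs, or NULL if the device is not supported.
 */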
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

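/*
 * Autoconfiguration match routine: succeed if the device appears in
 * the wm_products[] table.
 */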
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

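/*
 * Autoconfiguration attach routine: map the registers and the
 * optional I/O BAR, establish the interrupt, allocate DMA resources,
 * and read the EEPROM/NVM configuration.
 */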
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device. All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access. It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's not a problem, since those newer chips
			 * don't have this bug in the first place.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it hasn't been
			 * configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering. Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory. So must Rx descriptors. We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/* get PHY control from SMBus to PCIe */
	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
		wm_smbustopci(sc);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}

1614 /*
1615 	 * Defer printing the EEPROM type until after verifying the checksum.
1616 * This allows the EEPROM type to be printed correctly in the case
1617 * that no EEPROM is attached.
1618 */
1619 /*
1620 * Validate the EEPROM checksum. If the checksum fails, flag
1621 * this for later, so we can fail future reads from the EEPROM.
1622 */
1623 if (wm_validate_eeprom_checksum(sc)) {
1624 /*
1625 		 * Read it again, because some PCI-e parts fail the
1626 		 * first check due to the link being in a sleep state.
1627 */
1628 if (wm_validate_eeprom_checksum(sc))
1629 sc->sc_flags |= WM_F_EEPROM_INVALID;
1630 }
1631
1632 /* Set device properties (macflags) */
1633 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1634
1635 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1636 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1637 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1638 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1639 } else {
1640 if (sc->sc_flags & WM_F_EEPROM_SPI)
1641 eetype = "SPI";
1642 else
1643 eetype = "MicroWire";
1644 aprint_verbose_dev(sc->sc_dev,
1645 "%u word (%d address bits) %s EEPROM\n",
1646 1U << sc->sc_ee_addrbits,
1647 sc->sc_ee_addrbits, eetype);
1648 }
1649
1650 /*
1651 	 * Read the Ethernet address from the device properties if present;
1652 	 * otherwise, read it from the EEPROM.
1653 */
1654 ea = prop_dictionary_get(dict, "mac-address");
1655 if (ea != NULL) {
1656 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1657 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1658 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1659 } else {
1660 if (wm_read_mac_addr(sc, enaddr) != 0) {
1661 aprint_error_dev(sc->sc_dev,
1662 "unable to read Ethernet address\n");
1663 return;
1664 }
1665 }
1666
1667 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1668 ether_sprintf(enaddr));
1669
1670 /*
1671 * Read the config info from the EEPROM, and set up various
1672 * bits in the control registers based on their contents.
1673 */
1674 pn = prop_dictionary_get(dict, "i82543-cfg1");
1675 if (pn != NULL) {
1676 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1677 cfg1 = (uint16_t) prop_number_integer_value(pn);
1678 } else {
1679 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1680 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1681 return;
1682 }
1683 }
1684
1685 pn = prop_dictionary_get(dict, "i82543-cfg2");
1686 if (pn != NULL) {
1687 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1688 cfg2 = (uint16_t) prop_number_integer_value(pn);
1689 } else {
1690 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1691 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1692 return;
1693 }
1694 }
1695
1696 /* check for WM_F_WOL */
1697 switch (sc->sc_type) {
1698 case WM_T_82542_2_0:
1699 case WM_T_82542_2_1:
1700 case WM_T_82543:
1701 /* dummy? */
1702 eeprom_data = 0;
1703 apme_mask = EEPROM_CFG3_APME;
1704 break;
1705 case WM_T_82544:
1706 apme_mask = EEPROM_CFG2_82544_APM_EN;
1707 eeprom_data = cfg2;
1708 break;
1709 case WM_T_82546:
1710 case WM_T_82546_3:
1711 case WM_T_82571:
1712 case WM_T_82572:
1713 case WM_T_82573:
1714 case WM_T_82574:
1715 case WM_T_82583:
1716 case WM_T_80003:
1717 default:
1718 apme_mask = EEPROM_CFG3_APME;
1719 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1720 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1721 break;
1722 case WM_T_82575:
1723 case WM_T_82576:
1724 case WM_T_82580:
1725 case WM_T_82580ER:
1726 case WM_T_I350:
1727 case WM_T_ICH8:
1728 case WM_T_ICH9:
1729 case WM_T_ICH10:
1730 case WM_T_PCH:
1731 case WM_T_PCH2:
1732 /* XXX The funcid should be checked on some devices */
1733 apme_mask = WUC_APME;
1734 eeprom_data = CSR_READ(sc, WMREG_WUC);
1735 break;
1736 }
1737
1738 	/* Set the WM_F_WOL flag based on the APME bit read above */
1739 if ((eeprom_data & apme_mask) != 0)
1740 sc->sc_flags |= WM_F_WOL;
1741 #ifdef WM_DEBUG
1742 if ((sc->sc_flags & WM_F_WOL) != 0)
1743 printf("WOL\n");
1744 #endif
1745
1746 /*
1747 	 * XXX need special handling for some multiple-port cards
1748 	 * to disable a particular port.
1749 */
1750
1751 if (sc->sc_type >= WM_T_82544) {
1752 pn = prop_dictionary_get(dict, "i82543-swdpin");
1753 if (pn != NULL) {
1754 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1755 swdpin = (uint16_t) prop_number_integer_value(pn);
1756 } else {
1757 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1758 aprint_error_dev(sc->sc_dev,
1759 "unable to read SWDPIN\n");
1760 return;
1761 }
1762 }
1763 }
1764
1765 if (cfg1 & EEPROM_CFG1_ILOS)
1766 sc->sc_ctrl |= CTRL_ILOS;
1767 if (sc->sc_type >= WM_T_82544) {
1768 sc->sc_ctrl |=
1769 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1770 CTRL_SWDPIO_SHIFT;
1771 sc->sc_ctrl |=
1772 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1773 CTRL_SWDPINS_SHIFT;
1774 } else {
1775 sc->sc_ctrl |=
1776 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1777 CTRL_SWDPIO_SHIFT;
1778 }
1779
1780 #if 0
1781 if (sc->sc_type >= WM_T_82544) {
1782 if (cfg1 & EEPROM_CFG1_IPS0)
1783 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1784 if (cfg1 & EEPROM_CFG1_IPS1)
1785 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1786 sc->sc_ctrl_ext |=
1787 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1788 CTRL_EXT_SWDPIO_SHIFT;
1789 sc->sc_ctrl_ext |=
1790 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1791 CTRL_EXT_SWDPINS_SHIFT;
1792 } else {
1793 sc->sc_ctrl_ext |=
1794 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1795 CTRL_EXT_SWDPIO_SHIFT;
1796 }
1797 #endif
1798
1799 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1800 #if 0
1801 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1802 #endif
1803
1804 /*
1805 * Set up some register offsets that are different between
1806 * the i82542 and the i82543 and later chips.
1807 */
1808 if (sc->sc_type < WM_T_82543) {
1809 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1810 sc->sc_tdt_reg = WMREG_OLD_TDT;
1811 } else {
1812 sc->sc_rdt_reg = WMREG_RDT;
1813 sc->sc_tdt_reg = WMREG_TDT;
1814 }
1815
1816 if (sc->sc_type == WM_T_PCH) {
1817 uint16_t val;
1818
1819 /* Save the NVM K1 bit setting */
1820 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1821
1822 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1823 sc->sc_nvm_k1_enabled = 1;
1824 else
1825 sc->sc_nvm_k1_enabled = 0;
1826 }
1827
1828 /*
1829 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
1830 * media structures accordingly.
1831 */
1832 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1833 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1834 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1835 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1836 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1837 wm_gmii_mediainit(sc, wmp->wmp_product);
1838 } else if (sc->sc_type < WM_T_82543 ||
1839 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1840 if (wmp->wmp_flags & WMP_F_1000T)
1841 aprint_error_dev(sc->sc_dev,
1842 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1843 wm_tbi_mediainit(sc);
1844 } else {
1845 switch (sc->sc_type) {
1846 case WM_T_82575:
1847 case WM_T_82576:
1848 case WM_T_82580:
1849 case WM_T_82580ER:
1850 case WM_T_I350:
1851 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1852 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1853 case CTRL_EXT_LINK_MODE_SGMII:
1854 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1855 sc->sc_flags |= WM_F_SGMII;
1856 CSR_WRITE(sc, WMREG_CTRL_EXT,
1857 reg | CTRL_EXT_I2C_ENA);
1858 wm_gmii_mediainit(sc, wmp->wmp_product);
1859 break;
1860 case CTRL_EXT_LINK_MODE_1000KX:
1861 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1862 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1863 CSR_WRITE(sc, WMREG_CTRL_EXT,
1864 reg | CTRL_EXT_I2C_ENA);
1865 panic("not supported yet\n");
1866 break;
1867 case CTRL_EXT_LINK_MODE_GMII:
1868 default:
1869 CSR_WRITE(sc, WMREG_CTRL_EXT,
1870 reg & ~CTRL_EXT_I2C_ENA);
1871 wm_gmii_mediainit(sc, wmp->wmp_product);
1872 break;
1873 }
1874 break;
1875 default:
1876 if (wmp->wmp_flags & WMP_F_1000X)
1877 aprint_error_dev(sc->sc_dev,
1878 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1879 wm_gmii_mediainit(sc, wmp->wmp_product);
1880 }
1881 }
1882
1883 ifp = &sc->sc_ethercom.ec_if;
1884 xname = device_xname(sc->sc_dev);
1885 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1886 ifp->if_softc = sc;
1887 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1888 ifp->if_ioctl = wm_ioctl;
1889 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1890 ifp->if_start = wm_nq_start;
1891 else
1892 ifp->if_start = wm_start;
1893 ifp->if_watchdog = wm_watchdog;
1894 ifp->if_init = wm_init;
1895 ifp->if_stop = wm_stop;
1896 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1897 IFQ_SET_READY(&ifp->if_snd);
1898
1899 /* Check for jumbo frame */
1900 switch (sc->sc_type) {
1901 case WM_T_82573:
1902 /* XXX limited to 9234 if ASPM is disabled */
1903 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1904 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1905 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1906 break;
1907 case WM_T_82571:
1908 case WM_T_82572:
1909 case WM_T_82574:
1910 case WM_T_82575:
1911 case WM_T_82576:
1912 case WM_T_82580:
1913 case WM_T_82580ER:
1914 case WM_T_I350:
1915 case WM_T_80003:
1916 case WM_T_ICH9:
1917 case WM_T_ICH10:
1918 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1919 /* XXX limited to 9234 */
1920 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1921 break;
1922 case WM_T_PCH:
1923 /* XXX limited to 4096 */
1924 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1925 break;
1926 case WM_T_82542_2_0:
1927 case WM_T_82542_2_1:
1928 case WM_T_82583:
1929 case WM_T_ICH8:
1930 /* No support for jumbo frame */
1931 break;
1932 default:
1933 /* ETHER_MAX_LEN_JUMBO */
1934 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1935 break;
1936 }
1937
1938 /*
1939 	 * If we're an i82543 or greater, we can support VLANs.
1940 */
1941 if (sc->sc_type >= WM_T_82543)
1942 sc->sc_ethercom.ec_capabilities |=
1943 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1944
1945 /*
1946 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
1947 * on i82543 and later.
1948 */
1949 if (sc->sc_type >= WM_T_82543) {
1950 ifp->if_capabilities |=
1951 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1952 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1953 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1954 IFCAP_CSUM_TCPv6_Tx |
1955 IFCAP_CSUM_UDPv6_Tx;
1956 }
1957
1958 /*
1959 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
1960 *
1961 * 82541GI (8086:1076) ... no
1962 * 82572EI (8086:10b9) ... yes
1963 */
1964 if (sc->sc_type >= WM_T_82571) {
1965 ifp->if_capabilities |=
1966 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1967 }
1968
1969 /*
1970 	 * If we're an i82544 or greater (except i82547), we can do
1971 * TCP segmentation offload.
1972 */
1973 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1974 ifp->if_capabilities |= IFCAP_TSOv4;
1975 }
1976
1977 if (sc->sc_type >= WM_T_82571) {
1978 ifp->if_capabilities |= IFCAP_TSOv6;
1979 }
1980
1981 /*
1982 * Attach the interface.
1983 */
1984 if_attach(ifp);
1985 ether_ifattach(ifp, enaddr);
1986 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1987 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1988
1989 #ifdef WM_EVENT_COUNTERS
1990 /* Attach event counters. */
1991 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1992 NULL, xname, "txsstall");
1993 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1994 NULL, xname, "txdstall");
1995 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1996 NULL, xname, "txfifo_stall");
1997 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1998 NULL, xname, "txdw");
1999 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2000 NULL, xname, "txqe");
2001 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2002 NULL, xname, "rxintr");
2003 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2004 NULL, xname, "linkintr");
2005
2006 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2007 NULL, xname, "rxipsum");
2008 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2009 NULL, xname, "rxtusum");
2010 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2011 NULL, xname, "txipsum");
2012 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2013 NULL, xname, "txtusum");
2014 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2015 NULL, xname, "txtusum6");
2016
2017 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2018 NULL, xname, "txtso");
2019 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2020 NULL, xname, "txtso6");
2021 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2022 NULL, xname, "txtsopain");
2023
2024 for (i = 0; i < WM_NTXSEGS; i++) {
2025 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2026 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2027 NULL, xname, wm_txseg_evcnt_names[i]);
2028 }
2029
2030 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2031 NULL, xname, "txdrop");
2032
2033 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2034 NULL, xname, "tu");
2035
2036 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2037 NULL, xname, "tx_xoff");
2038 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2039 NULL, xname, "tx_xon");
2040 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2041 NULL, xname, "rx_xoff");
2042 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2043 NULL, xname, "rx_xon");
2044 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2045 NULL, xname, "rx_macctl");
2046 #endif /* WM_EVENT_COUNTERS */
2047
2048 if (pmf_device_register(self, wm_suspend, wm_resume))
2049 pmf_class_network_register(self, ifp);
2050 else
2051 aprint_error_dev(self, "couldn't establish power handler\n");
2052
2053 return;
2054
2055 /*
2056 * Free any resources we've allocated during the failed attach
2057 * attempt. Do this in reverse order and fall through.
2058 */
2059 fail_5:
2060 for (i = 0; i < WM_NRXDESC; i++) {
2061 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2062 bus_dmamap_destroy(sc->sc_dmat,
2063 sc->sc_rxsoft[i].rxs_dmamap);
2064 }
2065 fail_4:
2066 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2067 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2068 bus_dmamap_destroy(sc->sc_dmat,
2069 sc->sc_txsoft[i].txs_dmamap);
2070 }
2071 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2072 fail_3:
2073 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2074 fail_2:
2075 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2076 sc->sc_cd_size);
2077 fail_1:
2078 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2079 fail_0:
2080 return;
2081 }
2082
2083 static int
2084 wm_detach(device_t self, int flags __unused)
2085 {
2086 struct wm_softc *sc = device_private(self);
2087 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2088 int i, s;
2089
2090 s = splnet();
2091 /* Stop the interface. Callouts are stopped in it. */
2092 wm_stop(ifp, 1);
2093 splx(s);
2094
2095 pmf_device_deregister(self);
2096
2097 /* Tell the firmware about the release */
2098 wm_release_manageability(sc);
2099 wm_release_hw_control(sc);
2100
2101 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2102
2103 /* Delete all remaining media. */
2104 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2105
2106 ether_ifdetach(ifp);
2107 if_detach(ifp);
2108 
2110 /* Unload RX dmamaps and free mbufs */
2111 wm_rxdrain(sc);
2112
2113 	/* Free the DMA maps; same cleanup as at the end of wm_attach() */
2114 for (i = 0; i < WM_NRXDESC; i++) {
2115 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2116 bus_dmamap_destroy(sc->sc_dmat,
2117 sc->sc_rxsoft[i].rxs_dmamap);
2118 }
2119 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2120 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2121 bus_dmamap_destroy(sc->sc_dmat,
2122 sc->sc_txsoft[i].txs_dmamap);
2123 }
2124 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2125 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2126 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2127 sc->sc_cd_size);
2128 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2129
2130 /* Disestablish the interrupt handler */
2131 if (sc->sc_ih != NULL) {
2132 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2133 sc->sc_ih = NULL;
2134 }
2135
2136 /* Unmap the registers */
2137 if (sc->sc_ss) {
2138 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2139 sc->sc_ss = 0;
2140 }
2141
2142 if (sc->sc_ios) {
2143 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2144 sc->sc_ios = 0;
2145 }
2146
2147 return 0;
2148 }
2149
2150 /*
2151 * wm_tx_offload:
2152 *
2153 * Set up TCP/IP checksumming parameters for the
2154 * specified packet.
2155 */
2156 static int
2157 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2158 uint8_t *fieldsp)
2159 {
2160 struct mbuf *m0 = txs->txs_mbuf;
2161 struct livengood_tcpip_ctxdesc *t;
2162 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2163 uint32_t ipcse;
2164 struct ether_header *eh;
2165 int offset, iphl;
2166 uint8_t fields;
2167
2168 /*
2169 * XXX It would be nice if the mbuf pkthdr had offset
2170 * fields for the protocol headers.
2171 */
2172
2173 eh = mtod(m0, struct ether_header *);
2174 	switch (ntohs(eh->ether_type)) {
2175 case ETHERTYPE_IP:
2176 case ETHERTYPE_IPV6:
2177 offset = ETHER_HDR_LEN;
2178 break;
2179
2180 case ETHERTYPE_VLAN:
2181 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2182 break;
2183
2184 default:
2185 /*
2186 * Don't support this protocol or encapsulation.
2187 */
2188 *fieldsp = 0;
2189 *cmdp = 0;
2190 return 0;
2191 }
2192
2193 if ((m0->m_pkthdr.csum_flags &
2194 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2195 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2196 } else {
2197 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2198 }
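	/*
	 * ipcse is the inclusive offset of the last byte covered by the
	 * IP header checksum. For example (illustrative), an untagged
	 * frame (offset = 14) with a 20-byte IPv4 header gives
	 * ipcse = 14 + 20 - 1 = 33.
	 */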
2199 ipcse = offset + iphl - 1;
2200
2201 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2202 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2203 seg = 0;
2204 fields = 0;
2205
2206 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2207 int hlen = offset + iphl;
2208 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2209
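		/*
		 * For TSO the hardware regenerates the IP length and TCP
		 * checksum on every segment it emits, so in both paths
		 * below we zero ip_len/ip6_plen and seed th_sum with a
		 * pseudo-header checksum that omits the length; the chip
		 * folds the per-segment length in by itself.
		 */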
2210 if (__predict_false(m0->m_len <
2211 (hlen + sizeof(struct tcphdr)))) {
2212 /*
2213 * TCP/IP headers are not in the first mbuf; we need
2214 * to do this the slow and painful way. Let's just
2215 * hope this doesn't happen very often.
2216 */
2217 struct tcphdr th;
2218
2219 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2220
2221 m_copydata(m0, hlen, sizeof(th), &th);
2222 if (v4) {
2223 struct ip ip;
2224
2225 m_copydata(m0, offset, sizeof(ip), &ip);
2226 ip.ip_len = 0;
2227 m_copyback(m0,
2228 offset + offsetof(struct ip, ip_len),
2229 sizeof(ip.ip_len), &ip.ip_len);
2230 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2231 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2232 } else {
2233 struct ip6_hdr ip6;
2234
2235 m_copydata(m0, offset, sizeof(ip6), &ip6);
2236 ip6.ip6_plen = 0;
2237 m_copyback(m0,
2238 offset + offsetof(struct ip6_hdr, ip6_plen),
2239 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2240 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2241 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2242 }
2243 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2244 sizeof(th.th_sum), &th.th_sum);
2245
2246 hlen += th.th_off << 2;
2247 } else {
2248 /*
2249 * TCP/IP headers are in the first mbuf; we can do
2250 * this the easy way.
2251 */
2252 struct tcphdr *th;
2253
2254 if (v4) {
2255 struct ip *ip =
2256 (void *)(mtod(m0, char *) + offset);
2257 th = (void *)(mtod(m0, char *) + hlen);
2258
2259 ip->ip_len = 0;
2260 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2261 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2262 } else {
2263 struct ip6_hdr *ip6 =
2264 (void *)(mtod(m0, char *) + offset);
2265 th = (void *)(mtod(m0, char *) + hlen);
2266
2267 ip6->ip6_plen = 0;
2268 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2269 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2270 }
2271 hlen += th->th_off << 2;
2272 }
2273
2274 if (v4) {
2275 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2276 cmdlen |= WTX_TCPIP_CMD_IP;
2277 } else {
2278 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2279 ipcse = 0;
2280 }
2281 cmd |= WTX_TCPIP_CMD_TSE;
2282 cmdlen |= WTX_TCPIP_CMD_TSE |
2283 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2284 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2285 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2286 }
2287
2288 /*
2289 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2290 * offload feature, if we load the context descriptor, we
2291 * MUST provide valid values for IPCSS and TUCSS fields.
2292 */
2293
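	/*
	 * Worked example (values illustrative): for an untagged
	 * IPv4/TCP frame with a 20-byte IP header, IPCSS = 14,
	 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24, IPCSE = 33,
	 * TUCSS = 34 and TUCSO = 34 + offsetof(struct tcphdr, th_sum)
	 * = 50.
	 */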
2294 ipcs = WTX_TCPIP_IPCSS(offset) |
2295 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2296 WTX_TCPIP_IPCSE(ipcse);
2297 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2298 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2299 fields |= WTX_IXSM;
2300 }
2301
2302 offset += iphl;
2303
2304 if (m0->m_pkthdr.csum_flags &
2305 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2306 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2307 fields |= WTX_TXSM;
2308 tucs = WTX_TCPIP_TUCSS(offset) |
2309 WTX_TCPIP_TUCSO(offset +
2310 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2311 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2312 } else if ((m0->m_pkthdr.csum_flags &
2313 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2314 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2315 fields |= WTX_TXSM;
2316 tucs = WTX_TCPIP_TUCSS(offset) |
2317 WTX_TCPIP_TUCSO(offset +
2318 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2319 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2320 } else {
2321 /* Just initialize it to a valid TCP context. */
2322 tucs = WTX_TCPIP_TUCSS(offset) |
2323 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2324 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2325 }
2326
2327 /* Fill in the context descriptor. */
2328 t = (struct livengood_tcpip_ctxdesc *)
2329 &sc->sc_txdescs[sc->sc_txnext];
2330 t->tcpip_ipcs = htole32(ipcs);
2331 t->tcpip_tucs = htole32(tucs);
2332 t->tcpip_cmdlen = htole32(cmdlen);
2333 t->tcpip_seg = htole32(seg);
2334 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2335
2336 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2337 txs->txs_ndesc++;
2338
2339 *cmdp = cmd;
2340 *fieldsp = fields;
2341
2342 return 0;
2343 }
2344
2345 static void
2346 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2347 {
2348 struct mbuf *m;
2349 int i;
2350
2351 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2352 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2353 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2354 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2355 m->m_data, m->m_len, m->m_flags);
2356 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2357 i, i == 1 ? "" : "s");
2358 }
2359
2360 /*
2361 * wm_82547_txfifo_stall:
2362 *
2363 * Callout used to wait for the 82547 Tx FIFO to drain,
2364 * reset the FIFO pointers, and restart packet transmission.
2365 */
2366 static void
2367 wm_82547_txfifo_stall(void *arg)
2368 {
2369 struct wm_softc *sc = arg;
2370 int s;
2371
2372 s = splnet();
2373
2374 if (sc->sc_txfifo_stall) {
2375 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2376 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2377 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2378 /*
2379 * Packets have drained. Stop transmitter, reset
2380 * FIFO pointers, restart transmitter, and kick
2381 * the packet queue.
2382 */
2383 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2384 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2385 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2386 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2387 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2388 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2389 CSR_WRITE(sc, WMREG_TCTL, tctl);
2390 CSR_WRITE_FLUSH(sc);
2391
2392 sc->sc_txfifo_head = 0;
2393 sc->sc_txfifo_stall = 0;
2394 wm_start(&sc->sc_ethercom.ec_if);
2395 } else {
2396 /*
2397 * Still waiting for packets to drain; try again in
2398 * another tick.
2399 */
2400 callout_schedule(&sc->sc_txfifo_ch, 1);
2401 }
2402 }
2403
2404 splx(s);
2405 }
2406
2407 static void
2408 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2409 {
2410 uint32_t reg;
2411
2412 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2413
2414 if (on != 0)
2415 reg |= EXTCNFCTR_GATE_PHY_CFG;
2416 else
2417 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2418
2419 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2420 }
2421
2422 /*
2423 * wm_82547_txfifo_bugchk:
2424 *
2425 * Check for bug condition in the 82547 Tx FIFO. We need to
2426 * prevent enqueueing a packet that would wrap around the end
2427  * of the Tx FIFO ring buffer, otherwise the chip will croak.
2428 *
2429 * We do this by checking the amount of space before the end
2430 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2431 * the Tx FIFO, wait for all remaining packets to drain, reset
2432 * the internal FIFO pointers to the beginning, and restart
2433 * transmission on the interface.
2434 */
2435 #define WM_FIFO_HDR 0x10
2436 #define WM_82547_PAD_LEN 0x3e0
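/*
 * Rough example of the FIFO accounting done below: a full-sized
 * 1514-byte frame consumes roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR)
 * = 1536 bytes of Tx FIFO space, header included.
 */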
2437 static int
2438 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2439 {
2440 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2441 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2442
2443 /* Just return if already stalled. */
2444 if (sc->sc_txfifo_stall)
2445 return 1;
2446
2447 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2448 /* Stall only occurs in half-duplex mode. */
2449 goto send_packet;
2450 }
2451
2452 if (len >= WM_82547_PAD_LEN + space) {
2453 sc->sc_txfifo_stall = 1;
2454 callout_schedule(&sc->sc_txfifo_ch, 1);
2455 return 1;
2456 }
2457
2458 send_packet:
2459 sc->sc_txfifo_head += len;
2460 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2461 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2462
2463 return 0;
2464 }
2465
2466 /*
2467 * wm_start: [ifnet interface function]
2468 *
2469 * Start packet transmission on the interface.
2470 */
2471 static void
2472 wm_start(struct ifnet *ifp)
2473 {
2474 struct wm_softc *sc = ifp->if_softc;
2475 struct mbuf *m0;
2476 struct m_tag *mtag;
2477 struct wm_txsoft *txs;
2478 bus_dmamap_t dmamap;
2479 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2480 bus_addr_t curaddr;
2481 bus_size_t seglen, curlen;
2482 uint32_t cksumcmd;
2483 uint8_t cksumfields;
2484
2485 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2486 return;
2487
2488 /*
2489 * Remember the previous number of free descriptors.
2490 */
2491 ofree = sc->sc_txfree;
2492
2493 /*
2494 * Loop through the send queue, setting up transmit descriptors
2495 * until we drain the queue, or use up all available transmit
2496 * descriptors.
2497 */
2498 for (;;) {
2499 /* Grab a packet off the queue. */
2500 IFQ_POLL(&ifp->if_snd, m0);
2501 if (m0 == NULL)
2502 break;
2503
2504 DPRINTF(WM_DEBUG_TX,
2505 ("%s: TX: have packet to transmit: %p\n",
2506 device_xname(sc->sc_dev), m0));
2507
2508 /* Get a work queue entry. */
2509 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2510 wm_txintr(sc);
2511 if (sc->sc_txsfree == 0) {
2512 DPRINTF(WM_DEBUG_TX,
2513 ("%s: TX: no free job descriptors\n",
2514 device_xname(sc->sc_dev)));
2515 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2516 break;
2517 }
2518 }
2519
2520 txs = &sc->sc_txsoft[sc->sc_txsnext];
2521 dmamap = txs->txs_dmamap;
2522
2523 use_tso = (m0->m_pkthdr.csum_flags &
2524 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2525
2526 		/*
2527 		 * So says the Linux driver:
2528 		 * The controller does a simple calculation to make sure
2529 		 * there is enough room in the FIFO before initiating the
2530 		 * DMA for each buffer; it assumes each buffer covers at
2531 		 * most 4 = ceil(buffer len / MSS) MSS-sized chunks.
2532 		 * To make sure we don't overrun the FIFO, adjust the max
2533 		 * buffer len (to 4 * MSS) if the MSS drops.
2534 		 */
2535 dmamap->dm_maxsegsz =
2536 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2537 ? m0->m_pkthdr.segsz << 2
2538 : WTX_MAX_LEN;
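		/*
		 * E.g. (illustrative): with a typical MSS of 1448, each
		 * TSO DMA segment is capped at 1448 << 2 = 5792 bytes,
		 * so the controller's per-buffer check never sees more
		 * than 4 MSS-sized chunks.
		 */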
2539
2540 /*
2541 * Load the DMA map. If this fails, the packet either
2542 * didn't fit in the allotted number of segments, or we
2543 * were short on resources. For the too-many-segments
2544 * case, we simply report an error and drop the packet,
2545 * since we can't sanely copy a jumbo packet to a single
2546 * buffer.
2547 */
2548 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2549 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2550 if (error) {
2551 if (error == EFBIG) {
2552 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2553 log(LOG_ERR, "%s: Tx packet consumes too many "
2554 "DMA segments, dropping...\n",
2555 device_xname(sc->sc_dev));
2556 IFQ_DEQUEUE(&ifp->if_snd, m0);
2557 wm_dump_mbuf_chain(sc, m0);
2558 m_freem(m0);
2559 continue;
2560 }
2561 /*
2562 * Short on resources, just stop for now.
2563 */
2564 DPRINTF(WM_DEBUG_TX,
2565 ("%s: TX: dmamap load failed: %d\n",
2566 device_xname(sc->sc_dev), error));
2567 break;
2568 }
2569
2570 segs_needed = dmamap->dm_nsegs;
2571 if (use_tso) {
2572 /* For sentinel descriptor; see below. */
2573 segs_needed++;
2574 }
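		/*
		 * The sentinel is carved out of the last segment in the
		 * descriptor-fill loop below: its final 4 bytes become
		 * a small trailing descriptor of their own.
		 */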
2575
2576 /*
2577 * Ensure we have enough descriptors free to describe
2578 * the packet. Note, we always reserve one descriptor
2579 * at the end of the ring due to the semantics of the
2580 * TDT register, plus one more in the event we need
2581 * to load offload context.
2582 */
2583 if (segs_needed > sc->sc_txfree - 2) {
2584 /*
2585 * Not enough free descriptors to transmit this
2586 * packet. We haven't committed anything yet,
2587 * so just unload the DMA map, put the packet
2588 			 * back on the queue, and punt. Notify the upper
2589 * layer that there are no more slots left.
2590 */
2591 DPRINTF(WM_DEBUG_TX,
2592 ("%s: TX: need %d (%d) descriptors, have %d\n",
2593 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2594 segs_needed, sc->sc_txfree - 1));
2595 ifp->if_flags |= IFF_OACTIVE;
2596 bus_dmamap_unload(sc->sc_dmat, dmamap);
2597 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2598 break;
2599 }
2600
2601 /*
2602 * Check for 82547 Tx FIFO bug. We need to do this
2603 * once we know we can transmit the packet, since we
2604 * do some internal FIFO space accounting here.
2605 */
2606 if (sc->sc_type == WM_T_82547 &&
2607 wm_82547_txfifo_bugchk(sc, m0)) {
2608 DPRINTF(WM_DEBUG_TX,
2609 ("%s: TX: 82547 Tx FIFO bug detected\n",
2610 device_xname(sc->sc_dev)));
2611 ifp->if_flags |= IFF_OACTIVE;
2612 bus_dmamap_unload(sc->sc_dmat, dmamap);
2613 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2614 break;
2615 }
2616
2617 IFQ_DEQUEUE(&ifp->if_snd, m0);
2618
2619 /*
2620 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2621 */
2622
2623 DPRINTF(WM_DEBUG_TX,
2624 ("%s: TX: packet has %d (%d) DMA segments\n",
2625 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2626
2627 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2628
2629 /*
2630 * Store a pointer to the packet so that we can free it
2631 * later.
2632 *
2633 * Initially, we consider the number of descriptors the
2634 * packet uses the number of DMA segments. This may be
2635 * incremented by 1 if we do checksum offload (a descriptor
2636 * is used to set the checksum context).
2637 */
2638 txs->txs_mbuf = m0;
2639 txs->txs_firstdesc = sc->sc_txnext;
2640 txs->txs_ndesc = segs_needed;
2641
2642 /* Set up offload parameters for this packet. */
2643 if (m0->m_pkthdr.csum_flags &
2644 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2645 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2646 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2647 if (wm_tx_offload(sc, txs, &cksumcmd,
2648 &cksumfields) != 0) {
2649 /* Error message already displayed. */
2650 bus_dmamap_unload(sc->sc_dmat, dmamap);
2651 continue;
2652 }
2653 } else {
2654 cksumcmd = 0;
2655 cksumfields = 0;
2656 }
2657
2658 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2659
2660 /* Sync the DMA map. */
2661 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2662 BUS_DMASYNC_PREWRITE);
2663
2664 /*
2665 		 * Initialize the transmit descriptors.
2666 */
2667 for (nexttx = sc->sc_txnext, seg = 0;
2668 seg < dmamap->dm_nsegs; seg++) {
2669 for (seglen = dmamap->dm_segs[seg].ds_len,
2670 curaddr = dmamap->dm_segs[seg].ds_addr;
2671 seglen != 0;
2672 curaddr += curlen, seglen -= curlen,
2673 nexttx = WM_NEXTTX(sc, nexttx)) {
2674 curlen = seglen;
2675
2676 /*
2677 * So says the Linux driver:
2678 * Work around for premature descriptor
2679 * write-backs in TSO mode. Append a
2680 * 4-byte sentinel descriptor.
2681 */
2682 if (use_tso &&
2683 seg == dmamap->dm_nsegs - 1 &&
2684 curlen > 8)
2685 curlen -= 4;
2686
2687 wm_set_dma_addr(
2688 &sc->sc_txdescs[nexttx].wtx_addr,
2689 curaddr);
2690 sc->sc_txdescs[nexttx].wtx_cmdlen =
2691 htole32(cksumcmd | curlen);
2692 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2693 0;
2694 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2695 cksumfields;
2696 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2697 lasttx = nexttx;
2698
2699 DPRINTF(WM_DEBUG_TX,
2700 ("%s: TX: desc %d: low %#" PRIx64 ", "
2701 "len %#04zx\n",
2702 device_xname(sc->sc_dev), nexttx,
2703 (uint64_t)curaddr, curlen));
2704 }
2705 }
2706
2707 KASSERT(lasttx != -1);
2708
2709 /*
2710 * Set up the command byte on the last descriptor of
2711 * the packet. If we're in the interrupt delay window,
2712 * delay the interrupt.
2713 */
2714 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2715 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2716
2717 /*
2718 * If VLANs are enabled and the packet has a VLAN tag, set
2719 * up the descriptor to encapsulate the packet for us.
2720 *
2721 * This is only valid on the last descriptor of the packet.
2722 */
2723 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2724 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2725 htole32(WTX_CMD_VLE);
2726 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2727 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2728 }
2729
2730 txs->txs_lastdesc = lasttx;
2731
2732 DPRINTF(WM_DEBUG_TX,
2733 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2734 device_xname(sc->sc_dev),
2735 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2736
2737 /* Sync the descriptors we're using. */
2738 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2739 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2740
2741 /* Give the packet to the chip. */
2742 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2743
2744 DPRINTF(WM_DEBUG_TX,
2745 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2746
2747 DPRINTF(WM_DEBUG_TX,
2748 ("%s: TX: finished transmitting packet, job %d\n",
2749 device_xname(sc->sc_dev), sc->sc_txsnext));
2750
2751 /* Advance the tx pointer. */
2752 sc->sc_txfree -= txs->txs_ndesc;
2753 sc->sc_txnext = nexttx;
2754
2755 sc->sc_txsfree--;
2756 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2757
2758 /* Pass the packet to any BPF listeners. */
2759 bpf_mtap(ifp, m0);
2760 }
2761
2762 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2763 /* No more slots; notify upper layer. */
2764 ifp->if_flags |= IFF_OACTIVE;
2765 }
2766
2767 if (sc->sc_txfree != ofree) {
2768 /* Set a watchdog timer in case the chip flakes out. */
2769 ifp->if_timer = 5;
2770 }
2771 }
2772
2773 /*
2774 * wm_nq_tx_offload:
2775 *
2776 * Set up TCP/IP checksumming parameters for the
2777  *	specified packet, for NEWQUEUE devices.
2778 */
2779 static int
2780 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2781 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2782 {
2783 struct mbuf *m0 = txs->txs_mbuf;
2784 struct m_tag *mtag;
2785 uint32_t vl_len, mssidx, cmdc;
2786 struct ether_header *eh;
2787 int offset, iphl;
2788
2789 /*
2790 * XXX It would be nice if the mbuf pkthdr had offset
2791 * fields for the protocol headers.
2792 */
2793 *cmdlenp = 0;
2794 *fieldsp = 0;
2795
2796 eh = mtod(m0, struct ether_header *);
2797 	switch (ntohs(eh->ether_type)) {
2798 case ETHERTYPE_IP:
2799 case ETHERTYPE_IPV6:
2800 offset = ETHER_HDR_LEN;
2801 break;
2802
2803 case ETHERTYPE_VLAN:
2804 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2805 break;
2806
2807 default:
2808 /*
2809 * Don't support this protocol or encapsulation.
2810 */
2811 *do_csum = false;
2812 return 0;
2813 }
2814 *do_csum = true;
2815 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2816 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2817
2818 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2819 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2820
2821 if ((m0->m_pkthdr.csum_flags &
2822 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2823 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2824 } else {
2825 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2826 }
2827 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2828 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
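	/*
	 * E.g. (illustrative): an untagged IPv4/TCP frame with a
	 * 20-byte IP header yields
	 * vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
	 *     (20 << NQTXC_VLLEN_IPLEN_SHIFT).
	 */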
2829
2830 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2831 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2832 << NQTXC_VLLEN_VLAN_SHIFT);
2833 *cmdlenp |= NQTX_CMD_VLE;
2834 }
2835
2836 mssidx = 0;
2837
2838 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2839 int hlen = offset + iphl;
2840 int tcp_hlen;
2841 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2842
2843 if (__predict_false(m0->m_len <
2844 (hlen + sizeof(struct tcphdr)))) {
2845 /*
2846 * TCP/IP headers are not in the first mbuf; we need
2847 * to do this the slow and painful way. Let's just
2848 * hope this doesn't happen very often.
2849 */
2850 struct tcphdr th;
2851
2852 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2853
2854 m_copydata(m0, hlen, sizeof(th), &th);
2855 if (v4) {
2856 struct ip ip;
2857
2858 m_copydata(m0, offset, sizeof(ip), &ip);
2859 ip.ip_len = 0;
2860 m_copyback(m0,
2861 offset + offsetof(struct ip, ip_len),
2862 sizeof(ip.ip_len), &ip.ip_len);
2863 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2864 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2865 } else {
2866 struct ip6_hdr ip6;
2867
2868 m_copydata(m0, offset, sizeof(ip6), &ip6);
2869 ip6.ip6_plen = 0;
2870 m_copyback(m0,
2871 offset + offsetof(struct ip6_hdr, ip6_plen),
2872 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2873 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2874 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2875 }
2876 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2877 sizeof(th.th_sum), &th.th_sum);
2878
2879 tcp_hlen = th.th_off << 2;
2880 } else {
2881 /*
2882 * TCP/IP headers are in the first mbuf; we can do
2883 * this the easy way.
2884 */
2885 struct tcphdr *th;
2886
2887 if (v4) {
2888 struct ip *ip =
2889 (void *)(mtod(m0, char *) + offset);
2890 th = (void *)(mtod(m0, char *) + hlen);
2891
2892 ip->ip_len = 0;
2893 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2894 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2895 } else {
2896 struct ip6_hdr *ip6 =
2897 (void *)(mtod(m0, char *) + offset);
2898 th = (void *)(mtod(m0, char *) + hlen);
2899
2900 ip6->ip6_plen = 0;
2901 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2902 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2903 }
2904 tcp_hlen = th->th_off << 2;
2905 }
2906 hlen += tcp_hlen;
2907 *cmdlenp |= NQTX_CMD_TSE;
2908
2909 if (v4) {
2910 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2911 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2912 } else {
2913 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2914 *fieldsp |= NQTXD_FIELDS_TUXSM;
2915 }
2916 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2917 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2918 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2919 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2920 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2921 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2922 } else {
2923 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2924 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2925 }
2926
2927 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2928 *fieldsp |= NQTXD_FIELDS_IXSM;
2929 cmdc |= NQTXC_CMD_IP4;
2930 }
2931
2932 if (m0->m_pkthdr.csum_flags &
2933 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2934 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2935 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2936 cmdc |= NQTXC_CMD_TCP;
2937 } else {
2938 cmdc |= NQTXC_CMD_UDP;
2939 }
2940 cmdc |= NQTXC_CMD_IP4;
2941 *fieldsp |= NQTXD_FIELDS_TUXSM;
2942 }
2943 if (m0->m_pkthdr.csum_flags &
2944 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2945 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2946 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2947 cmdc |= NQTXC_CMD_TCP;
2948 } else {
2949 cmdc |= NQTXC_CMD_UDP;
2950 }
2951 cmdc |= NQTXC_CMD_IP6;
2952 *fieldsp |= NQTXD_FIELDS_TUXSM;
2953 }
2954
2955 /* Fill in the context descriptor. */
2956 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
2957 htole32(vl_len);
2958 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
2959 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
2960 htole32(cmdc);
2961 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
2962 htole32(mssidx);
2963 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2964 DPRINTF(WM_DEBUG_TX,
2965 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
2966 sc->sc_txnext, 0, vl_len));
2967 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
2968 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2969 txs->txs_ndesc++;
2970 return 0;
2971 }
2972
2973 /*
2974 * wm_nq_start: [ifnet interface function]
2975 *
2976  *	Start packet transmission on the interface for NEWQUEUE devices.
2977 */
2978 static void
2979 wm_nq_start(struct ifnet *ifp)
2980 {
2981 struct wm_softc *sc = ifp->if_softc;
2982 struct mbuf *m0;
2983 struct m_tag *mtag;
2984 struct wm_txsoft *txs;
2985 bus_dmamap_t dmamap;
2986 int error, nexttx, lasttx = -1, seg, segs_needed;
2987 bool do_csum, sent;
2988
2989 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2990 return;
2991
2992 sent = false;
2993
2994 /*
2995 * Loop through the send queue, setting up transmit descriptors
2996 * until we drain the queue, or use up all available transmit
2997 * descriptors.
2998 */
2999 for (;;) {
3000 /* Grab a packet off the queue. */
3001 IFQ_POLL(&ifp->if_snd, m0);
3002 if (m0 == NULL)
3003 break;
3004
3005 DPRINTF(WM_DEBUG_TX,
3006 ("%s: TX: have packet to transmit: %p\n",
3007 device_xname(sc->sc_dev), m0));
3008
3009 /* Get a work queue entry. */
3010 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3011 wm_txintr(sc);
3012 if (sc->sc_txsfree == 0) {
3013 DPRINTF(WM_DEBUG_TX,
3014 ("%s: TX: no free job descriptors\n",
3015 device_xname(sc->sc_dev)));
3016 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3017 break;
3018 }
3019 }
3020
3021 txs = &sc->sc_txsoft[sc->sc_txsnext];
3022 dmamap = txs->txs_dmamap;
3023
3024 /*
3025 * Load the DMA map. If this fails, the packet either
3026 * didn't fit in the allotted number of segments, or we
3027 * were short on resources. For the too-many-segments
3028 * case, we simply report an error and drop the packet,
3029 * since we can't sanely copy a jumbo packet to a single
3030 * buffer.
3031 */
3032 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3033 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3034 if (error) {
3035 if (error == EFBIG) {
3036 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3037 log(LOG_ERR, "%s: Tx packet consumes too many "
3038 "DMA segments, dropping...\n",
3039 device_xname(sc->sc_dev));
3040 IFQ_DEQUEUE(&ifp->if_snd, m0);
3041 wm_dump_mbuf_chain(sc, m0);
3042 m_freem(m0);
3043 continue;
3044 }
3045 /*
3046 * Short on resources, just stop for now.
3047 */
3048 DPRINTF(WM_DEBUG_TX,
3049 ("%s: TX: dmamap load failed: %d\n",
3050 device_xname(sc->sc_dev), error));
3051 break;
3052 }
3053
3054 segs_needed = dmamap->dm_nsegs;
3055
3056 /*
3057 * Ensure we have enough descriptors free to describe
3058 * the packet. Note, we always reserve one descriptor
3059 * at the end of the ring due to the semantics of the
3060 * TDT register, plus one more in the event we need
3061 * to load offload context.
3062 */
3063 if (segs_needed > sc->sc_txfree - 2) {
3064 /*
3065 * Not enough free descriptors to transmit this
3066 * packet. We haven't committed anything yet,
3067 * so just unload the DMA map, put the packet
3068 			 * back on the queue, and punt. Notify the upper
3069 * layer that there are no more slots left.
3070 */
3071 DPRINTF(WM_DEBUG_TX,
3072 ("%s: TX: need %d (%d) descriptors, have %d\n",
3073 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3074 segs_needed, sc->sc_txfree - 1));
3075 ifp->if_flags |= IFF_OACTIVE;
3076 bus_dmamap_unload(sc->sc_dmat, dmamap);
3077 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3078 break;
3079 }
3080
3081 IFQ_DEQUEUE(&ifp->if_snd, m0);
3082
3083 /*
3084 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3085 */
3086
3087 DPRINTF(WM_DEBUG_TX,
3088 ("%s: TX: packet has %d (%d) DMA segments\n",
3089 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3090
3091 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3092
3093 /*
3094 * Store a pointer to the packet so that we can free it
3095 * later.
3096 *
3097 * Initially, we consider the number of descriptors the
3098 * packet uses the number of DMA segments. This may be
3099 * incremented by 1 if we do checksum offload (a descriptor
3100 * is used to set the checksum context).
3101 */
3102 txs->txs_mbuf = m0;
3103 txs->txs_firstdesc = sc->sc_txnext;
3104 txs->txs_ndesc = segs_needed;
3105
3106 /* Set up offload parameters for this packet. */
3107 uint32_t cmdlen, fields, dcmdlen;
3108 if (m0->m_pkthdr.csum_flags &
3109 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3110 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3111 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3112 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3113 &do_csum) != 0) {
3114 /* Error message already displayed. */
3115 bus_dmamap_unload(sc->sc_dmat, dmamap);
3116 continue;
3117 }
3118 } else {
3119 do_csum = false;
3120 cmdlen = 0;
3121 fields = 0;
3122 }
3123
3124 /* Sync the DMA map. */
3125 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3126 BUS_DMASYNC_PREWRITE);
3127
3128 /*
3129 * Initialize the first transmit descriptor.
3130 */
3131 nexttx = sc->sc_txnext;
3132 if (!do_csum) {
3133 /* setup a legacy descriptor */
3134 wm_set_dma_addr(
3135 &sc->sc_txdescs[nexttx].wtx_addr,
3136 dmamap->dm_segs[0].ds_addr);
3137 sc->sc_txdescs[nexttx].wtx_cmdlen =
3138 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3139 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3140 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3141 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3142 NULL) {
3143 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3144 htole32(WTX_CMD_VLE);
3145 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3146 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3147 } else {
3148 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3149 }
3150 dcmdlen = 0;
3151 } else {
3152 /* setup an advanced data descriptor */
3153 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3154 htole64(dmamap->dm_segs[0].ds_addr);
3155 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3156 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3157 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3158 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3159 htole32(fields);
3160 DPRINTF(WM_DEBUG_TX,
3161 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3162 device_xname(sc->sc_dev), nexttx,
3163 (uint64_t)dmamap->dm_segs[0].ds_addr));
3164 DPRINTF(WM_DEBUG_TX,
3165 ("\t 0x%08x%08x\n", fields,
3166 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3167 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3168 }
3169
3170 lasttx = nexttx;
3171 nexttx = WM_NEXTTX(sc, nexttx);
3172 		/*
3173 		 * Fill in the next descriptors. The legacy and advanced
3174 		 * formats are the same here.
3175 		 */
3176 for (seg = 1; seg < dmamap->dm_nsegs;
3177 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3178 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3179 htole64(dmamap->dm_segs[seg].ds_addr);
3180 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3181 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3182 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3183 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3184 lasttx = nexttx;
3185
3186 DPRINTF(WM_DEBUG_TX,
3187 ("%s: TX: desc %d: %#" PRIx64 ", "
3188 "len %#04zx\n",
3189 device_xname(sc->sc_dev), nexttx,
3190 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3191 dmamap->dm_segs[seg].ds_len));
3192 }
3193
3194 KASSERT(lasttx != -1);
3195
3196 /*
3197 * Set up the command byte on the last descriptor of
3198 * the packet. If we're in the interrupt delay window,
3199 * delay the interrupt.
3200 */
3201 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3202 (NQTX_CMD_EOP | NQTX_CMD_RS));
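		/*
		 * The legacy and advanced descriptor formats encode
		 * EOP/RS in the same bit positions (as the KASSERT
		 * above verifies), so updating through the legacy view
		 * is safe for both.
		 */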
3203 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3204 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3205
3206 txs->txs_lastdesc = lasttx;
3207
3208 DPRINTF(WM_DEBUG_TX,
3209 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3210 device_xname(sc->sc_dev),
3211 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3212
3213 /* Sync the descriptors we're using. */
3214 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3215 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3216
3217 /* Give the packet to the chip. */
3218 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3219 sent = true;
3220
3221 DPRINTF(WM_DEBUG_TX,
3222 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3223
3224 DPRINTF(WM_DEBUG_TX,
3225 ("%s: TX: finished transmitting packet, job %d\n",
3226 device_xname(sc->sc_dev), sc->sc_txsnext));
3227
3228 /* Advance the tx pointer. */
3229 sc->sc_txfree -= txs->txs_ndesc;
3230 sc->sc_txnext = nexttx;
3231
3232 sc->sc_txsfree--;
3233 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3234
3235 /* Pass the packet to any BPF listeners. */
3236 bpf_mtap(ifp, m0);
3237 }
3238
3239 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3240 /* No more slots; notify upper layer. */
3241 ifp->if_flags |= IFF_OACTIVE;
3242 }
3243
3244 if (sent) {
3245 /* Set a watchdog timer in case the chip flakes out. */
3246 ifp->if_timer = 5;
3247 }
3248 }
3249
3250 /*
3251 * wm_watchdog: [ifnet interface function]
3252 *
3253 * Watchdog timer handler.
3254 */
3255 static void
3256 wm_watchdog(struct ifnet *ifp)
3257 {
3258 struct wm_softc *sc = ifp->if_softc;
3259
3260 /*
3261 * Since we're using delayed interrupts, sweep up
3262 * before we report an error.
3263 */
3264 wm_txintr(sc);
3265
3266 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3267 #ifdef WM_DEBUG
3268 int i, j;
3269 struct wm_txsoft *txs;
3270 #endif
3271 log(LOG_ERR,
3272 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3273 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3274 sc->sc_txnext);
3275 ifp->if_oerrors++;
3276 #ifdef WM_DEBUG
3277 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3278 i = WM_NEXTTXS(sc, i)) {
3279 txs = &sc->sc_txsoft[i];
3280 printf("txs %d tx %d -> %d\n",
3281 i, txs->txs_firstdesc, txs->txs_lastdesc);
3282 for (j = txs->txs_firstdesc; ;
3283 j = WM_NEXTTX(sc, j)) {
3284 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3285 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3286 printf("\t %#08x%08x\n",
3287 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3288 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3289 if (j == txs->txs_lastdesc)
3290 break;
3291 }
3292 }
3293 #endif
3294 /* Reset the interface. */
3295 (void) wm_init(ifp);
3296 }
3297
3298 /* Try to get more packets going. */
3299 ifp->if_start(ifp);
3300 }
3301
3302 static int
3303 wm_ifflags_cb(struct ethercom *ec)
3304 {
3305 struct ifnet *ifp = &ec->ec_if;
3306 struct wm_softc *sc = ifp->if_softc;
3307 int change = ifp->if_flags ^ sc->sc_if_flags;
3308
3309 if (change != 0)
3310 sc->sc_if_flags = ifp->if_flags;
3311
3312 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3313 return ENETRESET;
3314
3315 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3316 wm_set_filter(sc);
3317
3318 wm_set_vlan(sc);
3319
3320 return 0;
3321 }
3322
3323 /*
3324 * wm_ioctl: [ifnet interface function]
3325 *
3326 * Handle control requests from the operator.
3327 */
3328 static int
3329 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3330 {
3331 struct wm_softc *sc = ifp->if_softc;
3332 struct ifreq *ifr = (struct ifreq *) data;
3333 struct ifaddr *ifa = (struct ifaddr *)data;
3334 struct sockaddr_dl *sdl;
3335 int s, error;
3336
3337 s = splnet();
3338
3339 switch (cmd) {
3340 case SIOCSIFMEDIA:
3341 case SIOCGIFMEDIA:
3342 /* Flow control requires full-duplex mode. */
3343 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3344 (ifr->ifr_media & IFM_FDX) == 0)
3345 ifr->ifr_media &= ~IFM_ETH_FMASK;
3346 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3347 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3348 /* We can do both TXPAUSE and RXPAUSE. */
3349 ifr->ifr_media |=
3350 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3351 }
3352 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3353 }
3354 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3355 break;
3356 case SIOCINITIFADDR:
3357 if (ifa->ifa_addr->sa_family == AF_LINK) {
3358 sdl = satosdl(ifp->if_dl->ifa_addr);
3359 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3360 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3361 /* unicast address is first multicast entry */
3362 wm_set_filter(sc);
3363 error = 0;
3364 break;
3365 }
3366 /*FALLTHROUGH*/
3367 default:
3368 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3369 break;
3370
3371 error = 0;
3372
3373 if (cmd == SIOCSIFCAP)
3374 error = (*ifp->if_init)(ifp);
3375 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3376 ;
3377 else if (ifp->if_flags & IFF_RUNNING) {
3378 /*
3379 * Multicast list has changed; set the hardware filter
3380 * accordingly.
3381 */
3382 wm_set_filter(sc);
3383 }
3384 break;
3385 }
3386
3387 /* Try to get more packets going. */
3388 ifp->if_start(ifp);
3389
3390 splx(s);
3391 return error;
3392 }
3393
3394 /*
3395 * wm_intr:
3396 *
3397 * Interrupt service routine.
3398 */
3399 static int
3400 wm_intr(void *arg)
3401 {
3402 struct wm_softc *sc = arg;
3403 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3404 uint32_t icr;
3405 int handled = 0;
3406
3407 while (1 /* CONSTCOND */) {
3408 icr = CSR_READ(sc, WMREG_ICR);
3409 if ((icr & sc->sc_icr) == 0)
3410 break;
3411 rnd_add_uint32(&sc->rnd_source, icr);
3412
3413 handled = 1;
3414
3415 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3416 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3417 DPRINTF(WM_DEBUG_RX,
3418 ("%s: RX: got Rx intr 0x%08x\n",
3419 device_xname(sc->sc_dev),
3420 icr & (ICR_RXDMT0|ICR_RXT0)));
3421 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3422 }
3423 #endif
3424 wm_rxintr(sc);
3425
3426 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3427 if (icr & ICR_TXDW) {
3428 DPRINTF(WM_DEBUG_TX,
3429 ("%s: TX: got TXDW interrupt\n",
3430 device_xname(sc->sc_dev)));
3431 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3432 }
3433 #endif
3434 wm_txintr(sc);
3435
3436 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3437 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3438 wm_linkintr(sc, icr);
3439 }
3440
3441 if (icr & ICR_RXO) {
3442 #if defined(WM_DEBUG)
3443 log(LOG_WARNING, "%s: Receive overrun\n",
3444 device_xname(sc->sc_dev));
3445 #endif /* defined(WM_DEBUG) */
3446 }
3447 }
3448
3449 if (handled) {
3450 /* Try to get more packets going. */
3451 ifp->if_start(ifp);
3452 }
3453
3454 return handled;
3455 }
3456
3457 /*
3458 * wm_txintr:
3459 *
3460 * Helper; handle transmit interrupts.
3461 */
3462 static void
3463 wm_txintr(struct wm_softc *sc)
3464 {
3465 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3466 struct wm_txsoft *txs;
3467 uint8_t status;
3468 int i;
3469
3470 ifp->if_flags &= ~IFF_OACTIVE;
3471
3472 /*
3473 * Go through the Tx list and free mbufs for those
3474 * frames which have been transmitted.
3475 */
3476 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3477 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3478 txs = &sc->sc_txsoft[i];
3479
3480 DPRINTF(WM_DEBUG_TX,
3481 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3482
3483 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3484 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3485
3486 status =
3487 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3488 if ((status & WTX_ST_DD) == 0) {
3489 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3490 BUS_DMASYNC_PREREAD);
3491 break;
3492 }
3493
3494 DPRINTF(WM_DEBUG_TX,
3495 ("%s: TX: job %d done: descs %d..%d\n",
3496 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3497 txs->txs_lastdesc));
3498
3499 /*
3500 * XXX We should probably be using the statistics
3501 * XXX registers, but I don't know if they exist
3502 * XXX on chips before the i82544.
3503 */
3504
3505 #ifdef WM_EVENT_COUNTERS
3506 if (status & WTX_ST_TU)
3507 WM_EVCNT_INCR(&sc->sc_ev_tu);
3508 #endif /* WM_EVENT_COUNTERS */
3509
3510 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3511 ifp->if_oerrors++;
3512 if (status & WTX_ST_LC)
3513 log(LOG_WARNING, "%s: late collision\n",
3514 device_xname(sc->sc_dev));
3515 else if (status & WTX_ST_EC) {
3516 ifp->if_collisions += 16;
3517 log(LOG_WARNING, "%s: excessive collisions\n",
3518 device_xname(sc->sc_dev));
3519 }
3520 } else
3521 ifp->if_opackets++;
3522
3523 sc->sc_txfree += txs->txs_ndesc;
3524 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3525 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3526 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3527 m_freem(txs->txs_mbuf);
3528 txs->txs_mbuf = NULL;
3529 }
3530
3531 /* Update the dirty transmit buffer pointer. */
3532 sc->sc_txsdirty = i;
3533 DPRINTF(WM_DEBUG_TX,
3534 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3535
3536 /*
3537 * If there are no more pending transmissions, cancel the watchdog
3538 * timer.
3539 */
3540 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3541 ifp->if_timer = 0;
3542 }
3543
3544 /*
3545 * wm_rxintr:
3546 *
3547 * Helper; handle receive interrupts.
3548 */
3549 static void
3550 wm_rxintr(struct wm_softc *sc)
3551 {
3552 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3553 struct wm_rxsoft *rxs;
3554 struct mbuf *m;
3555 int i, len;
3556 uint8_t status, errors;
3557 uint16_t vlantag;
3558
3559 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3560 rxs = &sc->sc_rxsoft[i];
3561
3562 DPRINTF(WM_DEBUG_RX,
3563 ("%s: RX: checking descriptor %d\n",
3564 device_xname(sc->sc_dev), i));
3565
3566 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3567
3568 status = sc->sc_rxdescs[i].wrx_status;
3569 errors = sc->sc_rxdescs[i].wrx_errors;
3570 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3571 vlantag = sc->sc_rxdescs[i].wrx_special;
3572
3573 if ((status & WRX_ST_DD) == 0) {
3574 /*
3575 * We have processed all of the receive descriptors.
3576 */
3577 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3578 break;
3579 }
3580
3581 if (__predict_false(sc->sc_rxdiscard)) {
3582 DPRINTF(WM_DEBUG_RX,
3583 ("%s: RX: discarding contents of descriptor %d\n",
3584 device_xname(sc->sc_dev), i));
3585 WM_INIT_RXDESC(sc, i);
3586 if (status & WRX_ST_EOP) {
3587 /* Reset our state. */
3588 DPRINTF(WM_DEBUG_RX,
3589 ("%s: RX: resetting rxdiscard -> 0\n",
3590 device_xname(sc->sc_dev)));
3591 sc->sc_rxdiscard = 0;
3592 }
3593 continue;
3594 }
3595
3596 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3597 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3598
3599 m = rxs->rxs_mbuf;
3600
3601 /*
3602 * Add a new receive buffer to the ring, unless of
3603 * course the length is zero. Treat the latter as a
3604 * failed mapping.
3605 */
3606 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3607 /*
3608 * Failed, throw away what we've done so
3609 * far, and discard the rest of the packet.
3610 */
3611 ifp->if_ierrors++;
3612 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3613 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3614 WM_INIT_RXDESC(sc, i);
3615 if ((status & WRX_ST_EOP) == 0)
3616 sc->sc_rxdiscard = 1;
3617 if (sc->sc_rxhead != NULL)
3618 m_freem(sc->sc_rxhead);
3619 WM_RXCHAIN_RESET(sc);
3620 DPRINTF(WM_DEBUG_RX,
3621 ("%s: RX: Rx buffer allocation failed, "
3622 "dropping packet%s\n", device_xname(sc->sc_dev),
3623 sc->sc_rxdiscard ? " (discard)" : ""));
3624 continue;
3625 }
3626
3627 m->m_len = len;
3628 sc->sc_rxlen += len;
3629 DPRINTF(WM_DEBUG_RX,
3630 ("%s: RX: buffer at %p len %d\n",
3631 device_xname(sc->sc_dev), m->m_data, len));
3632
3633 /*
3634 * If this is not the end of the packet, keep
3635 * looking.
3636 */
3637 if ((status & WRX_ST_EOP) == 0) {
3638 WM_RXCHAIN_LINK(sc, m);
3639 DPRINTF(WM_DEBUG_RX,
3640 ("%s: RX: not yet EOP, rxlen -> %d\n",
3641 device_xname(sc->sc_dev), sc->sc_rxlen));
3642 continue;
3643 }
3644
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can be
		 * configured to strip it), so we need to trim it here.
		 * If the final mbuf is shorter than the FCS, we may also
		 * have to shorten the previous mbuf in the chain.
		 *
		 * The I350 is the exception: due to an erratum, the
		 * RCTL_SECRC bit in the RCTL register is always set on
		 * that chip, so the FCS has already been stripped and we
		 * must not trim it.
		 */
3655 if (sc->sc_type != WM_T_I350) {
3656 if (m->m_len < ETHER_CRC_LEN) {
3657 sc->sc_rxtail->m_len
3658 -= (ETHER_CRC_LEN - m->m_len);
3659 m->m_len = 0;
3660 } else
3661 m->m_len -= ETHER_CRC_LEN;
3662 len = sc->sc_rxlen - ETHER_CRC_LEN;
3663 } else
3664 len = sc->sc_rxlen;
3665
3666 WM_RXCHAIN_LINK(sc, m);
3667
3668 *sc->sc_rxtailp = NULL;
3669 m = sc->sc_rxhead;
3670
3671 WM_RXCHAIN_RESET(sc);
3672
3673 DPRINTF(WM_DEBUG_RX,
3674 ("%s: RX: have entire packet, len -> %d\n",
3675 device_xname(sc->sc_dev), len));
3676
3677 /*
3678 * If an error occurred, update stats and drop the packet.
3679 */
3680 if (errors &
3681 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3682 if (errors & WRX_ER_SE)
3683 log(LOG_WARNING, "%s: symbol error\n",
3684 device_xname(sc->sc_dev));
3685 else if (errors & WRX_ER_SEQ)
3686 log(LOG_WARNING, "%s: receive sequence error\n",
3687 device_xname(sc->sc_dev));
3688 else if (errors & WRX_ER_CE)
3689 log(LOG_WARNING, "%s: CRC error\n",
3690 device_xname(sc->sc_dev));
3691 m_freem(m);
3692 continue;
3693 }
3694
3695 /*
3696 * No errors. Receive the packet.
3697 */
3698 m->m_pkthdr.rcvif = ifp;
3699 m->m_pkthdr.len = len;
3700
3701 /*
3702 * If VLANs are enabled, VLAN packets have been unwrapped
3703 * for us. Associate the tag with the packet.
3704 */
3705 if ((status & WRX_ST_VP) != 0) {
3706 VLAN_INPUT_TAG(ifp, m,
3707 le16toh(vlantag),
3708 continue);
3709 }
3710
3711 /*
3712 * Set up checksum info for this packet.
3713 */
3714 if ((status & WRX_ST_IXSM) == 0) {
3715 if (status & WRX_ST_IPCS) {
3716 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3717 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3718 if (errors & WRX_ER_IPE)
3719 m->m_pkthdr.csum_flags |=
3720 M_CSUM_IPv4_BAD;
3721 }
3722 if (status & WRX_ST_TCPCS) {
3723 /*
3724 * Note: we don't know if this was TCP or UDP,
3725 * so we just set both bits, and expect the
3726 * upper layers to deal.
3727 */
3728 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3729 m->m_pkthdr.csum_flags |=
3730 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3731 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3732 if (errors & WRX_ER_TCPE)
3733 m->m_pkthdr.csum_flags |=
3734 M_CSUM_TCP_UDP_BAD;
3735 }
3736 }
3737
3738 ifp->if_ipackets++;
3739
3740 /* Pass this up to any BPF listeners. */
3741 bpf_mtap(ifp, m);
3742
3743 /* Pass it on. */
3744 (*ifp->if_input)(ifp, m);
3745 }
3746
3747 /* Update the receive pointer. */
3748 sc->sc_rxptr = i;
3749
3750 DPRINTF(WM_DEBUG_RX,
3751 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3752 }
3753
3754 /*
3755 * wm_linkintr_gmii:
3756 *
3757 * Helper; handle link interrupts for GMII.
3758 */
3759 static void
3760 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3761 {
3762
3763 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3764 __func__));
3765
3766 if (icr & ICR_LSC) {
3767 DPRINTF(WM_DEBUG_LINK,
3768 ("%s: LINK: LSC -> mii_tick\n",
3769 device_xname(sc->sc_dev)));
3770 mii_tick(&sc->sc_mii);
3771 if (sc->sc_type == WM_T_82543) {
3772 int miistatus, active;
3773
3774 /*
3775 * With 82543, we need to force speed and
3776 * duplex on the MAC equal to what the PHY
3777 * speed and duplex configuration is.
3778 */
3779 miistatus = sc->sc_mii.mii_media_status;
3780
3781 if (miistatus & IFM_ACTIVE) {
3782 active = sc->sc_mii.mii_media_active;
3783 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3784 switch (IFM_SUBTYPE(active)) {
3785 case IFM_10_T:
3786 sc->sc_ctrl |= CTRL_SPEED_10;
3787 break;
3788 case IFM_100_TX:
3789 sc->sc_ctrl |= CTRL_SPEED_100;
3790 break;
3791 case IFM_1000_T:
3792 sc->sc_ctrl |= CTRL_SPEED_1000;
3793 break;
3794 default:
3795 /*
					 * Fiber?  Should not enter here.
3798 */
3799 printf("unknown media (%x)\n",
3800 active);
3801 break;
3802 }
3803 if (active & IFM_FDX)
3804 sc->sc_ctrl |= CTRL_FD;
3805 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3806 }
3807 } else if ((sc->sc_type == WM_T_ICH8)
3808 && (sc->sc_phytype == WMPHY_IGP_3)) {
3809 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3810 } else if (sc->sc_type == WM_T_PCH) {
3811 wm_k1_gig_workaround_hv(sc,
3812 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3813 }
3814
3815 if ((sc->sc_phytype == WMPHY_82578)
3816 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3817 == IFM_1000_T)) {
3818
3819 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3820 delay(200*1000); /* XXX too big */
3821
3822 /* Link stall fix for link up */
3823 wm_gmii_hv_writereg(sc->sc_dev, 1,
3824 HV_MUX_DATA_CTRL,
3825 HV_MUX_DATA_CTRL_GEN_TO_MAC
3826 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3827 wm_gmii_hv_writereg(sc->sc_dev, 1,
3828 HV_MUX_DATA_CTRL,
3829 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3830 }
3831 }
3832 } else if (icr & ICR_RXSEQ) {
3833 DPRINTF(WM_DEBUG_LINK,
3834 ("%s: LINK Receive sequence error\n",
3835 device_xname(sc->sc_dev)));
3836 }
3837 }
3838
3839 /*
3840 * wm_linkintr_tbi:
3841 *
3842 * Helper; handle link interrupts for TBI mode.
3843 */
3844 static void
3845 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3846 {
3847 uint32_t status;
3848
3849 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3850 __func__));
3851
3852 status = CSR_READ(sc, WMREG_STATUS);
3853 if (icr & ICR_LSC) {
3854 if (status & STATUS_LU) {
3855 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3856 device_xname(sc->sc_dev),
3857 (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: the hardware may update TFCE and RFCE in
			 * CTRL automatically, so refresh our cached
			 * sc->sc_ctrl from the register.
			 */
3862
3863 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3864 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3865 sc->sc_fcrtl &= ~FCRTL_XONE;
3866 if (status & STATUS_FD)
3867 sc->sc_tctl |=
3868 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3869 else
3870 sc->sc_tctl |=
3871 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3872 if (sc->sc_ctrl & CTRL_TFCE)
3873 sc->sc_fcrtl |= FCRTL_XONE;
3874 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3875 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3876 WMREG_OLD_FCRTL : WMREG_FCRTL,
3877 sc->sc_fcrtl);
3878 sc->sc_tbi_linkup = 1;
3879 } else {
3880 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3881 device_xname(sc->sc_dev)));
3882 sc->sc_tbi_linkup = 0;
3883 }
3884 wm_tbi_set_linkled(sc);
3885 } else if (icr & ICR_RXCFG) {
3886 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3887 device_xname(sc->sc_dev)));
3888 sc->sc_tbi_nrxcfg++;
3889 wm_check_for_link(sc);
3890 } else if (icr & ICR_RXSEQ) {
3891 DPRINTF(WM_DEBUG_LINK,
3892 ("%s: LINK: Receive sequence error\n",
3893 device_xname(sc->sc_dev)));
3894 }
3895 }
3896
3897 /*
3898 * wm_linkintr:
3899 *
3900 * Helper; handle link interrupts.
3901 */
3902 static void
3903 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3904 {
3905
3906 if (sc->sc_flags & WM_F_HAS_MII)
3907 wm_linkintr_gmii(sc, icr);
3908 else
3909 wm_linkintr_tbi(sc, icr);
3910 }
3911
3912 /*
3913 * wm_tick:
3914 *
3915 * One second timer, used to check link status, sweep up
3916 * completed transmit jobs, etc.
3917 */
3918 static void
3919 wm_tick(void *arg)
3920 {
3921 struct wm_softc *sc = arg;
3922 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3923 int s;
3924
3925 s = splnet();
3926
3927 if (sc->sc_type >= WM_T_82542_2_1) {
3928 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3929 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3930 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3931 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3932 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3933 }
3934
3935 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += 0ULL /* ensure quad_t */
3937 + CSR_READ(sc, WMREG_CRCERRS)
3938 + CSR_READ(sc, WMREG_ALGNERRC)
3939 + CSR_READ(sc, WMREG_SYMERRC)
3940 + CSR_READ(sc, WMREG_RXERRC)
3941 + CSR_READ(sc, WMREG_SEC)
3942 + CSR_READ(sc, WMREG_CEXTERR)
3943 + CSR_READ(sc, WMREG_RLEC);
3944 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3945
3946 if (sc->sc_flags & WM_F_HAS_MII)
3947 mii_tick(&sc->sc_mii);
3948 else
3949 wm_tbi_check_link(sc);
3950
3951 splx(s);
3952
3953 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3954 }
3955
3956 /*
3957 * wm_reset:
3958 *
3959 * Reset the i82542 chip.
3960 */
3961 static void
3962 wm_reset(struct wm_softc *sc)
3963 {
3964 int phy_reset = 0;
3965 uint32_t reg, mask;
3966 int i;
3967
3968 /*
3969 * Allocate on-chip memory according to the MTU size.
3970 * The Packet Buffer Allocation register must be written
3971 * before the chip is reset.
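	 *
	 * For the 82547 below, for example, choosing PBA_30K leaves
	 * (PBA_40K - PBA_30K) = 10KB of the packet buffer for the
	 * Tx FIFO.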
3972 */
3973 switch (sc->sc_type) {
3974 case WM_T_82547:
3975 case WM_T_82547_2:
3976 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3977 PBA_22K : PBA_30K;
3978 sc->sc_txfifo_head = 0;
3979 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3980 sc->sc_txfifo_size =
3981 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3982 sc->sc_txfifo_stall = 0;
3983 break;
3984 case WM_T_82571:
3985 case WM_T_82572:
	case WM_T_82575: /* XXX need special handling for jumbo frames */
3987 case WM_T_I350:
3988 case WM_T_80003:
3989 sc->sc_pba = PBA_32K;
3990 break;
3991 case WM_T_82580:
3992 case WM_T_82580ER:
3993 sc->sc_pba = PBA_35K;
3994 break;
3995 case WM_T_82576:
3996 sc->sc_pba = PBA_64K;
3997 break;
3998 case WM_T_82573:
3999 sc->sc_pba = PBA_12K;
4000 break;
4001 case WM_T_82574:
4002 case WM_T_82583:
4003 sc->sc_pba = PBA_20K;
4004 break;
4005 case WM_T_ICH8:
4006 sc->sc_pba = PBA_8K;
4007 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4008 break;
4009 case WM_T_ICH9:
4010 case WM_T_ICH10:
4011 sc->sc_pba = PBA_10K;
4012 break;
4013 case WM_T_PCH:
4014 case WM_T_PCH2:
4015 sc->sc_pba = PBA_26K;
4016 break;
4017 default:
4018 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4019 PBA_40K : PBA_48K;
4020 break;
4021 }
4022 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4023
	/*
	 * Disable PCIe master access and wait for pending transactions
	 * to drain, so the bus doesn't get stuck across the reset.
	 */
4025 if (sc->sc_flags & WM_F_PCIE) {
4026 int timeout = 800;
4027
4028 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4029 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4030
4031 while (timeout--) {
4032 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4033 == 0)
4034 break;
4035 delay(100);
4036 }
4037 }
4038
4039 /* Set the completion timeout for interface */
4040 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4041 || (sc->sc_type == WM_T_I350))
4042 wm_set_pcie_completion_timeout(sc);
4043
4044 /* Clear interrupt */
4045 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4046
4047 /* Stop the transmit and receive processes. */
4048 CSR_WRITE(sc, WMREG_RCTL, 0);
4049 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4050 sc->sc_rctl &= ~RCTL_EN;
4051
4052 /* XXX set_tbi_sbp_82543() */
4053
4054 delay(10*1000);
4055
4056 /* Must acquire the MDIO ownership before MAC reset */
4057 switch (sc->sc_type) {
4058 case WM_T_82573:
4059 case WM_T_82574:
4060 case WM_T_82583:
4061 i = 0;
4062 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4063 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4064 do {
4065 CSR_WRITE(sc, WMREG_EXTCNFCTR,
4066 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4067 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4068 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4069 break;
4070 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4071 delay(2*1000);
4072 i++;
4073 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4074 break;
4075 default:
4076 break;
4077 }
4078
4079 /*
4080 * 82541 Errata 29? & 82547 Errata 28?
4081 * See also the description about PHY_RST bit in CTRL register
4082 * in 8254x_GBe_SDM.pdf.
4083 */
4084 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4085 CSR_WRITE(sc, WMREG_CTRL,
4086 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4087 delay(5000);
4088 }
4089
4090 switch (sc->sc_type) {
4091 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4092 case WM_T_82541:
4093 case WM_T_82541_2:
4094 case WM_T_82547:
4095 case WM_T_82547_2:
4096 /*
4097 * On some chipsets, a reset through a memory-mapped write
4098 * cycle can cause the chip to reset before completing the
4099 * write cycle. This causes major headache that can be
4100 * avoided by issuing the reset via indirect register writes
4101 * through I/O space.
4102 *
4103 * So, if we successfully mapped the I/O BAR at attach time,
4104 * use that. Otherwise, try our luck with a memory-mapped
4105 * reset.
4106 */
4107 if (sc->sc_flags & WM_F_IOH_VALID)
4108 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4109 else
4110 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4111 break;
4112 case WM_T_82545_3:
4113 case WM_T_82546_3:
4114 /* Use the shadow control register on these chips. */
4115 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4116 break;
4117 case WM_T_80003:
4118 mask = swfwphysem[sc->sc_funcid];
4119 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4120 wm_get_swfw_semaphore(sc, mask);
4121 CSR_WRITE(sc, WMREG_CTRL, reg);
4122 wm_put_swfw_semaphore(sc, mask);
4123 break;
4124 case WM_T_ICH8:
4125 case WM_T_ICH9:
4126 case WM_T_ICH10:
4127 case WM_T_PCH:
4128 case WM_T_PCH2:
4129 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4130 if (wm_check_reset_block(sc) == 0) {
4131 /*
4132 * Gate automatic PHY configuration by hardware on
4133 * non-managed 82579
4134 */
4135 if ((sc->sc_type == WM_T_PCH2)
4136 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4137 != 0))
4138 wm_gate_hw_phy_config_ich8lan(sc, 1);
4139
4140
4141 reg |= CTRL_PHY_RESET;
4142 phy_reset = 1;
4143 }
4144 wm_get_swfwhw_semaphore(sc);
4145 CSR_WRITE(sc, WMREG_CTRL, reg);
4146 delay(20*1000);
4147 wm_put_swfwhw_semaphore(sc);
4148 break;
4149 case WM_T_82542_2_0:
4150 case WM_T_82542_2_1:
4151 case WM_T_82543:
4152 case WM_T_82540:
4153 case WM_T_82545:
4154 case WM_T_82546:
4155 case WM_T_82571:
4156 case WM_T_82572:
4157 case WM_T_82573:
4158 case WM_T_82574:
4159 case WM_T_82575:
4160 case WM_T_82576:
4161 case WM_T_82580:
4162 case WM_T_82580ER:
4163 case WM_T_82583:
4164 case WM_T_I350:
4165 default:
4166 /* Everything else can safely use the documented method. */
4167 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4168 break;
4169 }
4170
4171 if (phy_reset != 0)
4172 wm_get_cfg_done(sc);
4173
4174 /* reload EEPROM */
4175 switch (sc->sc_type) {
4176 case WM_T_82542_2_0:
4177 case WM_T_82542_2_1:
4178 case WM_T_82543:
4179 case WM_T_82544:
4180 delay(10);
4181 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4182 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4183 delay(2000);
4184 break;
4185 case WM_T_82540:
4186 case WM_T_82545:
4187 case WM_T_82545_3:
4188 case WM_T_82546:
4189 case WM_T_82546_3:
4190 delay(5*1000);
4191 /* XXX Disable HW ARPs on ASF enabled adapters */
4192 break;
4193 case WM_T_82541:
4194 case WM_T_82541_2:
4195 case WM_T_82547:
4196 case WM_T_82547_2:
4197 delay(20000);
4198 /* XXX Disable HW ARPs on ASF enabled adapters */
4199 break;
4200 case WM_T_82571:
4201 case WM_T_82572:
4202 case WM_T_82573:
4203 case WM_T_82574:
4204 case WM_T_82583:
4205 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4206 delay(10);
4207 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4208 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4209 }
4210 /* check EECD_EE_AUTORD */
4211 wm_get_auto_rd_done(sc);
		/*
		 * PHY configuration from the NVM starts only after
		 * EECD_AUTO_RD is set.
		 */
4216 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4217 || (sc->sc_type == WM_T_82583))
4218 delay(25*1000);
4219 break;
4220 case WM_T_82575:
4221 case WM_T_82576:
4222 case WM_T_82580:
4223 case WM_T_82580ER:
4224 case WM_T_I350:
4225 case WM_T_80003:
4226 case WM_T_ICH8:
4227 case WM_T_ICH9:
4228 /* check EECD_EE_AUTORD */
4229 wm_get_auto_rd_done(sc);
4230 break;
4231 case WM_T_ICH10:
4232 case WM_T_PCH:
4233 case WM_T_PCH2:
4234 wm_lan_init_done(sc);
4235 break;
4236 default:
4237 panic("%s: unknown type\n", __func__);
4238 }
4239
4240 /* Check whether EEPROM is present or not */
4241 switch (sc->sc_type) {
4242 case WM_T_82575:
4243 case WM_T_82576:
4244 #if 0 /* XXX */
4245 case WM_T_82580:
4246 case WM_T_82580ER:
4247 #endif
4248 case WM_T_I350:
4249 case WM_T_ICH8:
4250 case WM_T_ICH9:
4251 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4252 /* Not found */
4253 sc->sc_flags |= WM_F_EEPROM_INVALID;
4254 if ((sc->sc_type == WM_T_82575)
4255 || (sc->sc_type == WM_T_82576)
4256 || (sc->sc_type == WM_T_82580)
4257 || (sc->sc_type == WM_T_82580ER)
4258 || (sc->sc_type == WM_T_I350))
4259 wm_reset_init_script_82575(sc);
4260 }
4261 break;
4262 default:
4263 break;
4264 }
4265
4266 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4267 || (sc->sc_type == WM_T_I350)) {
4268 /* clear global device reset status bit */
4269 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4270 }
4271
4272 /* Clear any pending interrupt events. */
4273 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4274 reg = CSR_READ(sc, WMREG_ICR);
4275
4276 /* reload sc_ctrl */
4277 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4278
4279 if (sc->sc_type == WM_T_I350)
4280 wm_set_eee_i350(sc);
4281
4282 /* dummy read from WUC */
4283 if (sc->sc_type == WM_T_PCH)
4284 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4285 /*
4286 * For PCH, this write will make sure that any noise will be detected
4287 * as a CRC error and be dropped rather than show up as a bad packet
4288 * to the DMA engine
4289 */
4290 if (sc->sc_type == WM_T_PCH)
4291 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4292
4293 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4294 CSR_WRITE(sc, WMREG_WUC, 0);
4295
4296 /* XXX need special handling for 82580 */
4297 }
4298
4299 static void
4300 wm_set_vlan(struct wm_softc *sc)
4301 {
4302 /* Deal with VLAN enables. */
4303 if (VLAN_ATTACHED(&sc->sc_ethercom))
4304 sc->sc_ctrl |= CTRL_VME;
4305 else
4306 sc->sc_ctrl &= ~CTRL_VME;
4307
4308 /* Write the control registers. */
4309 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4310 }
4311
4312 /*
4313 * wm_init: [ifnet interface function]
4314 *
4315 * Initialize the interface. Must be called at splnet().
4316 */
4317 static int
4318 wm_init(struct ifnet *ifp)
4319 {
4320 struct wm_softc *sc = ifp->if_softc;
4321 struct wm_rxsoft *rxs;
4322 int i, j, trynum, error = 0;
4323 uint32_t reg;
4324
4325 /*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * On such platforms there is a small but measurable benefit, at
	 * normal MTU, to skipping the buffer adjustment that would align
	 * the headers.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet
	 * (instead of the front of the headers) is aligned.
4332 *
4333 * Note we must always set align_tweak to 0 if we are using
4334 * jumbo frames.
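	 *
	 * (With a 2-byte tweak, the 14-byte Ethernet header ends on a
	 * 4-byte boundary, so the IP header that follows it is 4-byte
	 * aligned on strict-alignment platforms.)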
4335 */
4336 #ifdef __NO_STRICT_ALIGNMENT
4337 sc->sc_align_tweak = 0;
4338 #else
4339 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4340 sc->sc_align_tweak = 0;
4341 else
4342 sc->sc_align_tweak = 2;
4343 #endif /* __NO_STRICT_ALIGNMENT */
4344
4345 /* Cancel any pending I/O. */
4346 wm_stop(ifp, 0);
4347
4348 /* update statistics before reset */
4349 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4350 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4351
4352 /* Reset the chip to a known state. */
4353 wm_reset(sc);
4354
4355 switch (sc->sc_type) {
4356 case WM_T_82571:
4357 case WM_T_82572:
4358 case WM_T_82573:
4359 case WM_T_82574:
4360 case WM_T_82583:
4361 case WM_T_80003:
4362 case WM_T_ICH8:
4363 case WM_T_ICH9:
4364 case WM_T_ICH10:
4365 case WM_T_PCH:
4366 case WM_T_PCH2:
4367 if (wm_check_mng_mode(sc) != 0)
4368 wm_get_hw_control(sc);
4369 break;
4370 default:
4371 break;
4372 }
4373
4374 /* Reset the PHY. */
4375 if (sc->sc_flags & WM_F_HAS_MII)
4376 wm_gmii_reset(sc);
4377
4378 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4379 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4380 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
4381 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4382
4383 /* Initialize the transmit descriptor ring. */
4384 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4385 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4386 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4387 sc->sc_txfree = WM_NTXDESC(sc);
4388 sc->sc_txnext = 0;
4389
4390 if (sc->sc_type < WM_T_82543) {
4391 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4392 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4393 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4394 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4395 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4396 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4397 } else {
4398 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4399 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4400 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4401 CSR_WRITE(sc, WMREG_TDH, 0);
4402 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4403 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4404
4405 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4406 /*
4407 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
4409 */
4410 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4411 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4412 | TXDCTL_WTHRESH(0));
4413 else {
4414 CSR_WRITE(sc, WMREG_TDT, 0);
4415 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4416 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4417 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4418 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4419 }
4420 }
4421 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4422 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4423
4424 /* Initialize the transmit job descriptors. */
4425 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4426 sc->sc_txsoft[i].txs_mbuf = NULL;
4427 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4428 sc->sc_txsnext = 0;
4429 sc->sc_txsdirty = 0;
4430
4431 /*
4432 * Initialize the receive descriptor and receive job
4433 * descriptor rings.
4434 */
4435 if (sc->sc_type < WM_T_82543) {
4436 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4437 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4438 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4439 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4440 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4441 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4442
4443 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4444 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4445 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4446 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4447 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4448 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4449 } else {
4450 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4451 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4452 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4453 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4454 CSR_WRITE(sc, WMREG_EITR(0), 450);
4455 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or newer\n", __func__, MCLBYTES);
4457 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4458 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4459 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4460 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4461 | RXDCTL_WTHRESH(1));
4462 } else {
4463 CSR_WRITE(sc, WMREG_RDH, 0);
4464 CSR_WRITE(sc, WMREG_RDT, 0);
4465 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4466 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4467 }
4468 }
4469 for (i = 0; i < WM_NRXDESC; i++) {
4470 rxs = &sc->sc_rxsoft[i];
4471 if (rxs->rxs_mbuf == NULL) {
4472 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4473 log(LOG_ERR, "%s: unable to allocate or map "
4474 "rx buffer %d, error = %d\n",
4475 device_xname(sc->sc_dev), i, error);
4476 /*
4477 * XXX Should attempt to run with fewer receive
4478 * XXX buffers instead of just failing.
4479 */
4480 wm_rxdrain(sc);
4481 goto out;
4482 }
4483 } else {
4484 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4485 WM_INIT_RXDESC(sc, i);
			/*
			 * For 82575 and newer devices, the RX descriptors
			 * must be initialized only after RCTL.EN is set,
			 * in wm_set_filter().
			 */
4491 }
4492 }
4493 sc->sc_rxptr = 0;
4494 sc->sc_rxdiscard = 0;
4495 WM_RXCHAIN_RESET(sc);
4496
4497 /*
4498 * Clear out the VLAN table -- we don't use it (yet).
4499 */
4500 CSR_WRITE(sc, WMREG_VET, 0);
4501 if (sc->sc_type == WM_T_I350)
4502 trynum = 10; /* Due to hw errata */
4503 else
4504 trynum = 1;
4505 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4506 for (j = 0; j < trynum; j++)
4507 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4508
4509 /*
4510 * Set up flow-control parameters.
4511 *
4512 * XXX Values could probably stand some tuning.
4513 */
4514 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4515 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4516 && (sc->sc_type != WM_T_PCH2)) {
4517 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4518 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4519 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4520 }
4521
4522 sc->sc_fcrtl = FCRTL_DFLT;
4523 if (sc->sc_type < WM_T_82543) {
4524 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4525 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4526 } else {
4527 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4528 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4529 }
4530
4531 if (sc->sc_type == WM_T_80003)
4532 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4533 else
4534 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4535
4536 /* Writes the control register. */
4537 wm_set_vlan(sc);
4538
4539 if (sc->sc_flags & WM_F_HAS_MII) {
4540 int val;
4541
4542 switch (sc->sc_type) {
4543 case WM_T_80003:
4544 case WM_T_ICH8:
4545 case WM_T_ICH9:
4546 case WM_T_ICH10:
4547 case WM_T_PCH:
4548 case WM_T_PCH2:
			/*
			 * Set the MAC to wait the maximum time between
			 * each iteration and to increase the max
			 * iterations when polling the PHY; this fixes
			 * erroneous timeouts at 10Mbps.
			 */
4555 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4556 0xFFFF);
4557 val = wm_kmrn_readreg(sc,
4558 KUMCTRLSTA_OFFSET_INB_PARAM);
4559 val |= 0x3F;
4560 wm_kmrn_writereg(sc,
4561 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4562 break;
4563 default:
4564 break;
4565 }
4566
4567 if (sc->sc_type == WM_T_80003) {
4568 val = CSR_READ(sc, WMREG_CTRL_EXT);
4569 val &= ~CTRL_EXT_LINK_MODE_MASK;
4570 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4571
4572 /* Bypass RX and TX FIFO's */
4573 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4574 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4575 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4576 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4577 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4578 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4579 }
4580 }
4581 #if 0
4582 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4583 #endif
4584
4585 /*
4586 * Set up checksum offload parameters.
4587 */
4588 reg = CSR_READ(sc, WMREG_RXCSUM);
4589 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4590 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4591 reg |= RXCSUM_IPOFL;
4592 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4593 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4594 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4595 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4596 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4597
4598 /* Reset TBI's RXCFG count */
4599 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4600
4601 /*
4602 * Set up the interrupt registers.
4603 */
4604 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4605 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4606 ICR_RXO | ICR_RXT0;
4607 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4608 sc->sc_icr |= ICR_RXCFG;
4609 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4610
4611 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4612 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4613 || (sc->sc_type == WM_T_PCH2)) {
4614 reg = CSR_READ(sc, WMREG_KABGTXD);
4615 reg |= KABGTXD_BGSQLBIAS;
4616 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4617 }
4618
4619 /* Set up the inter-packet gap. */
4620 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4621
4622 if (sc->sc_type >= WM_T_82543) {
4623 /*
4624 * Set up the interrupt throttling register (units of 256ns)
4625 * Note that a footnote in Intel's documentation says this
4626 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4627 * or 10Mbit mode. Empirically, it appears to be the case
4628 * that that is also true for the 1024ns units of the other
4629 * interrupt-related timer registers -- so, really, we ought
4630 * to divide this value by 4 when the link speed is low.
4631 *
4632 * XXX implement this division at link speed change!
4633 */
4634
4635 /*
4636 * For N interrupts/sec, set this value to:
4637 * 1000000000 / (N * 256). Note that we set the
4638 * absolute and packet timer values to this value
4639 * divided by 4 to get "simple timer" behavior.
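		 *
		 * Illustrative check: the value 1500 used below yields
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, and
		 * the TIDV/TADV and RDTR/RADV registers above were
		 * accordingly written with 1500 / 4 = 375.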
4640 */
4641
4642 sc->sc_itr = 1500; /* 2604 ints/sec */
4643 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4644 }
4645
4646 /* Set the VLAN ethernetype. */
4647 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4648
	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
4654 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4655 | TCTL_CT(TX_COLLISION_THRESHOLD)
4656 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4657 if (sc->sc_type >= WM_T_82571)
4658 sc->sc_tctl |= TCTL_MULR;
4659 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4660
4661 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4662 /*
4663 * Write TDT after TCTL.EN is set.
		 * See the documentation.
4665 */
4666 CSR_WRITE(sc, WMREG_TDT, 0);
4667 }
4668
4669 if (sc->sc_type == WM_T_80003) {
4670 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4671 reg &= ~TCTL_EXT_GCEX_MASK;
4672 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4673 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4674 }
4675
4676 /* Set the media. */
4677 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4678 goto out;
4679
4680 /* Configure for OS presence */
4681 wm_init_manageability(sc);
4682
4683 /*
4684 * Set up the receive control register; we actually program
4685 * the register when we set the receive filter. Use multicast
4686 * address offset type 0.
4687 *
4688 * Only the i82544 has the ability to strip the incoming
4689 * CRC, so we don't enable that feature.
4690 */
4691 sc->sc_mchash_type = 0;
4692 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4693 | RCTL_MO(sc->sc_mchash_type);
4694
	/*
	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not.  So ask for stripped CRC here and cope
	 * with it in wm_rxintr().
	 */
4699 if (sc->sc_type == WM_T_I350)
4700 sc->sc_rctl |= RCTL_SECRC;
4701
4702 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4703 && (ifp->if_mtu > ETHERMTU)) {
4704 sc->sc_rctl |= RCTL_LPE;
4705 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4706 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4707 }
4708
4709 if (MCLBYTES == 2048) {
4710 sc->sc_rctl |= RCTL_2k;
4711 } else {
4712 if (sc->sc_type >= WM_T_82543) {
4713 switch (MCLBYTES) {
4714 case 4096:
4715 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4716 break;
4717 case 8192:
4718 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4719 break;
4720 case 16384:
4721 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4722 break;
4723 default:
4724 panic("wm_init: MCLBYTES %d unsupported",
4725 MCLBYTES);
4726 break;
4727 }
4728 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4729 }
4730
4731 /* Set the receive filter. */
4732 wm_set_filter(sc);
4733
4734 /* On 575 and later set RDT only if RX enabled */
4735 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4736 for (i = 0; i < WM_NRXDESC; i++)
4737 WM_INIT_RXDESC(sc, i);
4738
4739 /* Start the one second link check clock. */
4740 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4741
4742 /* ...all done! */
4743 ifp->if_flags |= IFF_RUNNING;
4744 ifp->if_flags &= ~IFF_OACTIVE;
4745
4746 out:
4747 sc->sc_if_flags = ifp->if_flags;
4748 if (error)
4749 log(LOG_ERR, "%s: interface not running\n",
4750 device_xname(sc->sc_dev));
4751 return error;
4752 }
4753
4754 /*
4755 * wm_rxdrain:
4756 *
4757 * Drain the receive queue.
4758 */
4759 static void
4760 wm_rxdrain(struct wm_softc *sc)
4761 {
4762 struct wm_rxsoft *rxs;
4763 int i;
4764
4765 for (i = 0; i < WM_NRXDESC; i++) {
4766 rxs = &sc->sc_rxsoft[i];
4767 if (rxs->rxs_mbuf != NULL) {
4768 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4769 m_freem(rxs->rxs_mbuf);
4770 rxs->rxs_mbuf = NULL;
4771 }
4772 }
4773 }
4774
4775 /*
4776 * wm_stop: [ifnet interface function]
4777 *
4778 * Stop transmission on the interface.
4779 */
4780 static void
4781 wm_stop(struct ifnet *ifp, int disable)
4782 {
4783 struct wm_softc *sc = ifp->if_softc;
4784 struct wm_txsoft *txs;
4785 int i;
4786
4787 /* Stop the one second clock. */
4788 callout_stop(&sc->sc_tick_ch);
4789
4790 /* Stop the 82547 Tx FIFO stall check timer. */
4791 if (sc->sc_type == WM_T_82547)
4792 callout_stop(&sc->sc_txfifo_ch);
4793
4794 if (sc->sc_flags & WM_F_HAS_MII) {
4795 /* Down the MII. */
4796 mii_down(&sc->sc_mii);
4797 } else {
4798 #if 0
4799 /* Should we clear PHY's status properly? */
4800 wm_reset(sc);
4801 #endif
4802 }
4803
4804 /* Stop the transmit and receive processes. */
4805 CSR_WRITE(sc, WMREG_TCTL, 0);
4806 CSR_WRITE(sc, WMREG_RCTL, 0);
4807 sc->sc_rctl &= ~RCTL_EN;
4808
4809 /*
4810 * Clear the interrupt mask to ensure the device cannot assert its
4811 * interrupt line.
4812 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4813 * any currently pending or shared interrupt.
4814 */
4815 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4816 sc->sc_icr = 0;
4817
4818 /* Release any queued transmit buffers. */
4819 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4820 txs = &sc->sc_txsoft[i];
4821 if (txs->txs_mbuf != NULL) {
4822 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4823 m_freem(txs->txs_mbuf);
4824 txs->txs_mbuf = NULL;
4825 }
4826 }
4827
4828 /* Mark the interface as down and cancel the watchdog timer. */
4829 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4830 ifp->if_timer = 0;
4831
4832 if (disable)
4833 wm_rxdrain(sc);
4834
4835 #if 0 /* notyet */
4836 if (sc->sc_type >= WM_T_82544)
4837 CSR_WRITE(sc, WMREG_WUC, 0);
4838 #endif
4839 }
4840
4841 void
4842 wm_get_auto_rd_done(struct wm_softc *sc)
4843 {
4844 int i;
4845
4846 /* wait for eeprom to reload */
4847 switch (sc->sc_type) {
4848 case WM_T_82571:
4849 case WM_T_82572:
4850 case WM_T_82573:
4851 case WM_T_82574:
4852 case WM_T_82583:
4853 case WM_T_82575:
4854 case WM_T_82576:
4855 case WM_T_82580:
4856 case WM_T_82580ER:
4857 case WM_T_I350:
4858 case WM_T_80003:
4859 case WM_T_ICH8:
4860 case WM_T_ICH9:
4861 for (i = 0; i < 10; i++) {
4862 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4863 break;
4864 delay(1000);
4865 }
4866 if (i == 10) {
4867 log(LOG_ERR, "%s: auto read from eeprom failed to "
4868 "complete\n", device_xname(sc->sc_dev));
4869 }
4870 break;
4871 default:
4872 break;
4873 }
4874 }
4875
4876 void
4877 wm_lan_init_done(struct wm_softc *sc)
4878 {
4879 uint32_t reg = 0;
4880 int i;
4881
4882 /* wait for eeprom to reload */
4883 switch (sc->sc_type) {
4884 case WM_T_ICH10:
4885 case WM_T_PCH:
4886 case WM_T_PCH2:
4887 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4888 reg = CSR_READ(sc, WMREG_STATUS);
4889 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4890 break;
4891 delay(100);
4892 }
4893 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4894 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4895 "complete\n", device_xname(sc->sc_dev), __func__);
4896 }
4897 break;
4898 default:
4899 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4900 __func__);
4901 break;
4902 }
4903
4904 reg &= ~STATUS_LAN_INIT_DONE;
4905 CSR_WRITE(sc, WMREG_STATUS, reg);
4906 }
4907
4908 void
4909 wm_get_cfg_done(struct wm_softc *sc)
4910 {
4911 int mask;
4912 uint32_t reg;
4913 int i;
4914
4915 /* wait for eeprom to reload */
4916 switch (sc->sc_type) {
4917 case WM_T_82542_2_0:
4918 case WM_T_82542_2_1:
4919 /* null */
4920 break;
4921 case WM_T_82543:
4922 case WM_T_82544:
4923 case WM_T_82540:
4924 case WM_T_82545:
4925 case WM_T_82545_3:
4926 case WM_T_82546:
4927 case WM_T_82546_3:
4928 case WM_T_82541:
4929 case WM_T_82541_2:
4930 case WM_T_82547:
4931 case WM_T_82547_2:
4932 case WM_T_82573:
4933 case WM_T_82574:
4934 case WM_T_82583:
4935 /* generic */
4936 delay(10*1000);
4937 break;
4938 case WM_T_80003:
4939 case WM_T_82571:
4940 case WM_T_82572:
4941 case WM_T_82575:
4942 case WM_T_82576:
4943 case WM_T_82580:
4944 case WM_T_82580ER:
4945 case WM_T_I350:
4946 if (sc->sc_type == WM_T_82571) {
4947 /* Only 82571 shares port 0 */
4948 mask = EEMNGCTL_CFGDONE_0;
4949 } else
4950 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4951 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4952 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4953 break;
4954 delay(1000);
4955 }
4956 if (i >= WM_PHY_CFG_TIMEOUT) {
4957 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4958 device_xname(sc->sc_dev), __func__));
4959 }
4960 break;
4961 case WM_T_ICH8:
4962 case WM_T_ICH9:
4963 case WM_T_ICH10:
4964 case WM_T_PCH:
4965 case WM_T_PCH2:
4966 if (sc->sc_type >= WM_T_PCH) {
4967 reg = CSR_READ(sc, WMREG_STATUS);
4968 if ((reg & STATUS_PHYRA) != 0)
4969 CSR_WRITE(sc, WMREG_STATUS,
4970 reg & ~STATUS_PHYRA);
4971 }
4972 delay(10*1000);
4973 break;
4974 default:
4975 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4976 __func__);
4977 break;
4978 }
4979 }
4980
4981 /*
4982 * wm_acquire_eeprom:
4983 *
4984 * Perform the EEPROM handshake required on some chips.
4985 */
4986 static int
4987 wm_acquire_eeprom(struct wm_softc *sc)
4988 {
4989 uint32_t reg;
4990 int x;
4991 int ret = 0;
4992
4993 /* always success */
4994 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4995 return 0;
4996
4997 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4998 ret = wm_get_swfwhw_semaphore(sc);
4999 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5000 /* this will also do wm_get_swsm_semaphore() if needed */
5001 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5002 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5003 ret = wm_get_swsm_semaphore(sc);
5004 }
5005
5006 if (ret) {
5007 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5008 __func__);
5009 return 1;
5010 }
5011
5012 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5013 reg = CSR_READ(sc, WMREG_EECD);
5014
5015 /* Request EEPROM access. */
5016 reg |= EECD_EE_REQ;
5017 CSR_WRITE(sc, WMREG_EECD, reg);
5018
		/* ... and wait for it to be granted. */
5020 for (x = 0; x < 1000; x++) {
5021 reg = CSR_READ(sc, WMREG_EECD);
5022 if (reg & EECD_EE_GNT)
5023 break;
5024 delay(5);
5025 }
5026 if ((reg & EECD_EE_GNT) == 0) {
5027 aprint_error_dev(sc->sc_dev,
5028 "could not acquire EEPROM GNT\n");
5029 reg &= ~EECD_EE_REQ;
5030 CSR_WRITE(sc, WMREG_EECD, reg);
5031 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5032 wm_put_swfwhw_semaphore(sc);
5033 if (sc->sc_flags & WM_F_SWFW_SYNC)
5034 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5035 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5036 wm_put_swsm_semaphore(sc);
5037 return 1;
5038 }
5039 }
5040
5041 return 0;
5042 }
5043
5044 /*
5045 * wm_release_eeprom:
5046 *
5047 * Release the EEPROM mutex.
5048 */
5049 static void
5050 wm_release_eeprom(struct wm_softc *sc)
5051 {
5052 uint32_t reg;
5053
5054 /* always success */
5055 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5056 return;
5057
5058 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5059 reg = CSR_READ(sc, WMREG_EECD);
5060 reg &= ~EECD_EE_REQ;
5061 CSR_WRITE(sc, WMREG_EECD, reg);
5062 }
5063
5064 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5065 wm_put_swfwhw_semaphore(sc);
5066 if (sc->sc_flags & WM_F_SWFW_SYNC)
5067 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5068 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5069 wm_put_swsm_semaphore(sc);
5070 }
5071
5072 /*
5073 * wm_eeprom_sendbits:
5074 *
5075 * Send a series of bits to the EEPROM.
5076 */
5077 static void
5078 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5079 {
5080 uint32_t reg;
5081 int x;
5082
5083 reg = CSR_READ(sc, WMREG_EECD);
5084
5085 for (x = nbits; x > 0; x--) {
5086 if (bits & (1U << (x - 1)))
5087 reg |= EECD_DI;
5088 else
5089 reg &= ~EECD_DI;
5090 CSR_WRITE(sc, WMREG_EECD, reg);
5091 delay(2);
5092 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5093 delay(2);
5094 CSR_WRITE(sc, WMREG_EECD, reg);
5095 delay(2);
5096 }
5097 }
5098
5099 /*
5100 * wm_eeprom_recvbits:
5101 *
5102 * Receive a series of bits from the EEPROM.
5103 */
5104 static void
5105 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5106 {
5107 uint32_t reg, val;
5108 int x;
5109
5110 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5111
5112 val = 0;
5113 for (x = nbits; x > 0; x--) {
5114 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5115 delay(2);
5116 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5117 val |= (1U << (x - 1));
5118 CSR_WRITE(sc, WMREG_EECD, reg);
5119 delay(2);
5120 }
5121 *valp = val;
5122 }
5123
5124 /*
5125 * wm_read_eeprom_uwire:
5126 *
5127 * Read a word from the EEPROM using the MicroWire protocol.
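 *
 * A sketch of the bit-banged sequence implemented below: raise CS,
 * clock out a 3-bit READ opcode and the word address on DI, then
 * clock in 16 data bits from DO; the EEPROM samples DI on the rising
 * edge of SK.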
5128 */
5129 static int
5130 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5131 {
5132 uint32_t reg, val;
5133 int i;
5134
5135 for (i = 0; i < wordcnt; i++) {
5136 /* Clear SK and DI. */
5137 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5138 CSR_WRITE(sc, WMREG_EECD, reg);
5139
5140 /*
5141 * XXX: workaround for a bug in qemu-0.12.x and prior
5142 * and Xen.
5143 *
		 * We use this workaround only for the 82540 because
		 * qemu's e1000 acts as an 82540.
5146 */
5147 if (sc->sc_type == WM_T_82540) {
5148 reg |= EECD_SK;
5149 CSR_WRITE(sc, WMREG_EECD, reg);
5150 reg &= ~EECD_SK;
5151 CSR_WRITE(sc, WMREG_EECD, reg);
5152 delay(2);
5153 }
5154 /* XXX: end of workaround */
5155
5156 /* Set CHIP SELECT. */
5157 reg |= EECD_CS;
5158 CSR_WRITE(sc, WMREG_EECD, reg);
5159 delay(2);
5160
5161 /* Shift in the READ command. */
5162 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5163
5164 /* Shift in address. */
5165 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5166
5167 /* Shift out the data. */
5168 wm_eeprom_recvbits(sc, &val, 16);
5169 data[i] = val & 0xffff;
5170
5171 /* Clear CHIP SELECT. */
5172 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5173 CSR_WRITE(sc, WMREG_EECD, reg);
5174 delay(2);
5175 }
5176
5177 return 0;
5178 }
5179
5180 /*
5181 * wm_spi_eeprom_ready:
5182 *
5183 * Wait for a SPI EEPROM to be ready for commands.
5184 */
5185 static int
5186 wm_spi_eeprom_ready(struct wm_softc *sc)
5187 {
5188 uint32_t val;
5189 int usec;
5190
5191 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5192 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5193 wm_eeprom_recvbits(sc, &val, 8);
5194 if ((val & SPI_SR_RDY) == 0)
5195 break;
5196 }
5197 if (usec >= SPI_MAX_RETRIES) {
5198 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5199 return 1;
5200 }
5201 return 0;
5202 }
5203
5204 /*
5205 * wm_read_eeprom_spi:
5206 *
 * Read a word from the EEPROM using the SPI protocol.
5208 */
5209 static int
5210 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5211 {
5212 uint32_t reg, val;
5213 int i;
5214 uint8_t opc;
5215
5216 /* Clear SK and CS. */
5217 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5218 CSR_WRITE(sc, WMREG_EECD, reg);
5219 delay(2);
5220
5221 if (wm_spi_eeprom_ready(sc))
5222 return 1;
5223
5224 /* Toggle CS to flush commands. */
5225 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5226 delay(2);
5227 CSR_WRITE(sc, WMREG_EECD, reg);
5228 delay(2);
5229
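	/*
	 * Small SPI EEPROMs with 8-bit addressing encode the ninth
	 * address bit (A8) in the opcode itself; set it for words in
	 * the upper half of the part.
	 */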
5230 opc = SPI_OPC_READ;
5231 if (sc->sc_ee_addrbits == 8 && word >= 128)
5232 opc |= SPI_OPC_A8;
5233
5234 wm_eeprom_sendbits(sc, opc, 8);
5235 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5236
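	/*
	 * The data arrives high byte first; swap each 16-bit value
	 * into the byte order the rest of the driver expects.
	 */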
5237 for (i = 0; i < wordcnt; i++) {
5238 wm_eeprom_recvbits(sc, &val, 16);
5239 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5240 }
5241
5242 /* Raise CS and clear SK. */
5243 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5244 CSR_WRITE(sc, WMREG_EECD, reg);
5245 delay(2);
5246
5247 return 0;
5248 }
5249
5250 #define EEPROM_CHECKSUM 0xBABA
5251 #define EEPROM_SIZE 0x0040
5252
5253 /*
5254 * wm_validate_eeprom_checksum
5255 *
5256 * The checksum is defined as the sum of the first 64 (16 bit) words.
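 *
 * A valid image sums (modulo 2^16) to EEPROM_CHECKSUM (0xBABA); the
 * checksum word itself is chosen at programming time so that the
 * total comes out right.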
5257 */
5258 static int
5259 wm_validate_eeprom_checksum(struct wm_softc *sc)
5260 {
5261 uint16_t checksum;
5262 uint16_t eeprom_data;
5263 int i;
5264
5265 checksum = 0;
5266
5267 #ifdef WM_DEBUG
5268 /* Dump EEPROM image for debug */
5269 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5270 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5271 || (sc->sc_type == WM_T_PCH2)) {
5272 wm_read_eeprom(sc, 0x19, 1, &eeprom_data);
5273 if ((eeprom_data & 0x40) == 0) {
5274 DPRINTF(WM_DEBUG_NVM,("%s: NVM need to be updated\n",
5275 device_xname(sc->sc_dev)));
5276 }
5277 }
5278
5279 if ((wm_debug & WM_DEBUG_NVM) != 0) {
5280 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5281 for (i = 0; i < EEPROM_SIZE; i++) {
5282 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5283 printf("XX ");
5284 else
5285 printf("%04x ", eeprom_data);
5286 if (i % 8 == 7)
5287 printf("\n");
5288 }
5289 }
5290
5291 #endif /* WM_DEBUG */
5292
5293 for (i = 0; i < EEPROM_SIZE; i++) {
5294 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5295 return 1;
5296 checksum += eeprom_data;
5297 }
5298
5299 if (checksum != (uint16_t) EEPROM_CHECKSUM)
5300 return 1;
5301
5302 return 0;
5303 }
5304
5305 /*
5306 * wm_read_eeprom:
5307 *
5308 * Read data from the serial EEPROM.
5309 */
5310 static int
5311 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5312 {
5313 int rv;
5314
5315 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5316 return 1;
5317
5318 if (wm_acquire_eeprom(sc))
5319 return 1;
5320
5321 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5322 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5323 || (sc->sc_type == WM_T_PCH2))
5324 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5325 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5326 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5327 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5328 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5329 else
5330 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5331
5332 wm_release_eeprom(sc);
5333 return rv;
5334 }
5335
5336 static int
5337 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5338 uint16_t *data)
5339 {
5340 int i, eerd = 0;
5341 int error = 0;
5342
5343 for (i = 0; i < wordcnt; i++) {
5344 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5345
5346 CSR_WRITE(sc, WMREG_EERD, eerd);
5347 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5348 if (error != 0)
5349 break;
5350
5351 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5352 }
5353
5354 return error;
5355 }
5356
5357 static int
5358 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5359 {
5360 uint32_t attempts = 100000;
5361 uint32_t i, reg = 0;
5362 int32_t done = -1;
5363
5364 for (i = 0; i < attempts; i++) {
5365 reg = CSR_READ(sc, rw);
5366
5367 if (reg & EERD_DONE) {
5368 done = 0;
5369 break;
5370 }
5371 delay(5);
5372 }
5373
5374 return done;
5375 }
5376
5377 static int
5378 wm_check_alt_mac_addr(struct wm_softc *sc)
5379 {
5380 uint16_t myea[ETHER_ADDR_LEN / 2];
5381 uint16_t offset = EEPROM_OFF_MACADDR;
5382
5383 /* Try to read alternative MAC address pointer */
5384 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5385 return -1;
5386
5387 /* Check pointer */
5388 if (offset == 0xffff)
5389 return -1;
5390
5391 /*
	 * Check whether the alternative MAC address is valid.  Some
	 * cards have a non-0xffff pointer yet do not actually use an
	 * alternative MAC address.
	 *
	 * A valid unicast address must have the multicast (group) bit
	 * of the first byte clear, so check that.
5397 */
5398 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5399 if (((myea[0] & 0xff) & 0x01) == 0)
5400 return 0; /* found! */
5401
5402 /* not found */
5403 return -1;
5404 }
5405
5406 static int
5407 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5408 {
5409 uint16_t myea[ETHER_ADDR_LEN / 2];
5410 uint16_t offset = EEPROM_OFF_MACADDR;
5411 int do_invert = 0;
5412
5413 switch (sc->sc_type) {
5414 case WM_T_82580:
5415 case WM_T_82580ER:
5416 case WM_T_I350:
5417 switch (sc->sc_funcid) {
5418 case 0:
5419 /* default value (== EEPROM_OFF_MACADDR) */
5420 break;
5421 case 1:
5422 offset = EEPROM_OFF_LAN1;
5423 break;
5424 case 2:
5425 offset = EEPROM_OFF_LAN2;
5426 break;
5427 case 3:
5428 offset = EEPROM_OFF_LAN3;
5429 break;
5430 default:
5431 goto bad;
5432 /* NOTREACHED */
5433 break;
5434 }
5435 break;
5436 case WM_T_82571:
5437 case WM_T_82575:
5438 case WM_T_82576:
5439 case WM_T_80003:
5440 if (wm_check_alt_mac_addr(sc) != 0) {
5441 /* reset the offset to LAN0 */
5442 offset = EEPROM_OFF_MACADDR;
5443 if ((sc->sc_funcid & 0x01) == 1)
5444 do_invert = 1;
5445 goto do_read;
5446 }
5447 switch (sc->sc_funcid) {
5448 case 0:
5449 /*
5450 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5451 * itself.
5452 */
5453 break;
5454 case 1:
5455 offset += EEPROM_OFF_MACADDR_LAN1;
5456 break;
5457 case 2:
5458 offset += EEPROM_OFF_MACADDR_LAN2;
5459 break;
5460 case 3:
5461 offset += EEPROM_OFF_MACADDR_LAN3;
5462 break;
5463 default:
5464 goto bad;
5465 /* NOTREACHED */
5466 break;
5467 }
5468 break;
5469 default:
5470 if ((sc->sc_funcid & 0x01) == 1)
5471 do_invert = 1;
5472 break;
5473 }
5474
5475 do_read:
5476 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5477 myea) != 0) {
5478 goto bad;
5479 }
5480
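	/* Each 16-bit EEPROM word stores two address bytes, low byte first. */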
5481 enaddr[0] = myea[0] & 0xff;
5482 enaddr[1] = myea[0] >> 8;
5483 enaddr[2] = myea[1] & 0xff;
5484 enaddr[3] = myea[1] >> 8;
5485 enaddr[4] = myea[2] & 0xff;
5486 enaddr[5] = myea[2] >> 8;
5487
5488 /*
5489 * Toggle the LSB of the MAC address on the second port
5490 * of some dual port cards.
5491 */
5492 if (do_invert != 0)
5493 enaddr[5] ^= 1;
5494
5495 return 0;
5496
5497 bad:
5498 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5499
5500 return -1;
5501 }
5502
5503 /*
5504 * wm_add_rxbuf:
5505 *
 * Add a receive buffer to the indicated descriptor.
5507 */
5508 static int
5509 wm_add_rxbuf(struct wm_softc *sc, int idx)
5510 {
5511 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5512 struct mbuf *m;
5513 int error;
5514
5515 MGETHDR(m, M_DONTWAIT, MT_DATA);
5516 if (m == NULL)
5517 return ENOBUFS;
5518
5519 MCLGET(m, M_DONTWAIT);
5520 if ((m->m_flags & M_EXT) == 0) {
5521 m_freem(m);
5522 return ENOBUFS;
5523 }
5524
5525 if (rxs->rxs_mbuf != NULL)
5526 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5527
5528 rxs->rxs_mbuf = m;
5529
5530 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5531 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5532 BUS_DMA_READ|BUS_DMA_NOWAIT);
5533 if (error) {
5534 /* XXX XXX XXX */
5535 aprint_error_dev(sc->sc_dev,
5536 "unable to load rx DMA map %d, error = %d\n",
5537 idx, error);
5538 panic("wm_add_rxbuf");
5539 }
5540
5541 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5542 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5543
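	/*
	 * On NEWQUEUE (82575 and later) chips the descriptor may only
	 * be handed to the hardware once the receiver has been enabled;
	 * see the matching logic in wm_init().
	 */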
5544 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5545 if ((sc->sc_rctl & RCTL_EN) != 0)
5546 WM_INIT_RXDESC(sc, idx);
5547 } else
5548 WM_INIT_RXDESC(sc, idx);
5549
5550 return 0;
5551 }
5552
5553 /*
5554 * wm_set_ral:
5555 *
 * Set an entry in the receive address list.
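 *
 * Packing sketch: for the address 00:11:22:33:44:55 this yields
 * ral_lo = 0x33221100 and ral_hi = RAL_AV | 0x00005544.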
5557 */
5558 static void
5559 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5560 {
5561 uint32_t ral_lo, ral_hi;
5562
5563 if (enaddr != NULL) {
5564 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5565 (enaddr[3] << 24);
5566 ral_hi = enaddr[4] | (enaddr[5] << 8);
5567 ral_hi |= RAL_AV;
5568 } else {
5569 ral_lo = 0;
5570 ral_hi = 0;
5571 }
5572
5573 if (sc->sc_type >= WM_T_82544) {
5574 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5575 ral_lo);
5576 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5577 ral_hi);
5578 } else {
5579 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5580 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5581 }
5582 }
5583
5584 /*
5585 * wm_mchash:
5586 *
5587 * Compute the hash of the multicast address for the 4096-bit
5588 * multicast filter.
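 *
 * The hash indexes the multicast table: in wm_set_filter() below,
 * the upper bits (hash >> 5) select a 32-bit MTA register and the
 * low five bits select a bit within it.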
5589 */
5590 static uint32_t
5591 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5592 {
5593 static const int lo_shift[4] = { 4, 3, 2, 0 };
5594 static const int hi_shift[4] = { 4, 5, 6, 8 };
5595 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5596 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5597 uint32_t hash;
5598
5599 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5600 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5601 || (sc->sc_type == WM_T_PCH2)) {
5602 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5603 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5604 return (hash & 0x3ff);
5605 }
5606 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5607 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5608
5609 return (hash & 0xfff);
5610 }
5611
5612 /*
5613 * wm_set_filter:
5614 *
5615 * Set up the receive filter.
5616 */
5617 static void
5618 wm_set_filter(struct wm_softc *sc)
5619 {
5620 struct ethercom *ec = &sc->sc_ethercom;
5621 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5622 struct ether_multi *enm;
5623 struct ether_multistep step;
5624 bus_addr_t mta_reg;
5625 uint32_t hash, reg, bit;
5626 int i, size;
5627
5628 if (sc->sc_type >= WM_T_82544)
5629 mta_reg = WMREG_CORDOVA_MTA;
5630 else
5631 mta_reg = WMREG_MTA;
5632
5633 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5634
5635 if (ifp->if_flags & IFF_BROADCAST)
5636 sc->sc_rctl |= RCTL_BAM;
5637 if (ifp->if_flags & IFF_PROMISC) {
5638 sc->sc_rctl |= RCTL_UPE;
5639 goto allmulti;
5640 }
5641
5642 /*
5643 * Set the station address in the first RAL slot, and
5644 * clear the remaining slots.
5645 */
5646 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5647 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5648 || (sc->sc_type == WM_T_PCH2))
5649 size = WM_ICH8_RAL_TABSIZE;
5650 else
5651 size = WM_RAL_TABSIZE;
5652 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5653 for (i = 1; i < size; i++)
5654 wm_set_ral(sc, NULL, i);
5655
5656 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5657 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5658 || (sc->sc_type == WM_T_PCH2))
5659 size = WM_ICH8_MC_TABSIZE;
5660 else
5661 size = WM_MC_TABSIZE;
5662 /* Clear out the multicast table. */
5663 for (i = 0; i < size; i++)
5664 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5665
5666 ETHER_FIRST_MULTI(step, ec, enm);
5667 while (enm != NULL) {
5668 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5669 /*
5670 * We must listen to a range of multicast addresses.
5671 * For now, just accept all multicasts, rather than
5672 * trying to set only those filter bits needed to match
5673 * the range. (At this time, the only use of address
5674 * ranges is for IP multicast routing, for which the
5675 * range is big enough to require all bits set.)
5676 */
5677 goto allmulti;
5678 }
5679
5680 hash = wm_mchash(sc, enm->enm_addrlo);
5681
5682 reg = (hash >> 5);
5683 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5684 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5685 || (sc->sc_type == WM_T_PCH2))
5686 reg &= 0x1f;
5687 else
5688 reg &= 0x7f;
5689 bit = hash & 0x1f;
5690
5691 hash = CSR_READ(sc, mta_reg + (reg << 2));
5692 hash |= 1U << bit;
5693
5694 /* XXX Hardware bug?? */
5695 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5696 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5697 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5698 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5699 } else
5700 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5701
5702 ETHER_NEXT_MULTI(step, enm);
5703 }
5704
5705 ifp->if_flags &= ~IFF_ALLMULTI;
5706 goto setit;
5707
5708 allmulti:
5709 ifp->if_flags |= IFF_ALLMULTI;
5710 sc->sc_rctl |= RCTL_MPE;
5711
5712 setit:
5713 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5714 }
5715
5716 /*
5717 * wm_tbi_mediainit:
5718 *
5719 * Initialize media for use on 1000BASE-X devices.
5720 */
5721 static void
5722 wm_tbi_mediainit(struct wm_softc *sc)
5723 {
5724 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5725 const char *sep = "";
5726
5727 if (sc->sc_type < WM_T_82543)
5728 sc->sc_tipg = TIPG_WM_DFLT;
5729 else
5730 sc->sc_tipg = TIPG_LG_DFLT;
5731
5732 sc->sc_tbi_anegticks = 5;
5733
5734 /* Initialize our media structures */
5735 sc->sc_mii.mii_ifp = ifp;
5736
5737 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5738 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5739 wm_tbi_mediastatus);
5740
5741 /*
5742 * SWD Pins:
5743 *
5744 * 0 = Link LED (output)
5745 * 1 = Loss Of Signal (input)
5746 */
5747 sc->sc_ctrl |= CTRL_SWDPIO(0);
5748 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5749
5750 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5751
5752 #define ADD(ss, mm, dd) \
5753 do { \
5754 aprint_normal("%s%s", sep, ss); \
5755 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5756 sep = ", "; \
5757 } while (/*CONSTCOND*/0)
5758
5759 aprint_normal_dev(sc->sc_dev, "");
5760 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5761 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5762 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5763 aprint_normal("\n");
5764
5765 #undef ADD
5766
5767 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5768 }
5769
5770 /*
5771 * wm_tbi_mediastatus: [ifmedia interface function]
5772 *
5773 * Get the current interface media status on a 1000BASE-X device.
5774 */
5775 static void
5776 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5777 {
5778 struct wm_softc *sc = ifp->if_softc;
5779 uint32_t ctrl, status;
5780
5781 ifmr->ifm_status = IFM_AVALID;
5782 ifmr->ifm_active = IFM_ETHER;
5783
5784 status = CSR_READ(sc, WMREG_STATUS);
5785 if ((status & STATUS_LU) == 0) {
5786 ifmr->ifm_active |= IFM_NONE;
5787 return;
5788 }
5789
5790 ifmr->ifm_status |= IFM_ACTIVE;
5791 ifmr->ifm_active |= IFM_1000_SX;
5792 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5793 ifmr->ifm_active |= IFM_FDX;
5794 ctrl = CSR_READ(sc, WMREG_CTRL);
5795 if (ctrl & CTRL_RFCE)
5796 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5797 if (ctrl & CTRL_TFCE)
5798 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5799 }
5800
5801 /*
5802 * wm_tbi_mediachange: [ifmedia interface function]
5803 *
5804 * Set hardware to newly-selected media on a 1000BASE-X device.
5805 */
5806 static int
5807 wm_tbi_mediachange(struct ifnet *ifp)
5808 {
5809 struct wm_softc *sc = ifp->if_softc;
5810 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5811 uint32_t status;
5812 int i;
5813
5814 sc->sc_txcw = 0;
5815 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5816 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5817 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5818 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5819 sc->sc_txcw |= TXCW_ANE;
5820 } else {
5821 /*
5822 * If autonegotiation is turned off, force link up and turn on
5823 * full duplex
5824 */
5825 sc->sc_txcw &= ~TXCW_ANE;
5826 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5827 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5828 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5829 delay(1000);
5830 }
5831
5832 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5833 device_xname(sc->sc_dev),sc->sc_txcw));
5834 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5835 delay(10000);
5836
5837 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5838 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5839
5840 /*
5841 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
5842 	 * if the optics detect a signal, and clear if they don't.
5843 */
5844 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5845 /* Have signal; wait for the link to come up. */
5846
5847 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5848 /*
5849 * Reset the link, and let autonegotiation do its thing
5850 */
5851 sc->sc_ctrl |= CTRL_LRST;
5852 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5853 delay(1000);
5854 sc->sc_ctrl &= ~CTRL_LRST;
5855 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5856 delay(1000);
5857 }
5858
5859 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5860 delay(10000);
5861 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5862 break;
5863 }
5864
5865 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5866 device_xname(sc->sc_dev),i));
5867
5868 status = CSR_READ(sc, WMREG_STATUS);
5869 DPRINTF(WM_DEBUG_LINK,
5870 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5871 device_xname(sc->sc_dev),status, STATUS_LU));
5872 if (status & STATUS_LU) {
5873 /* Link is up. */
5874 DPRINTF(WM_DEBUG_LINK,
5875 ("%s: LINK: set media -> link up %s\n",
5876 device_xname(sc->sc_dev),
5877 (status & STATUS_FD) ? "FDX" : "HDX"));
5878
5879 /*
5880 * NOTE: CTRL will update TFCE and RFCE automatically,
5881 * so we should update sc->sc_ctrl
5882 */
5883 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5884 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5885 sc->sc_fcrtl &= ~FCRTL_XONE;
5886 if (status & STATUS_FD)
5887 sc->sc_tctl |=
5888 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5889 else
5890 sc->sc_tctl |=
5891 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5892 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5893 sc->sc_fcrtl |= FCRTL_XONE;
5894 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5895 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5896 WMREG_OLD_FCRTL : WMREG_FCRTL,
5897 sc->sc_fcrtl);
5898 sc->sc_tbi_linkup = 1;
5899 } else {
5900 if (i == WM_LINKUP_TIMEOUT)
5901 wm_check_for_link(sc);
5902 /* Link is down. */
5903 DPRINTF(WM_DEBUG_LINK,
5904 ("%s: LINK: set media -> link down\n",
5905 device_xname(sc->sc_dev)));
5906 sc->sc_tbi_linkup = 0;
5907 }
5908 } else {
5909 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5910 device_xname(sc->sc_dev)));
5911 sc->sc_tbi_linkup = 0;
5912 }
5913
5914 wm_tbi_set_linkled(sc);
5915
5916 return 0;
5917 }
5918
5919 /*
5920 * wm_tbi_set_linkled:
5921 *
5922 * Update the link LED on 1000BASE-X devices.
5923 */
5924 static void
5925 wm_tbi_set_linkled(struct wm_softc *sc)
5926 {
5927
5928 if (sc->sc_tbi_linkup)
5929 sc->sc_ctrl |= CTRL_SWDPIN(0);
5930 else
5931 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5932
5933 /* 82540 or newer devices are active low */
5934 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5935
5936 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5937 }
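
/*
 * Net effect: on 82540 and newer parts the XOR above inverts the pin,
 * so link-up drives SWDPIN(0) low (the LED is active low there), while
 * on older parts link-up drives the pin high.
 */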
5938
5939 /*
5940 * wm_tbi_check_link:
5941 *
5942 * Check the link on 1000BASE-X devices.
5943 */
5944 static void
5945 wm_tbi_check_link(struct wm_softc *sc)
5946 {
5947 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5948 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5949 uint32_t rxcw, ctrl, status;
5950
5951 status = CSR_READ(sc, WMREG_STATUS);
5952
5953 rxcw = CSR_READ(sc, WMREG_RXCW);
5954 ctrl = CSR_READ(sc, WMREG_CTRL);
5955
5956 /* set link status */
5957 if ((status & STATUS_LU) == 0) {
5958 DPRINTF(WM_DEBUG_LINK,
5959 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5960 sc->sc_tbi_linkup = 0;
5961 } else if (sc->sc_tbi_linkup == 0) {
5962 DPRINTF(WM_DEBUG_LINK,
5963 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5964 (status & STATUS_FD) ? "FDX" : "HDX"));
5965 sc->sc_tbi_linkup = 1;
5966 }
5967
5968 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5969 && ((status & STATUS_LU) == 0)) {
5970 sc->sc_tbi_linkup = 0;
5971 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5972 /* RXCFG storm! */
5973 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5974 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5975 wm_init(ifp);
5976 ifp->if_start(ifp);
5977 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5978 /* If the timer expired, retry autonegotiation */
5979 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5980 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5981 sc->sc_tbi_ticks = 0;
5982 /*
5983 * Reset the link, and let autonegotiation do
5984 * its thing
5985 */
5986 sc->sc_ctrl |= CTRL_LRST;
5987 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5988 delay(1000);
5989 sc->sc_ctrl &= ~CTRL_LRST;
5990 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5991 delay(1000);
5992 CSR_WRITE(sc, WMREG_TXCW,
5993 sc->sc_txcw & ~TXCW_ANE);
5994 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5995 }
5996 }
5997 }
5998
5999 wm_tbi_set_linkled(sc);
6000 }
6001
6002 /*
6003 * wm_gmii_reset:
6004 *
6005 * Reset the PHY.
6006 */
6007 static void
6008 wm_gmii_reset(struct wm_softc *sc)
6009 {
6010 uint32_t reg;
6011 int rv;
6012
6013 /* get phy semaphore */
6014 switch (sc->sc_type) {
6015 case WM_T_82571:
6016 case WM_T_82572:
6017 case WM_T_82573:
6018 case WM_T_82574:
6019 case WM_T_82583:
6020 /* XXX should get sw semaphore, too */
6021 rv = wm_get_swsm_semaphore(sc);
6022 break;
6023 case WM_T_82575:
6024 case WM_T_82576:
6025 case WM_T_82580:
6026 case WM_T_82580ER:
6027 case WM_T_I350:
6028 case WM_T_80003:
6029 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6030 break;
6031 case WM_T_ICH8:
6032 case WM_T_ICH9:
6033 case WM_T_ICH10:
6034 case WM_T_PCH:
6035 case WM_T_PCH2:
6036 rv = wm_get_swfwhw_semaphore(sc);
6037 break;
6038 default:
6039 		/* nothing to do */
6040 rv = 0;
6041 break;
6042 }
6043 if (rv != 0) {
6044 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6045 __func__);
6046 return;
6047 }
6048
6049 switch (sc->sc_type) {
6050 case WM_T_82542_2_0:
6051 case WM_T_82542_2_1:
6052 /* null */
6053 break;
6054 case WM_T_82543:
6055 /*
6056 * With 82543, we need to force speed and duplex on the MAC
6057 * equal to what the PHY speed and duplex configuration is.
6058 * In addition, we need to perform a hardware reset on the PHY
6059 * to take it out of reset.
6060 */
6061 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6062 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6063
6064 /* The PHY reset pin is active-low. */
6065 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6066 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6067 CTRL_EXT_SWDPIN(4));
6068 reg |= CTRL_EXT_SWDPIO(4);
6069
6070 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6071 delay(10*1000);
6072
6073 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6074 delay(150);
6075 #if 0
6076 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6077 #endif
6078 delay(20*1000); /* XXX extra delay to get PHY ID? */
6079 break;
6080 case WM_T_82544: /* reset 10000us */
6081 case WM_T_82540:
6082 case WM_T_82545:
6083 case WM_T_82545_3:
6084 case WM_T_82546:
6085 case WM_T_82546_3:
6086 case WM_T_82541:
6087 case WM_T_82541_2:
6088 case WM_T_82547:
6089 case WM_T_82547_2:
6090 case WM_T_82571: /* reset 100us */
6091 case WM_T_82572:
6092 case WM_T_82573:
6093 case WM_T_82574:
6094 case WM_T_82575:
6095 case WM_T_82576:
6096 case WM_T_82580:
6097 case WM_T_82580ER:
6098 case WM_T_I350:
6099 case WM_T_82583:
6100 case WM_T_80003:
6101 /* generic reset */
6102 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6103 delay(20000);
6104 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6105 delay(20000);
6106
6107 if ((sc->sc_type == WM_T_82541)
6108 || (sc->sc_type == WM_T_82541_2)
6109 || (sc->sc_type == WM_T_82547)
6110 || (sc->sc_type == WM_T_82547_2)) {
6111 			/* IGP workarounds are done in igp_reset() */
6112 /* XXX add code to set LED after phy reset */
6113 }
6114 break;
6115 case WM_T_ICH8:
6116 case WM_T_ICH9:
6117 case WM_T_ICH10:
6118 case WM_T_PCH:
6119 case WM_T_PCH2:
6120 /* generic reset */
6121 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6122 delay(100);
6123 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6124 delay(150);
6125 break;
6126 default:
6127 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6128 __func__);
6129 break;
6130 }
6131
6132 /* release PHY semaphore */
6133 switch (sc->sc_type) {
6134 case WM_T_82571:
6135 case WM_T_82572:
6136 case WM_T_82573:
6137 case WM_T_82574:
6138 case WM_T_82583:
6139 /* XXX should put sw semaphore, too */
6140 wm_put_swsm_semaphore(sc);
6141 break;
6142 case WM_T_82575:
6143 case WM_T_82576:
6144 case WM_T_82580:
6145 case WM_T_82580ER:
6146 case WM_T_I350:
6147 case WM_T_80003:
6148 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6149 break;
6150 case WM_T_ICH8:
6151 case WM_T_ICH9:
6152 case WM_T_ICH10:
6153 case WM_T_PCH:
6154 case WM_T_PCH2:
6155 wm_put_swfwhw_semaphore(sc);
6156 break;
6157 default:
6158 		/* nothing to do */
6159 rv = 0;
6160 break;
6161 }
6162
6163 /* get_cfg_done */
6164 wm_get_cfg_done(sc);
6165
6166 /* extra setup */
6167 switch (sc->sc_type) {
6168 case WM_T_82542_2_0:
6169 case WM_T_82542_2_1:
6170 case WM_T_82543:
6171 case WM_T_82544:
6172 case WM_T_82540:
6173 case WM_T_82545:
6174 case WM_T_82545_3:
6175 case WM_T_82546:
6176 case WM_T_82546_3:
6177 case WM_T_82541_2:
6178 case WM_T_82547_2:
6179 case WM_T_82571:
6180 case WM_T_82572:
6181 case WM_T_82573:
6182 case WM_T_82574:
6183 case WM_T_82575:
6184 case WM_T_82576:
6185 case WM_T_82580:
6186 case WM_T_82580ER:
6187 case WM_T_I350:
6188 case WM_T_82583:
6189 case WM_T_80003:
6190 /* null */
6191 break;
6192 case WM_T_82541:
6193 case WM_T_82547:
6194 		/* XXX Actively configure the LED after PHY reset */
6195 break;
6196 case WM_T_ICH8:
6197 case WM_T_ICH9:
6198 case WM_T_ICH10:
6199 case WM_T_PCH:
6200 case WM_T_PCH2:
6201 		/* Allow time for h/w to get to a quiescent state after reset */
6202 delay(10*1000);
6203
6204 if (sc->sc_type == WM_T_PCH)
6205 wm_hv_phy_workaround_ich8lan(sc);
6206
6207 if (sc->sc_type == WM_T_PCH2)
6208 wm_lv_phy_workaround_ich8lan(sc);
6209
6210 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6211 /*
6212 * dummy read to clear the phy wakeup bit after lcd
6213 * reset
6214 */
6215 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6216 }
6217
6218 /*
6219 		 * XXX Configure the LCD with the extended configuration region
6220 * in NVM
6221 */
6222
6223 /* Configure the LCD with the OEM bits in NVM */
6224 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6225 /*
6226 * Disable LPLU.
6227 * XXX It seems that 82567 has LPLU, too.
6228 */
6229 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6230 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6231 reg |= HV_OEM_BITS_ANEGNOW;
6232 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6233 }
6234 break;
6235 default:
6236 panic("%s: unknown type\n", __func__);
6237 break;
6238 }
6239 }
6240
6241 /*
6242 * wm_gmii_mediainit:
6243 *
6244 * Initialize media for use on 1000BASE-T devices.
6245 */
6246 static void
6247 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6248 {
6249 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6250
6251 /* We have MII. */
6252 sc->sc_flags |= WM_F_HAS_MII;
6253
6254 if (sc->sc_type == WM_T_80003)
6255 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6256 else
6257 sc->sc_tipg = TIPG_1000T_DFLT;
6258
6259 /*
6260 * Let the chip set speed/duplex on its own based on
6261 * signals from the PHY.
6262 * XXXbouyer - I'm not sure this is right for the 80003,
6263 * the em driver only sets CTRL_SLU here - but it seems to work.
6264 */
6265 sc->sc_ctrl |= CTRL_SLU;
6266 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6267
6268 /* Initialize our media structures and probe the GMII. */
6269 sc->sc_mii.mii_ifp = ifp;
6270
6271 switch (prodid) {
6272 case PCI_PRODUCT_INTEL_PCH_M_LM:
6273 case PCI_PRODUCT_INTEL_PCH_M_LC:
6274 /* 82577 */
6275 sc->sc_phytype = WMPHY_82577;
6276 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6277 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6278 break;
6279 case PCI_PRODUCT_INTEL_PCH_D_DM:
6280 case PCI_PRODUCT_INTEL_PCH_D_DC:
6281 /* 82578 */
6282 sc->sc_phytype = WMPHY_82578;
6283 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6284 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6285 break;
6286 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6287 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6288 		/* 82579 */
6289 sc->sc_phytype = WMPHY_82579;
6290 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6291 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6292 break;
6293 case PCI_PRODUCT_INTEL_82801I_BM:
6294 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6295 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6296 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6297 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6298 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6299 /* 82567 */
6300 sc->sc_phytype = WMPHY_BM;
6301 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
6302 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
6303 break;
6304 default:
6305 if ((sc->sc_flags & WM_F_SGMII) != 0) {
6306 sc->sc_mii.mii_readreg = wm_sgmii_readreg;
6307 sc->sc_mii.mii_writereg = wm_sgmii_writereg;
6308 } else if (sc->sc_type >= WM_T_80003) {
6309 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
6310 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
6311 } else if (sc->sc_type >= WM_T_82544) {
6312 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
6313 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
6314 } else {
6315 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
6316 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
6317 }
6318 break;
6319 }
6320 sc->sc_mii.mii_statchg = wm_gmii_statchg;
6321
6322 wm_gmii_reset(sc);
6323
6324 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6325 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
6326 wm_gmii_mediastatus);
6327
6328 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6329 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6330 || (sc->sc_type == WM_T_I350)) {
6331 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6332 /* Attach only one port */
6333 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6334 MII_OFFSET_ANY, MIIF_DOPAUSE);
6335 } else {
6336 int i;
6337 uint32_t ctrl_ext;
6338
6339 /* Power on sgmii phy if it is disabled */
6340 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6341 CSR_WRITE(sc, WMREG_CTRL_EXT,
6342 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6343 CSR_WRITE_FLUSH(sc);
6344 delay(300*1000); /* XXX too long */
6345
6346 			/* PHY addresses 1 through 7 */
6347 for (i = 1; i < 8; i++)
6348 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6349 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6350
6351 /* restore previous sfp cage power state */
6352 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6353 }
6354 } else {
6355 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6356 MII_OFFSET_ANY, MIIF_DOPAUSE);
6357 }
6358
6359 if ((sc->sc_type == WM_T_PCH2) &&
6360 (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL)) {
6361 wm_set_mdio_slow_mode_hv(sc);
6362 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6363 MII_OFFSET_ANY, MIIF_DOPAUSE);
6364 }
6365
6366 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
6367 /* if failed, retry with *_bm_* */
6368 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
6369 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
6370
6371 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6372 MII_OFFSET_ANY, MIIF_DOPAUSE);
6373 }
6374 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
6375 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6376 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
6377 sc->sc_phytype = WMPHY_NONE;
6378 } else {
6379 /* Check PHY type */
6380 uint32_t model;
6381 struct mii_softc *child;
6382
6383 child = LIST_FIRST(&sc->sc_mii.mii_phys);
6384 if (device_is_a(child->mii_dev, "igphy")) {
6385 struct igphy_softc *isc = (struct igphy_softc *)child;
6386
6387 model = isc->sc_mii.mii_mpd_model;
6388 if (model == MII_MODEL_yyINTEL_I82566)
6389 sc->sc_phytype = WMPHY_IGP_3;
6390 }
6391
6392 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
6393 }
6394 }
6395
6396 /*
6397 * wm_gmii_mediastatus: [ifmedia interface function]
6398 *
6399 * Get the current interface media status on a 1000BASE-T device.
6400 */
6401 static void
6402 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6403 {
6404 struct wm_softc *sc = ifp->if_softc;
6405
6406 ether_mediastatus(ifp, ifmr);
6407 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6408 | sc->sc_flowflags;
6409 }
6410
6411 /*
6412 * wm_gmii_mediachange: [ifmedia interface function]
6413 *
6414 * Set hardware to newly-selected media on a 1000BASE-T device.
6415 */
6416 static int
6417 wm_gmii_mediachange(struct ifnet *ifp)
6418 {
6419 struct wm_softc *sc = ifp->if_softc;
6420 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6421 int rc;
6422
6423 if ((ifp->if_flags & IFF_UP) == 0)
6424 return 0;
6425
6426 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6427 sc->sc_ctrl |= CTRL_SLU;
6428 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6429 || (sc->sc_type > WM_T_82543)) {
6430 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6431 } else {
6432 sc->sc_ctrl &= ~CTRL_ASDE;
6433 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6434 if (ife->ifm_media & IFM_FDX)
6435 sc->sc_ctrl |= CTRL_FD;
6436 switch (IFM_SUBTYPE(ife->ifm_media)) {
6437 case IFM_10_T:
6438 sc->sc_ctrl |= CTRL_SPEED_10;
6439 break;
6440 case IFM_100_TX:
6441 sc->sc_ctrl |= CTRL_SPEED_100;
6442 break;
6443 case IFM_1000_T:
6444 sc->sc_ctrl |= CTRL_SPEED_1000;
6445 break;
6446 default:
6447 panic("wm_gmii_mediachange: bad media 0x%x",
6448 ife->ifm_media);
6449 }
6450 }
6451 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6452 if (sc->sc_type <= WM_T_82543)
6453 wm_gmii_reset(sc);
6454
6455 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6456 return 0;
6457 return rc;
6458 }
6459
6460 #define MDI_IO CTRL_SWDPIN(2)
6461 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6462 #define MDI_CLK CTRL_SWDPIN(3)
6463
6464 static void
6465 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6466 {
6467 uint32_t i, v;
6468
6469 v = CSR_READ(sc, WMREG_CTRL);
6470 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6471 v |= MDI_DIR | CTRL_SWDPIO(3);
6472
6473 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6474 if (data & i)
6475 v |= MDI_IO;
6476 else
6477 v &= ~MDI_IO;
6478 CSR_WRITE(sc, WMREG_CTRL, v);
6479 delay(10);
6480 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6481 delay(10);
6482 CSR_WRITE(sc, WMREG_CTRL, v);
6483 delay(10);
6484 }
6485 }
6486
6487 static uint32_t
6488 i82543_mii_recvbits(struct wm_softc *sc)
6489 {
6490 uint32_t v, i, data = 0;
6491
6492 v = CSR_READ(sc, WMREG_CTRL);
6493 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6494 v |= CTRL_SWDPIO(3);
6495
6496 CSR_WRITE(sc, WMREG_CTRL, v);
6497 delay(10);
6498 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6499 delay(10);
6500 CSR_WRITE(sc, WMREG_CTRL, v);
6501 delay(10);
6502
6503 for (i = 0; i < 16; i++) {
6504 data <<= 1;
6505 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6506 delay(10);
6507 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6508 data |= 1;
6509 CSR_WRITE(sc, WMREG_CTRL, v);
6510 delay(10);
6511 }
6512
6513 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6514 delay(10);
6515 CSR_WRITE(sc, WMREG_CTRL, v);
6516 delay(10);
6517
6518 return data;
6519 }
6520
6521 #undef MDI_IO
6522 #undef MDI_DIR
6523 #undef MDI_CLK
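
/*
 * The two helpers above bit-bang an IEEE 802.3 clause 22 MDIO frame on
 * the software-definable pins: a 32-bit preamble of all ones, a 2-bit
 * start delimiter, a 2-bit opcode, a 5-bit PHY address and a 5-bit
 * register address, followed by a turnaround and 16 data bits (clocked
 * out by the PHY on reads, driven by the host on writes).  A read thus
 * sends 32 + 14 bits and clocks the data back in via
 * i82543_mii_recvbits(); a write sends 32 + 32 bits including the data.
 */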
6524
6525 /*
6526 * wm_gmii_i82543_readreg: [mii interface function]
6527 *
6528 * Read a PHY register on the GMII (i82543 version).
6529 */
6530 static int
6531 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6532 {
6533 struct wm_softc *sc = device_private(self);
6534 int rv;
6535
6536 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6537 i82543_mii_sendbits(sc, reg | (phy << 5) |
6538 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6539 rv = i82543_mii_recvbits(sc) & 0xffff;
6540
6541 DPRINTF(WM_DEBUG_GMII,
6542 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6543 device_xname(sc->sc_dev), phy, reg, rv));
6544
6545 return rv;
6546 }
6547
6548 /*
6549 * wm_gmii_i82543_writereg: [mii interface function]
6550 *
6551 * Write a PHY register on the GMII (i82543 version).
6552 */
6553 static void
6554 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6555 {
6556 struct wm_softc *sc = device_private(self);
6557
6558 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6559 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6560 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6561 (MII_COMMAND_START << 30), 32);
6562 }
6563
6564 /*
6565 * wm_gmii_i82544_readreg: [mii interface function]
6566 *
6567 * Read a PHY register on the GMII.
6568 */
6569 static int
6570 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6571 {
6572 struct wm_softc *sc = device_private(self);
6573 uint32_t mdic = 0;
6574 int i, rv;
6575
6576 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6577 MDIC_REGADD(reg));
6578
6579 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6580 mdic = CSR_READ(sc, WMREG_MDIC);
6581 if (mdic & MDIC_READY)
6582 break;
6583 delay(50);
6584 }
6585
6586 if ((mdic & MDIC_READY) == 0) {
6587 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6588 device_xname(sc->sc_dev), phy, reg);
6589 rv = 0;
6590 } else if (mdic & MDIC_E) {
6591 #if 0 /* This is normal if no PHY is present. */
6592 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6593 device_xname(sc->sc_dev), phy, reg);
6594 #endif
6595 rv = 0;
6596 } else {
6597 rv = MDIC_DATA(mdic);
6598 if (rv == 0xffff)
6599 rv = 0;
6600 }
6601
6602 return rv;
6603 }
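
/*
 * Illustrative sketch (not compiled): reading the standard clause 22
 * PHY identifier registers through the MDIC interface above.  The PHY
 * address of 1 is only an example; MII_PHYIDR1/MII_PHYIDR2 come from
 * <dev/mii/mii.h>.
 */
#if 0
	uint32_t phyid;

	phyid = (wm_gmii_i82544_readreg(sc->sc_dev, 1, MII_PHYIDR1) << 16) |
	    wm_gmii_i82544_readreg(sc->sc_dev, 1, MII_PHYIDR2);
#endif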
6604
6605 /*
6606 * wm_gmii_i82544_writereg: [mii interface function]
6607 *
6608 * Write a PHY register on the GMII.
6609 */
6610 static void
6611 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6612 {
6613 struct wm_softc *sc = device_private(self);
6614 uint32_t mdic = 0;
6615 int i;
6616
6617 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6618 MDIC_REGADD(reg) | MDIC_DATA(val));
6619
6620 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6621 mdic = CSR_READ(sc, WMREG_MDIC);
6622 if (mdic & MDIC_READY)
6623 break;
6624 delay(50);
6625 }
6626
6627 if ((mdic & MDIC_READY) == 0)
6628 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6629 device_xname(sc->sc_dev), phy, reg);
6630 else if (mdic & MDIC_E)
6631 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6632 device_xname(sc->sc_dev), phy, reg);
6633 }
6634
6635 /*
6636 * wm_gmii_i80003_readreg: [mii interface function]
6637 *
6638  *	Read a PHY register on the Kumeran interface.
6639  *	This could be handled by the PHY layer if we didn't have to lock
6640  *	the resource ...
6641 */
6642 static int
6643 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6644 {
6645 struct wm_softc *sc = device_private(self);
6646 int sem;
6647 int rv;
6648
6649 if (phy != 1) /* only one PHY on kumeran bus */
6650 return 0;
6651
6652 sem = swfwphysem[sc->sc_funcid];
6653 if (wm_get_swfw_semaphore(sc, sem)) {
6654 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6655 __func__);
6656 return 0;
6657 }
6658
6659 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6660 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6661 reg >> GG82563_PAGE_SHIFT);
6662 } else {
6663 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6664 reg >> GG82563_PAGE_SHIFT);
6665 }
6666 	/* Wait another 200us to work around a ready-bit bug in the MDIC register */
6667 delay(200);
6668 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6669 delay(200);
6670
6671 wm_put_swfw_semaphore(sc, sem);
6672 return rv;
6673 }
6674
6675 /*
6676 * wm_gmii_i80003_writereg: [mii interface function]
6677 *
6678  *	Write a PHY register on the Kumeran interface.
6679  *	This could be handled by the PHY layer if we didn't have to lock
6680  *	the resource ...
6681 */
6682 static void
6683 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6684 {
6685 struct wm_softc *sc = device_private(self);
6686 int sem;
6687
6688 if (phy != 1) /* only one PHY on kumeran bus */
6689 return;
6690
6691 sem = swfwphysem[sc->sc_funcid];
6692 if (wm_get_swfw_semaphore(sc, sem)) {
6693 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6694 __func__);
6695 return;
6696 }
6697
6698 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6699 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6700 reg >> GG82563_PAGE_SHIFT);
6701 } else {
6702 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6703 reg >> GG82563_PAGE_SHIFT);
6704 }
6705 	/* Wait another 200us to work around a ready-bit bug in the MDIC register */
6706 delay(200);
6707 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6708 delay(200);
6709
6710 wm_put_swfw_semaphore(sc, sem);
6711 }
6712
6713 /*
6714 * wm_gmii_bm_readreg: [mii interface function]
6715 *
6716  *	Read a PHY register on the BM (82567) PHY.
6717  *	This could be handled by the PHY layer if we didn't have to lock
6718  *	the resource ...
6719 */
6720 static int
6721 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6722 {
6723 struct wm_softc *sc = device_private(self);
6724 int sem;
6725 int rv;
6726
6727 sem = swfwphysem[sc->sc_funcid];
6728 if (wm_get_swfw_semaphore(sc, sem)) {
6729 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6730 __func__);
6731 return 0;
6732 }
6733
6734 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6735 if (phy == 1)
6736 wm_gmii_i82544_writereg(self, phy, 0x1f,
6737 reg);
6738 else
6739 wm_gmii_i82544_writereg(self, phy,
6740 GG82563_PHY_PAGE_SELECT,
6741 reg >> GG82563_PAGE_SHIFT);
6742 }
6743
6744 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6745 wm_put_swfw_semaphore(sc, sem);
6746 return rv;
6747 }
6748
6749 /*
6750 * wm_gmii_bm_writereg: [mii interface function]
6751 *
6752  *	Write a PHY register on the BM (82567) PHY.
6753  *	This could be handled by the PHY layer if we didn't have to lock
6754  *	the resource ...
6755 */
6756 static void
6757 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6758 {
6759 struct wm_softc *sc = device_private(self);
6760 int sem;
6761
6762 sem = swfwphysem[sc->sc_funcid];
6763 if (wm_get_swfw_semaphore(sc, sem)) {
6764 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6765 __func__);
6766 return;
6767 }
6768
6769 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6770 if (phy == 1)
6771 wm_gmii_i82544_writereg(self, phy, 0x1f,
6772 reg);
6773 else
6774 wm_gmii_i82544_writereg(self, phy,
6775 GG82563_PHY_PAGE_SELECT,
6776 reg >> GG82563_PAGE_SHIFT);
6777 }
6778
6779 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6780 wm_put_swfw_semaphore(sc, sem);
6781 }
6782
6783 static void
6784 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6785 {
6786 struct wm_softc *sc = device_private(self);
6787 uint16_t regnum = BM_PHY_REG_NUM(offset);
6788 uint16_t wuce;
6789
6790 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6791 if (sc->sc_type == WM_T_PCH) {
6792 		/* XXX the e1000 driver does nothing here... why? */
6793 }
6794
6795 /* Set page 769 */
6796 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6797 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6798
6799 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6800
6801 wuce &= ~BM_WUC_HOST_WU_BIT;
6802 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6803 wuce | BM_WUC_ENABLE_BIT);
6804
6805 /* Select page 800 */
6806 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6807 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6808
6809 /* Write page 800 */
6810 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6811
6812 if (rd)
6813 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6814 else
6815 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6816
6817 /* Set page 769 */
6818 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6819 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6820
6821 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6822 }
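
/*
 * In short, a wakeup-register access above is: select page 769 and set
 * the WUC enable bit (with host wakeup cleared), select page 800, write
 * the register number to the address opcode, read or write the data
 * opcode, then restore the saved page-769 enable bits.
 */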
6823
6824 /*
6825 * wm_gmii_hv_readreg: [mii interface function]
6826 *
6827  *	Read a PHY register on the HV (PCH) PHY.
6828  *	This could be handled by the PHY layer if we didn't have to lock
6829  *	the resource ...
6830 */
6831 static int
6832 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6833 {
6834 struct wm_softc *sc = device_private(self);
6835 uint16_t page = BM_PHY_REG_PAGE(reg);
6836 uint16_t regnum = BM_PHY_REG_NUM(reg);
6837 uint16_t val;
6838 int rv;
6839
6840 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6841 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6842 __func__);
6843 return 0;
6844 }
6845
6846 /* XXX Workaround failure in MDIO access while cable is disconnected */
6847 if (sc->sc_phytype == WMPHY_82577) {
6848 /* XXX must write */
6849 }
6850
6851 /* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		/* Don't leak the semaphore on this early return. */
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return val;
	}
6856
	/*
	 * Pages numbered below 768 (other than page 0) work differently
	 * than the rest, so they would need their own function.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return 0;
	}
6865
6866 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6867 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6868 page << BME1000_PAGE_SHIFT);
6869 }
6870
6871 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6872 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6873 return rv;
6874 }
6875
6876 /*
6877 * wm_gmii_hv_writereg: [mii interface function]
6878 *
6879  *	Write a PHY register on the HV (PCH) PHY.
6880  *	This could be handled by the PHY layer if we didn't have to lock
6881  *	the resource ...
6882 */
6883 static void
6884 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6885 {
6886 struct wm_softc *sc = device_private(self);
6887 uint16_t page = BM_PHY_REG_PAGE(reg);
6888 uint16_t regnum = BM_PHY_REG_NUM(reg);
6889
6890 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6891 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6892 __func__);
6893 return;
6894 }
6895
6896 /* XXX Workaround failure in MDIO access while cable is disconnected */
6897
6898 /* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		/* Don't leak the semaphore on this early return. */
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6906
	/*
	 * Pages numbered below 768 (other than page 0) work differently
	 * than the rest, so they would need their own function.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6915
6916 /*
6917 * XXX Workaround MDIO accesses being disabled after entering IEEE
6918 * Power Down (whenever bit 11 of the PHY control register is set)
6919 */
6920
6921 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6922 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6923 page << BME1000_PAGE_SHIFT);
6924 }
6925
6926 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6927 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6928 }
6929
6930 /*
6931  * wm_sgmii_readreg:	[mii interface function]
6932  *
6933  *	Read a PHY register on the SGMII, via the I2C command interface.
6934  *	This could be handled by the PHY layer if we didn't have to lock
6935  *	the resource ...
6936 */
6937 static int
6938 wm_sgmii_readreg(device_t self, int phy, int reg)
6939 {
6940 struct wm_softc *sc = device_private(self);
6941 uint32_t i2ccmd;
6942 int i, rv;
6943
6944 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6945 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6946 __func__);
6947 return 0;
6948 }
6949
6950 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6951 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6952 | I2CCMD_OPCODE_READ;
6953 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6954
6955 /* Poll the ready bit */
6956 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6957 delay(50);
6958 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6959 if (i2ccmd & I2CCMD_READY)
6960 break;
6961 }
6962 if ((i2ccmd & I2CCMD_READY) == 0)
6963 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6964 if ((i2ccmd & I2CCMD_ERROR) != 0)
6965 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6966
6967 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6968
6969 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6970 return rv;
6971 }
6972
6973 /*
6974  * wm_sgmii_writereg:	[mii interface function]
6975  *
6976  *	Write a PHY register on the SGMII, via the I2C command interface.
6977  *	This could be handled by the PHY layer if we didn't have to lock
6978  *	the resource ...
6979 */
6980 static void
6981 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6982 {
6983 struct wm_softc *sc = device_private(self);
6984 uint32_t i2ccmd;
6985 int i;
6986
6987 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6988 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6989 __func__);
6990 return;
6991 }
6992
6993 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6994 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6995 | I2CCMD_OPCODE_WRITE;
6996 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6997
6998 /* Poll the ready bit */
6999 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7000 delay(50);
7001 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7002 if (i2ccmd & I2CCMD_READY)
7003 break;
7004 }
7005 if ((i2ccmd & I2CCMD_READY) == 0)
7006 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7007 if ((i2ccmd & I2CCMD_ERROR) != 0)
7008 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7009
7010 	/* Release the same semaphore that was acquired above. */
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7011 }
7012
7013 /*
7014 * wm_gmii_statchg: [mii interface function]
7015 *
7016 * Callback from MII layer when media changes.
7017 */
7018 static void
7019 wm_gmii_statchg(struct ifnet *ifp)
7020 {
7021 struct wm_softc *sc = ifp->if_softc;
7022 struct mii_data *mii = &sc->sc_mii;
7023
7024 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7025 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7026 sc->sc_fcrtl &= ~FCRTL_XONE;
7027
7028 /*
7029 * Get flow control negotiation result.
7030 */
7031 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7032 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7033 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7034 mii->mii_media_active &= ~IFM_ETH_FMASK;
7035 }
7036
7037 if (sc->sc_flowflags & IFM_FLOW) {
7038 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7039 sc->sc_ctrl |= CTRL_TFCE;
7040 sc->sc_fcrtl |= FCRTL_XONE;
7041 }
7042 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7043 sc->sc_ctrl |= CTRL_RFCE;
7044 }
7045
7046 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7047 DPRINTF(WM_DEBUG_LINK,
7048 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7049 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7050 } else {
7051 DPRINTF(WM_DEBUG_LINK,
7052 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7053 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7054 }
7055
7056 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7057 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7058 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7059 : WMREG_FCRTL, sc->sc_fcrtl);
7060 if (sc->sc_type == WM_T_80003) {
7061 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7062 case IFM_1000_T:
7063 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7064 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7065 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7066 break;
7067 default:
7068 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7069 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7070 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7071 break;
7072 }
7073 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7074 }
7075 }
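
/*
 * Flow-control summary: the negotiated IFM_ETH_TXPAUSE/IFM_ETH_RXPAUSE
 * bits map to CTRL_TFCE/CTRL_RFCE respectively, and FCRTL_XONE (XON
 * frame transmission) is only enabled when we may send pause frames.
 */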
7076
7077 /*
7078 * wm_kmrn_readreg:
7079 *
7080 * Read a kumeran register
7081 */
7082 static int
7083 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7084 {
7085 int rv;
7086
7087 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7088 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7089 aprint_error_dev(sc->sc_dev,
7090 "%s: failed to get semaphore\n", __func__);
7091 return 0;
7092 }
7093 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7094 if (wm_get_swfwhw_semaphore(sc)) {
7095 aprint_error_dev(sc->sc_dev,
7096 "%s: failed to get semaphore\n", __func__);
7097 return 0;
7098 }
7099 }
7100
7101 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7102 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7103 KUMCTRLSTA_REN);
7104 delay(2);
7105
7106 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7107
7108 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7109 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7110 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7111 wm_put_swfwhw_semaphore(sc);
7112
7113 return rv;
7114 }
7115
7116 /*
7117 * wm_kmrn_writereg:
7118 *
7119 * Write a kumeran register
7120 */
7121 static void
7122 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7123 {
7124
7125 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7126 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7127 aprint_error_dev(sc->sc_dev,
7128 "%s: failed to get semaphore\n", __func__);
7129 return;
7130 }
7131 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7132 if (wm_get_swfwhw_semaphore(sc)) {
7133 aprint_error_dev(sc->sc_dev,
7134 "%s: failed to get semaphore\n", __func__);
7135 return;
7136 }
7137 }
7138
7139 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7140 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7141 (val & KUMCTRLSTA_MASK));
7142
7143 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7144 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7145 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7146 wm_put_swfwhw_semaphore(sc);
7147 }
7148
7149 static int
7150 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7151 {
7152 uint32_t eecd = 0;
7153
7154 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7155 || sc->sc_type == WM_T_82583) {
7156 eecd = CSR_READ(sc, WMREG_EECD);
7157
7158 /* Isolate bits 15 & 16 */
7159 eecd = ((eecd >> 15) & 0x03);
7160
7161 /* If both bits are set, device is Flash type */
7162 if (eecd == 0x03)
7163 return 0;
7164 }
7165 return 1;
7166 }
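
/*
 * Example: (eecd >> 15) & 0x03 extracts EECD bits 15 and 16; only the
 * value 0x03 (both bits set) marks an on-die flash part on the
 * 82573/82574/82583, so any other value means a discrete EEPROM.
 */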
7167
7168 static int
7169 wm_get_swsm_semaphore(struct wm_softc *sc)
7170 {
7171 int32_t timeout;
7172 uint32_t swsm;
7173
7174 /* Get the FW semaphore. */
7175 timeout = 1000 + 1; /* XXX */
7176 while (timeout) {
7177 swsm = CSR_READ(sc, WMREG_SWSM);
7178 swsm |= SWSM_SWESMBI;
7179 CSR_WRITE(sc, WMREG_SWSM, swsm);
7180 /* if we managed to set the bit we got the semaphore. */
7181 swsm = CSR_READ(sc, WMREG_SWSM);
7182 if (swsm & SWSM_SWESMBI)
7183 break;
7184
7185 delay(50);
7186 timeout--;
7187 }
7188
7189 if (timeout == 0) {
7190 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7191 /* Release semaphores */
7192 wm_put_swsm_semaphore(sc);
7193 return 1;
7194 }
7195 return 0;
7196 }
7197
7198 static void
7199 wm_put_swsm_semaphore(struct wm_softc *sc)
7200 {
7201 uint32_t swsm;
7202
7203 swsm = CSR_READ(sc, WMREG_SWSM);
7204 swsm &= ~(SWSM_SWESMBI);
7205 CSR_WRITE(sc, WMREG_SWSM, swsm);
7206 }
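
/*
 * Typical usage sketch (not compiled): the SWSM semaphore brackets any
 * access that firmware may race with; wm_get_swsm_semaphore() returns 0
 * on success.
 */
#if 0
	if (wm_get_swsm_semaphore(sc) == 0) {
		/* ... access the shared resource ... */
		wm_put_swsm_semaphore(sc);
	}
#endif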
7207
7208 static int
7209 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7210 {
7211 uint32_t swfw_sync;
7212 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7213 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7214 	int timeout;
7215
7216 for (timeout = 0; timeout < 200; timeout++) {
7217 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7218 if (wm_get_swsm_semaphore(sc)) {
7219 aprint_error_dev(sc->sc_dev,
7220 "%s: failed to get semaphore\n",
7221 __func__);
7222 return 1;
7223 }
7224 }
7225 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7226 if ((swfw_sync & (swmask | fwmask)) == 0) {
7227 swfw_sync |= swmask;
7228 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7229 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7230 wm_put_swsm_semaphore(sc);
7231 return 0;
7232 }
7233 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7234 wm_put_swsm_semaphore(sc);
7235 delay(5000);
7236 }
7237 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7238 device_xname(sc->sc_dev), mask, swfw_sync);
7239 return 1;
7240 }
7241
7242 static void
7243 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7244 {
7245 uint32_t swfw_sync;
7246
7247 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7248 while (wm_get_swsm_semaphore(sc) != 0)
7249 continue;
7250 }
7251 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7252 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7253 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7254 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7255 wm_put_swsm_semaphore(sc);
7256 }
7257
7258 static int
7259 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7260 {
7261 uint32_t ext_ctrl;
7262 	int timeout;
7263
7264 for (timeout = 0; timeout < 200; timeout++) {
7265 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7266 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7267 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7268
7269 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7270 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7271 return 0;
7272 delay(5000);
7273 }
7274 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7275 device_xname(sc->sc_dev), ext_ctrl);
7276 return 1;
7277 }
7278
7279 static void
7280 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7281 {
7282 uint32_t ext_ctrl;
7283 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7284 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7285 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7286 }
7287
7288 static int
7289 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7290 {
7291 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7292 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7293
7294 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
7295 /* Value of bit 22 corresponds to the flash bank we're on. */
7296 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
7297 } else {
7298 uint8_t bank_high_byte;
7299 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
7300 if ((bank_high_byte & 0xc0) == 0x80)
7301 *bank = 0;
7302 else {
7303 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7304 &bank_high_byte);
7305 if ((bank_high_byte & 0xc0) == 0x80)
7306 *bank = 1;
7307 else {
7308 aprint_error_dev(sc->sc_dev,
7309 "EEPROM not present\n");
7310 return -1;
7311 }
7312 }
7313 }
7314
7315 return 0;
7316 }
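
/*
 * The 0x80 test above checks the ICH NVM signature: a valid bank has
 * the top two bits of its signature byte equal to 10b, so
 * (byte & 0xc0) == 0x80 identifies it.  Bank 0 is probed first, then
 * bank 1 at bank1_offset.
 */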
7317
7318 /******************************************************************************
7319 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7320 * register.
7321 *
7322 * sc - Struct containing variables accessed by shared code
7323 * offset - offset of word in the EEPROM to read
7324 * data - word read from the EEPROM
7325 * words - number of words to read
7326 *****************************************************************************/
7327 static int
7328 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7329 {
7330 int32_t error = 0;
7331 uint32_t flash_bank = 0;
7332 uint32_t act_offset = 0;
7333 uint32_t bank_offset = 0;
7334 uint16_t word = 0;
7335 uint16_t i = 0;
7336
7337 /* We need to know which is the valid flash bank. In the event
7338 * that we didn't allocate eeprom_shadow_ram, we may not be
7339 * managing flash_bank. So it cannot be trusted and needs
7340 * to be updated with each read.
7341 */
7342 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7343 if (error) {
7344 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7345 __func__);
7346 return error;
7347 }
7348
7349 /*
7350 * Adjust offset appropriately if we're on bank 1 - adjust for word
7351 * size
7352 */
7353 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7354
7355 error = wm_get_swfwhw_semaphore(sc);
7356 if (error) {
7357 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7358 __func__);
7359 return error;
7360 }
7361
7362 for (i = 0; i < words; i++) {
7363 /* The NVM part needs a byte offset, hence * 2 */
7364 act_offset = bank_offset + ((offset + i) * 2);
7365 error = wm_read_ich8_word(sc, act_offset, &word);
7366 if (error) {
7367 aprint_error_dev(sc->sc_dev,
7368 "%s: failed to read NVM\n", __func__);
7369 break;
7370 }
7371 data[i] = word;
7372 }
7373
7374 wm_put_swfwhw_semaphore(sc);
7375 return error;
7376 }
7377
7378 /******************************************************************************
7379 * This function does initial flash setup so that a new read/write/erase cycle
7380 * can be started.
7381 *
7382 * sc - The pointer to the hw structure
7383 ****************************************************************************/
7384 static int32_t
7385 wm_ich8_cycle_init(struct wm_softc *sc)
7386 {
7387 uint16_t hsfsts;
7388 int32_t error = 1;
7389 int32_t i = 0;
7390
7391 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7392
7393 	/* Maybe check the Flash Descriptor Valid bit in HW status */
7394 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7395 return error;
7396 }
7397
7398 	/* Clear FCERR in HW status by writing a 1 */
7399 	/* Clear DAEL in HW status by writing a 1 */
7400 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7401
7402 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7403
7404 /*
7405 	 * Either we should have a hardware SPI cycle-in-progress bit to check
7406 	 * against, in order to start a new cycle, or the FDONE bit should be
7407 	 * changed in the hardware so that it is 1 after hardware reset, which
7408 	 * can then be used as an indication of whether a cycle is in progress
7409 	 * or has been completed.  We should also have some software semaphore
7410 	 * mechanism to guard FDONE or the cycle-in-progress bit so that two
7411 	 * threads' accesses to those bits can be serialized, or a way so that
7412 	 * two threads don't start the cycle at the same time.
7413 */
7414
7415 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7416 /*
7417 * There is no cycle running at present, so we can start a
7418 * cycle
7419 */
7420
7421 /* Begin by setting Flash Cycle Done. */
7422 hsfsts |= HSFSTS_DONE;
7423 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7424 error = 0;
7425 } else {
7426 /*
7427 * otherwise poll for sometime so the current cycle has a
7428 * chance to end before giving up.
7429 */
7430 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7431 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7432 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7433 error = 0;
7434 break;
7435 }
7436 delay(1);
7437 }
7438 if (error == 0) {
7439 /*
7440 			 * The previous cycle completed before we timed out;
7441 			 * now set the Flash Cycle Done.
7442 */
7443 hsfsts |= HSFSTS_DONE;
7444 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7445 }
7446 }
7447 return error;
7448 }
7449
7450 /******************************************************************************
7451 * This function starts a flash cycle and waits for its completion
7452 *
7453 * sc - The pointer to the hw structure
7454 ****************************************************************************/
7455 static int32_t
7456 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7457 {
7458 uint16_t hsflctl;
7459 uint16_t hsfsts;
7460 int32_t error = 1;
7461 uint32_t i = 0;
7462
7463 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7464 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7465 hsflctl |= HSFCTL_GO;
7466 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7467
7468 /* wait till FDONE bit is set to 1 */
7469 do {
7470 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7471 if (hsfsts & HSFSTS_DONE)
7472 break;
7473 delay(1);
7474 i++;
7475 } while (i < timeout);
7476 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7477 error = 0;
7478
7479 return error;
7480 }
7481
7482 /******************************************************************************
7483 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7484 *
7485 * sc - The pointer to the hw structure
7486 * index - The index of the byte or word to read.
7487 * size - Size of data to read, 1=byte 2=word
7488 * data - Pointer to the word to store the value read.
7489 *****************************************************************************/
7490 static int32_t
7491 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7492 uint32_t size, uint16_t* data)
7493 {
7494 uint16_t hsfsts;
7495 uint16_t hsflctl;
7496 uint32_t flash_linear_address;
7497 uint32_t flash_data = 0;
7498 int32_t error = 1;
7499 int32_t count = 0;
7500
7501 	if (size < 1 || size > 2 || data == NULL ||
7502 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
7503 return error;
7504
7505 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7506 sc->sc_ich8_flash_base;
7507
7508 do {
7509 delay(1);
7510 /* Steps */
7511 error = wm_ich8_cycle_init(sc);
7512 if (error)
7513 break;
7514
7515 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7516 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7517 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7518 & HSFCTL_BCOUNT_MASK;
7519 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7520 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7521
7522 /*
7523 * Write the last 24 bits of index into Flash Linear address
7524 * field in Flash Address
7525 */
7526 /* TODO: TBD maybe check the index against the size of flash */
7527
7528 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7529
7530 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7531
7532 /*
7533 		 * Check if FCERR is set to 1; if so, clear it and try the
7534 		 * whole sequence a few more times.  Otherwise read in (shift
7535 		 * in) the Flash Data0 register, least significant byte
7536 		 * first.
7537 */
7538 if (error == 0) {
7539 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7540 if (size == 1)
7541 *data = (uint8_t)(flash_data & 0x000000FF);
7542 else if (size == 2)
7543 *data = (uint16_t)(flash_data & 0x0000FFFF);
7544 break;
7545 } else {
7546 /*
7547 * If we've gotten here, then things are probably
7548 * completely hosed, but if the error condition is
7549 * detected, it won't hurt to give it another try...
7550 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7551 */
7552 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7553 if (hsfsts & HSFSTS_ERR) {
7554 /* Repeat for some time before giving up. */
7555 continue;
7556 } else if ((hsfsts & HSFSTS_DONE) == 0)
7557 break;
7558 }
7559 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7560
7561 return error;
7562 }
7563
7564 /******************************************************************************
7565 * Reads a single byte from the NVM using the ICH8 flash access registers.
7566 *
7567 * sc - pointer to wm_hw structure
7568 * index - The index of the byte to read.
7569 * data - Pointer to a byte to store the value read.
7570 *****************************************************************************/
7571 static int32_t
7572 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7573 {
7574 int32_t status;
7575 uint16_t word = 0;
7576
7577 status = wm_read_ich8_data(sc, index, 1, &word);
7578 if (status == 0)
7579 *data = (uint8_t)word;
7580 else
7581 *data = 0;
7582
7583 return status;
7584 }
7585
7586 /******************************************************************************
7587 * Reads a word from the NVM using the ICH8 flash access registers.
7588 *
7589 * sc - pointer to wm_hw structure
7590 * index - The starting byte index of the word to read.
7591 * data - Pointer to a word to store the value read.
7592 *****************************************************************************/
7593 static int32_t
7594 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7595 {
7596 int32_t status;
7597
7598 status = wm_read_ich8_data(sc, index, 2, data);
7599 return status;
7600 }
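
/*
 * Usage sketch (not compiled): fetching a single NVM word from the
 * active flash bank.  "word_offset" and "bank_offset" are placeholders
 * for the values computed in wm_read_eeprom_ich8() above; the flash
 * takes byte offsets, hence the multiply by 2.
 */
#if 0
	uint16_t word;

	if (wm_read_ich8_word(sc, bank_offset + (word_offset * 2),
	    &word) == 0) {
		/* "word" now holds the NVM word. */
	}
#endif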
7601
7602 static int
7603 wm_check_mng_mode(struct wm_softc *sc)
7604 {
7605 int rv;
7606
7607 switch (sc->sc_type) {
7608 case WM_T_ICH8:
7609 case WM_T_ICH9:
7610 case WM_T_ICH10:
7611 case WM_T_PCH:
7612 case WM_T_PCH2:
7613 rv = wm_check_mng_mode_ich8lan(sc);
7614 break;
7615 case WM_T_82574:
7616 case WM_T_82583:
7617 rv = wm_check_mng_mode_82574(sc);
7618 break;
7619 case WM_T_82571:
7620 case WM_T_82572:
7621 case WM_T_82573:
7622 case WM_T_80003:
7623 rv = wm_check_mng_mode_generic(sc);
7624 break;
7625 default:
7626 		/* nothing to do */
7627 rv = 0;
7628 break;
7629 }
7630
7631 return rv;
7632 }
7633
7634 static int
7635 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7636 {
7637 uint32_t fwsm;
7638
7639 fwsm = CSR_READ(sc, WMREG_FWSM);
7640
7641 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7642 return 1;
7643
7644 return 0;
7645 }
7646
7647 static int
7648 wm_check_mng_mode_82574(struct wm_softc *sc)
7649 {
7650 uint16_t data;
7651
7652 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7653
7654 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7655 return 1;
7656
7657 return 0;
7658 }
7659
7660 static int
7661 wm_check_mng_mode_generic(struct wm_softc *sc)
7662 {
7663 uint32_t fwsm;
7664
7665 fwsm = CSR_READ(sc, WMREG_FWSM);
7666
7667 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7668 return 1;
7669
7670 return 0;
7671 }
7672
7673 static int
7674 wm_enable_mng_pass_thru(struct wm_softc *sc)
7675 {
7676 uint32_t manc, fwsm, factps;
7677
7678 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7679 return 0;
7680
7681 manc = CSR_READ(sc, WMREG_MANC);
7682
7683 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7684 device_xname(sc->sc_dev), manc));
7685 if (((manc & MANC_RECV_TCO_EN) == 0)
7686 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7687 return 0;
7688
7689 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7690 fwsm = CSR_READ(sc, WMREG_FWSM);
7691 factps = CSR_READ(sc, WMREG_FACTPS);
7692 if (((factps & FACTPS_MNGCG) == 0)
7693 && ((fwsm & FWSM_MODE_MASK)
7694 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7695 return 1;
7696 } else if (((manc & MANC_SMBUS_EN) != 0)
7697 && ((manc & MANC_ASF_EN) == 0))
7698 return 1;
7699
7700 return 0;
7701 }
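/*
 * Usage sketch: the return value is a boolean, so callers just latch
 * it into a driver flag; this mirrors the call in wm_get_wakeup()
 * below.
 *
 *	if (wm_enable_mng_pass_thru(sc) != 0)
 *		sc->sc_flags |= WM_F_HAS_MANAGE;
 */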
7702
7703 static int
7704 wm_check_reset_block(struct wm_softc *sc)
7705 {
7706 uint32_t reg;
7707
7708 switch (sc->sc_type) {
7709 case WM_T_ICH8:
7710 case WM_T_ICH9:
7711 case WM_T_ICH10:
7712 case WM_T_PCH:
7713 case WM_T_PCH2:
7714 reg = CSR_READ(sc, WMREG_FWSM);
7715 if ((reg & FWSM_RSPCIPHY) != 0)
7716 return 0;
7717 else
7718 return -1;
7719 break;
7720 case WM_T_82571:
7721 case WM_T_82572:
7722 case WM_T_82573:
7723 case WM_T_82574:
7724 case WM_T_82583:
7725 case WM_T_80003:
7726 reg = CSR_READ(sc, WMREG_MANC);
7727 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7728 return -1;
7729 else
7730 return 0;
7731 break;
7732 default:
7733 /* no problem */
7734 break;
7735 }
7736
7737 return 0;
7738 }
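/*
 * Usage sketch (not from the original driver): wm_check_reset_block()
 * returns 0 when a PHY reset is allowed and non-zero when the firmware
 * blocks it, so a hypothetical reset path would bail out first.
 *
 *	if (wm_check_reset_block(sc) != 0)
 *		return;		(firmware owns the PHY; skip the reset)
 *	wm_gmii_reset(sc);
 */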
7739
7740 static void
7741 wm_get_hw_control(struct wm_softc *sc)
7742 {
7743 uint32_t reg;
7744
7745 switch (sc->sc_type) {
7746 case WM_T_82573:
7747 reg = CSR_READ(sc, WMREG_SWSM);
7748 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7749 break;
7750 case WM_T_82571:
7751 case WM_T_82572:
7752 case WM_T_82574:
7753 case WM_T_82583:
7754 case WM_T_80003:
7755 case WM_T_ICH8:
7756 case WM_T_ICH9:
7757 case WM_T_ICH10:
7758 case WM_T_PCH:
7759 case WM_T_PCH2:
7760 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7761 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7762 break;
7763 default:
7764 break;
7765 }
7766 }
7767
7768 static void
7769 wm_release_hw_control(struct wm_softc *sc)
7770 {
7771 uint32_t reg;
7772
7773 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7774 return;
7775
7776 if (sc->sc_type == WM_T_82573) {
7777 reg = CSR_READ(sc, WMREG_SWSM);
7778 		reg &= ~SWSM_DRV_LOAD;
7779 		CSR_WRITE(sc, WMREG_SWSM, reg);
7780 } else {
7781 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7782 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7783 }
7784 }
7785
7786 /* XXX Currently TBI only */
7787 static int
7788 wm_check_for_link(struct wm_softc *sc)
7789 {
7790 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7791 uint32_t rxcw;
7792 uint32_t ctrl;
7793 uint32_t status;
7794 uint32_t sig;
7795
7796 rxcw = CSR_READ(sc, WMREG_RXCW);
7797 ctrl = CSR_READ(sc, WMREG_CTRL);
7798 status = CSR_READ(sc, WMREG_STATUS);
7799
7800 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7801
7802 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7803 device_xname(sc->sc_dev), __func__,
7804 ((ctrl & CTRL_SWDPIN(1)) == sig),
7805 ((status & STATUS_LU) != 0),
7806 ((rxcw & RXCW_C) != 0)
7807 ));
7808
7809 	/*
7810 	 * SWDPIN	LU	RXCW
7811 	 *	0	0	0
7812 	 *	0	0	1	(should not happen)
7813 	 *	0	1	0	(should not happen)
7814 	 *	0	1	1	(should not happen)
7815 	 *	1	0	0	Disable autonegotiation and force link up
7816 	 *	1	0	1	got /C/ but no link up yet
7817 	 *	1	1	0	(link up)
7818 	 *	1	1	1	If IFM_AUTO, go back to autonegotiation
7819 	 *
7820 	 */
7821 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7822 && ((status & STATUS_LU) == 0)
7823 && ((rxcw & RXCW_C) == 0)) {
7824 		DPRINTF(WM_DEBUG_LINK, ("%s: force link up and full-duplex\n",
7825 __func__));
7826 sc->sc_tbi_linkup = 0;
7827 /* Disable auto-negotiation in the TXCW register */
7828 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7829
7830 /*
7831 * Force link-up and also force full-duplex.
7832 *
7833 		 * NOTE: the hardware updated the TFCE and RFCE bits in CTRL
7834 		 * automatically, so update sc->sc_ctrl to match.
7835 */
7836 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7837 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7838 } else if (((status & STATUS_LU) != 0)
7839 && ((rxcw & RXCW_C) != 0)
7840 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7841 sc->sc_tbi_linkup = 1;
7842 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7843 __func__));
7844 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7845 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7846 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7847 && ((rxcw & RXCW_C) != 0)) {
7848 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7849 } else {
7850 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7851 status));
7852 }
7853
7854 return 0;
7855 }
7856
7857 /* Work-around for 82566 Kumeran PCS lock loss */
7858 static void
7859 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7860 {
7861 int miistatus, active, i;
7862 int reg;
7863
7864 miistatus = sc->sc_mii.mii_media_status;
7865
7866 /* If the link is not up, do nothing */
7867 	if ((miistatus & IFM_ACTIVE) == 0)
7868 return;
7869
7870 active = sc->sc_mii.mii_media_active;
7871
7872 /* Nothing to do if the link is other than 1Gbps */
7873 if (IFM_SUBTYPE(active) != IFM_1000_T)
7874 return;
7875
7876 for (i = 0; i < 10; i++) {
7877 /* read twice */
7878 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7879 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7880 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7881 goto out; /* GOOD! */
7882
7883 /* Reset the PHY */
7884 wm_gmii_reset(sc);
7885 delay(5*1000);
7886 }
7887
7888 /* Disable GigE link negotiation */
7889 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7890 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7891 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7892
7893 /*
7894 * Call gig speed drop workaround on Gig disable before accessing
7895 * any PHY registers.
7896 */
7897 wm_gig_downshift_workaround_ich8lan(sc);
7898
7899 out:
7900 return;
7901 }
7902
7903 /* WOL from S5 stops working */
7904 static void
7905 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7906 {
7907 uint16_t kmrn_reg;
7908
7909 /* Only for igp3 */
7910 if (sc->sc_phytype == WMPHY_IGP_3) {
7911 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7912 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7913 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7914 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7915 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7916 }
7917 }
7918
7919 #ifdef WM_WOL
7920 /* Power down workaround on D3 */
7921 static void
7922 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7923 {
7924 uint32_t reg;
7925 int i;
7926
7927 for (i = 0; i < 2; i++) {
7928 /* Disable link */
7929 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7930 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7931 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7932
7933 /*
7934 * Call gig speed drop workaround on Gig disable before
7935 * accessing any PHY registers
7936 */
7937 if (sc->sc_type == WM_T_ICH8)
7938 wm_gig_downshift_workaround_ich8lan(sc);
7939
7940 /* Write VR power-down enable */
7941 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7942 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7943 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7944 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7945
7946 /* Read it back and test */
7947 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7948 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7949 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7950 break;
7951
7952 /* Issue PHY reset and repeat at most one more time */
7953 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7954 }
7955 }
7956 #endif /* WM_WOL */
7957
7958 /*
7959  * Workaround for PCH PHYs
7960 * XXX should be moved to new PHY driver?
7961 */
7962 static void
7963 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7964 {
7965 if (sc->sc_phytype == WMPHY_82577)
7966 wm_set_mdio_slow_mode_hv(sc);
7967
7968 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7969
7970 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7971
7972 /* 82578 */
7973 if (sc->sc_phytype == WMPHY_82578) {
7974 /* PCH rev. < 3 */
7975 if (sc->sc_rev < 3) {
7976 /* XXX 6 bit shift? Why? Is it page2? */
7977 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7978 0x66c0);
7979 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7980 0xffff);
7981 }
7982
7983 /* XXX phy rev. < 2 */
7984 }
7985
7986 /* Select page 0 */
7987
7988 /* XXX acquire semaphore */
7989 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7990 /* XXX release semaphore */
7991
7992 	/*
7993 	 * Configure the K1 Si workaround during PHY reset, assuming there
7994 	 * is link, so that K1 is disabled if the link runs at 1Gbps.
7995 	 */
7996 wm_k1_gig_workaround_hv(sc, 1);
7997 }
7998
7999 static void
8000 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8001 {
8002
8003 wm_set_mdio_slow_mode_hv(sc);
8004 }
8005
8006 static void
8007 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8008 {
8009 int k1_enable = sc->sc_nvm_k1_enabled;
8010
8011 /* XXX acquire semaphore */
8012
8013 if (link) {
8014 k1_enable = 0;
8015
8016 /* Link stall fix for link up */
8017 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8018 } else {
8019 /* Link stall fix for link down */
8020 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8021 }
8022
8023 wm_configure_k1_ich8lan(sc, k1_enable);
8024
8025 /* XXX release semaphore */
8026 }
8027
8028 static void
8029 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8030 {
8031 uint32_t reg;
8032
8033 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8034 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8035 reg | HV_KMRN_MDIO_SLOW);
8036 }
8037
8038 static void
8039 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8040 {
8041 uint32_t ctrl, ctrl_ext, tmp;
8042 uint16_t kmrn_reg;
8043
8044 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8045
8046 if (k1_enable)
8047 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8048 else
8049 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8050
8051 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8052
8053 delay(20);
8054
8055 ctrl = CSR_READ(sc, WMREG_CTRL);
8056 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8057
8058 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8059 tmp |= CTRL_FRCSPD;
8060
8061 CSR_WRITE(sc, WMREG_CTRL, tmp);
8062 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8063 delay(20);
8064
8065 CSR_WRITE(sc, WMREG_CTRL, ctrl);
8066 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8067 delay(20);
8068 }
8069
8070 static void
8071 wm_smbustopci(struct wm_softc *sc)
8072 {
8073 uint32_t fwsm;
8074
8075 fwsm = CSR_READ(sc, WMREG_FWSM);
8076 if (((fwsm & FWSM_FW_VALID) == 0)
8077 	    && (wm_check_reset_block(sc) == 0)) {
8078 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8079 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8080 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8081 delay(10);
8082 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8083 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8084 delay(50*1000);
8085
8086 /*
8087 * Gate automatic PHY configuration by hardware on non-managed
8088 * 82579
8089 */
8090 if (sc->sc_type == WM_T_PCH2)
8091 wm_gate_hw_phy_config_ich8lan(sc, 1);
8092 }
8093 }
8094
8095 static void
8096 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8097 {
8098 uint32_t gcr;
8099 pcireg_t ctrl2;
8100
8101 gcr = CSR_READ(sc, WMREG_GCR);
8102
8103 /* Only take action if timeout value is defaulted to 0 */
8104 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8105 goto out;
8106
8107 if ((gcr & GCR_CAP_VER2) == 0) {
8108 gcr |= GCR_CMPL_TMOUT_10MS;
8109 goto out;
8110 }
8111
8112 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8113 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
8114 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
8115 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8116 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
8117
8118 out:
8119 /* Disable completion timeout resend */
8120 gcr &= ~GCR_CMPL_TMOUT_RESEND;
8121
8122 CSR_WRITE(sc, WMREG_GCR, gcr);
8123 }
8124
8125 /* Special case for the 82575: manual init is needed ... */
8126 static void
8127 wm_reset_init_script_82575(struct wm_softc *sc)
8128 {
8129 	/*
8130 	 * Remark: this is untested code - we have no board without an EEPROM.
8131 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
8132 	 */
8133
8134 /* SerDes configuration via SERDESCTRL */
8135 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8136 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8137 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8138 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8139
8140 /* CCM configuration via CCMCTL register */
8141 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8142 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8143
8144 /* PCIe lanes configuration */
8145 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8146 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8147 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8148 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8149
8150 /* PCIe PLL Configuration */
8151 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8152 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8153 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8154 }
8155
8156 static void
8157 wm_init_manageability(struct wm_softc *sc)
8158 {
8159
8160 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8161 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8162 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8163
8164 		/* disable hardware interception of ARP */
8165 manc &= ~MANC_ARP_EN;
8166
8167 /* enable receiving management packets to the host */
8168 if (sc->sc_type >= WM_T_82571) {
8169 manc |= MANC_EN_MNG2HOST;
8170 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8171 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8172
8173 }
8174
8175 CSR_WRITE(sc, WMREG_MANC, manc);
8176 }
8177 }
8178
8179 static void
8180 wm_release_manageability(struct wm_softc *sc)
8181 {
8182
8183 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8184 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8185
8186 if (sc->sc_type >= WM_T_82571)
8187 manc &= ~MANC_EN_MNG2HOST;
8188
8189 CSR_WRITE(sc, WMREG_MANC, manc);
8190 }
8191 }
8192
8193 static void
8194 wm_get_wakeup(struct wm_softc *sc)
8195 {
8196
8197 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8198 switch (sc->sc_type) {
8199 case WM_T_82573:
8200 case WM_T_82583:
8201 sc->sc_flags |= WM_F_HAS_AMT;
8202 /* FALLTHROUGH */
8203 case WM_T_80003:
8204 case WM_T_82541:
8205 case WM_T_82547:
8206 case WM_T_82571:
8207 case WM_T_82572:
8208 case WM_T_82574:
8209 case WM_T_82575:
8210 case WM_T_82576:
8211 #if 0 /* XXX */
8212 case WM_T_82580:
8213 case WM_T_82580ER:
8214 case WM_T_I350:
8215 #endif
8216 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8217 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8218 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8219 break;
8220 case WM_T_ICH8:
8221 case WM_T_ICH9:
8222 case WM_T_ICH10:
8223 case WM_T_PCH:
8224 case WM_T_PCH2:
8225 sc->sc_flags |= WM_F_HAS_AMT;
8226 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8227 break;
8228 default:
8229 break;
8230 }
8231
8232 /* 1: HAS_MANAGE */
8233 if (wm_enable_mng_pass_thru(sc) != 0)
8234 sc->sc_flags |= WM_F_HAS_MANAGE;
8235
8236 #ifdef WM_DEBUG
8237 printf("\n");
8238 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8239 printf("HAS_AMT,");
8240 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8241 printf("ARC_SUBSYS_VALID,");
8242 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8243 printf("ASF_FIRMWARE_PRES,");
8244 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8245 printf("HAS_MANAGE,");
8246 printf("\n");
8247 #endif
8248 	/*
8249 	 * Note that the WOL flag is set after the EEPROM stuff has been
8250 	 * reset.
8251 	 */
8252 }
8253
8254 #ifdef WM_WOL
8255 /* WOL in the newer chipset interfaces (pchlan) */
8256 static void
8257 wm_enable_phy_wakeup(struct wm_softc *sc)
8258 {
8259 #if 0
8260 uint16_t preg;
8261
8262 /* Copy MAC RARs to PHY RARs */
8263
8264 /* Copy MAC MTA to PHY MTA */
8265
8266 /* Configure PHY Rx Control register */
8267
8268 /* Enable PHY wakeup in MAC register */
8269
8270 /* Configure and enable PHY wakeup in PHY registers */
8271
8272 /* Activate PHY wakeup */
8273
8274 /* XXX */
8275 #endif
8276 }
8277
8278 static void
8279 wm_enable_wakeup(struct wm_softc *sc)
8280 {
8281 uint32_t reg, pmreg;
8282 pcireg_t pmode;
8283
8284 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8285 &pmreg, NULL) == 0)
8286 return;
8287
8288 /* Advertise the wakeup capability */
8289 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8290 | CTRL_SWDPIN(3));
8291 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8292
8293 /* ICH workaround */
8294 switch (sc->sc_type) {
8295 case WM_T_ICH8:
8296 case WM_T_ICH9:
8297 case WM_T_ICH10:
8298 case WM_T_PCH:
8299 case WM_T_PCH2:
8300 /* Disable gig during WOL */
8301 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8302 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8303 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8304 if (sc->sc_type == WM_T_PCH)
8305 wm_gmii_reset(sc);
8306
8307 /* Power down workaround */
8308 if (sc->sc_phytype == WMPHY_82577) {
8309 struct mii_softc *child;
8310
8311 /* Assume that the PHY is copper */
8312 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8313 if (child->mii_mpd_rev <= 2)
8314 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8315 (768 << 5) | 25, 0x0444); /* magic num */
8316 }
8317 break;
8318 default:
8319 break;
8320 }
8321
8322 /* Keep the laser running on fiber adapters */
8323 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8324 	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
8325 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8326 reg |= CTRL_EXT_SWDPIN(3);
8327 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8328 }
8329
8330 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8331 #if 0 /* for the multicast packet */
8332 reg |= WUFC_MC;
8333 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8334 #endif
8335
8336 if (sc->sc_type == WM_T_PCH) {
8337 wm_enable_phy_wakeup(sc);
8338 } else {
8339 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8340 CSR_WRITE(sc, WMREG_WUFC, reg);
8341 }
8342
8343 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8344 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8345 || (sc->sc_type == WM_T_PCH2))
8346 && (sc->sc_phytype == WMPHY_IGP_3))
8347 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8348
8349 /* Request PME */
8350 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8351 #if 0
8352 /* Disable WOL */
8353 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8354 #else
8355 /* For WOL */
8356 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8357 #endif
8358 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8359 }
8360 #endif /* WM_WOL */
8361
8362 static bool
8363 wm_suspend(device_t self, const pmf_qual_t *qual)
8364 {
8365 struct wm_softc *sc = device_private(self);
8366
8367 wm_release_manageability(sc);
8368 wm_release_hw_control(sc);
8369 #ifdef WM_WOL
8370 wm_enable_wakeup(sc);
8371 #endif
8372
8373 return true;
8374 }
8375
8376 static bool
8377 wm_resume(device_t self, const pmf_qual_t *qual)
8378 {
8379 struct wm_softc *sc = device_private(self);
8380
8381 wm_init_manageability(sc);
8382
8383 return true;
8384 }
8385
8386 static void
8387 wm_set_eee_i350(struct wm_softc *sc)
8388 {
8389 uint32_t ipcnfg, eeer;
8390
8391 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8392 eeer = CSR_READ(sc, WMREG_EEER);
8393
8394 if ((sc->sc_flags & WM_F_EEE) != 0) {
8395 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8396 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8397 | EEER_LPI_FC);
8398 } else {
8399 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8400 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8401 | EEER_LPI_FC);
8402 }
8403
8404 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8405 CSR_WRITE(sc, WMREG_EEER, eeer);
8406 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8407 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8408 }
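/*
 * Usage sketch (not from the original driver): EEE on/off follows the
 * WM_F_EEE flag, so a hypothetical control path would toggle the flag
 * and then re-run this routine.
 *
 *	sc->sc_flags |= WM_F_EEE;	(or &= ~WM_F_EEE to disable)
 *	wm_set_eee_i350(sc);
 */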
8409