/*	$NetBSD: if_wm.c,v 1.298 2014/09/16 07:06:42 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.298 2014/09/16 07:06:42 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
#define	WM_DEBUG_MANAGE	0x10
#define	WM_DEBUG_NVM	0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
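
/*
 * DPRINTF() usage note: the second argument must be a parenthesized
 * printf-style argument list, e.g.
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX done\n", device_xname(sc->sc_dev)));
 * since the macro expands to "printf y".
 */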

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
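
/*
 * Note that the _MASK/_NEXT macros above implement ring-index wrap-around
 * with a bitwise AND, which only works because the descriptor counts and
 * queue lengths are all powers of two.
 */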

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
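
/*
 * Per-port PHY semaphore bits; presumably indexed by the chip's function
 * ID (sc_funcid, 0 to 3) when acquiring the SWFW semaphore for a port.
 */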

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control data segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall; /* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported flow-control frames */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
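
/*
 * A NULL sc_tx_lock/sc_rx_lock means "no locking" (the non-WM_MPSAFE
 * case): the LOCK/UNLOCK macros then do nothing and the _LOCKED() checks
 * always succeed.  Note that WM_BOTH_UNLOCK() releases the locks in the
 * reverse of WM_BOTH_LOCK()'s acquisition order.
 */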

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
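
/*
 * sc_rxtailp always points at the m_next field of the last mbuf in the
 * receive chain (or at sc_rxhead when the chain is empty), so
 * WM_RXCHAIN_LINK() appends in constant time without walking the chain.
 */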

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
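
/*
 * Example: with WM_NTXDESC(sc) == 4096, WM_CDTXSYNC(sc, 4094, 4, ops)
 * syncs descriptors 4094-4095 first and then wraps around to sync
 * descriptors 0-1.
 */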

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
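
/*
 * The 2-byte "align tweak" above makes the 14-byte Ethernet header end
 * at offset 16 in the buffer, so the IP header that follows starts on a
 * 4-byte boundary.
 */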

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (with and without EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		0x00
#define	WMP_F_FIBER		0x01
#define	WMP_F_COPPER		0x02
#define	WMP_F_SERDES		0x03 /* Internal SERDES */
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350, WMP_F_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210, WMP_F_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },

	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
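
/*
 * The I/O BAR is a two-register indirect window: the target register
 * offset is written at BAR offset 0 and the data is transferred at BAR
 * offset 4 (see the #if 0'd wm_io_read() above for the read side).
 */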

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
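
/*
 * On configurations where bus_addr_t is 32 bits wide the high word is
 * always zero; the sizeof() test is a compile-time constant, so the
 * compiler should be able to discard the dead branch entirely.
 */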

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}
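
/*
 * The wm_products[] table is terminated by an all-zero sentinel entry,
 * which is why the wmp_name != NULL test above is sufficient.
 */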

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that is not a problem here, since those newer
			 * chips don't have the bugs this mapping is used
			 * to work around.
			 *
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;
1461 if (sc->sc_type < WM_T_82543) {
1462 /* We don't really know the bus characteristics here. */
1463 sc->sc_bus_speed = 33;
1464 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1465 /*
1466 * CSA (Communication Streaming Architecture) is about as fast
1467 * a 32-bit 66MHz PCI Bus.
1468 */
1469 sc->sc_flags |= WM_F_CSA;
1470 sc->sc_bus_speed = 66;
1471 aprint_verbose_dev(sc->sc_dev,
1472 "Communication Streaming Architecture\n");
1473 if (sc->sc_type == WM_T_82547) {
1474 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1475 callout_setfunc(&sc->sc_txfifo_ch,
1476 wm_82547_txfifo_stall, sc);
1477 aprint_verbose_dev(sc->sc_dev,
1478 "using 82547 Tx FIFO stall work-around\n");
1479 }
1480 } else if (sc->sc_type >= WM_T_82571) {
1481 sc->sc_flags |= WM_F_PCIE;
1482 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1483 && (sc->sc_type != WM_T_ICH10)
1484 && (sc->sc_type != WM_T_PCH)
1485 && (sc->sc_type != WM_T_PCH2)
1486 && (sc->sc_type != WM_T_PCH_LPT)) {
1487 /* ICH* and PCH* have no PCIe capability registers */
1488 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1489 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1490 NULL) == 0)
1491 aprint_error_dev(sc->sc_dev,
1492 "unable to find PCIe capability\n");
1493 }
1494 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1495 } else {
1496 reg = CSR_READ(sc, WMREG_STATUS);
1497 if (reg & STATUS_BUS64)
1498 sc->sc_flags |= WM_F_BUS64;
1499 if ((reg & STATUS_PCIX_MODE) != 0) {
1500 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1501
1502 sc->sc_flags |= WM_F_PCIX;
1503 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1504 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1505 aprint_error_dev(sc->sc_dev,
1506 "unable to find PCIX capability\n");
1507 else if (sc->sc_type != WM_T_82545_3 &&
1508 sc->sc_type != WM_T_82546_3) {
1509 /*
1510 * Work around a problem caused by the BIOS
1511 * setting the max memory read byte count
1512 * incorrectly.
1513 */
1514 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1515 sc->sc_pcixe_capoff + PCIX_CMD);
1516 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1517 sc->sc_pcixe_capoff + PCIX_STATUS);
1518
1519 bytecnt =
1520 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1521 PCIX_CMD_BYTECNT_SHIFT;
1522 maxb =
1523 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1524 PCIX_STATUS_MAXB_SHIFT;
1525 if (bytecnt > maxb) {
1526 aprint_verbose_dev(sc->sc_dev,
1527 "resetting PCI-X MMRBC: %d -> %d\n",
1528 512 << bytecnt, 512 << maxb);
1529 pcix_cmd = (pcix_cmd &
1530 ~PCIX_CMD_BYTECNT_MASK) |
1531 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1532 pci_conf_write(pa->pa_pc, pa->pa_tag,
1533 sc->sc_pcixe_capoff + PCIX_CMD,
1534 pcix_cmd);
1535 }
1536 }
1537 }
1538 /*
1539 * The quad port adapter is special; it has a PCIX-PCIX
1540 * bridge on the board, and can run the secondary bus at
1541 * a higher speed.
1542 */
1543 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1544 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1545 : 66;
1546 } else if (sc->sc_flags & WM_F_PCIX) {
1547 switch (reg & STATUS_PCIXSPD_MASK) {
1548 case STATUS_PCIXSPD_50_66:
1549 sc->sc_bus_speed = 66;
1550 break;
1551 case STATUS_PCIXSPD_66_100:
1552 sc->sc_bus_speed = 100;
1553 break;
1554 case STATUS_PCIXSPD_100_133:
1555 sc->sc_bus_speed = 133;
1556 break;
1557 default:
1558 aprint_error_dev(sc->sc_dev,
1559 "unknown PCIXSPD %d; assuming 66MHz\n",
1560 reg & STATUS_PCIXSPD_MASK);
1561 sc->sc_bus_speed = 66;
1562 break;
1563 }
1564 } else
1565 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1566 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1567 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1568 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1569 }
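
	/*
	 * The MMRBC clamp above leans on the PCI-X encoding in which the
	 * 2-bit byte-count fields select a power of two starting at 512
	 * bytes, i.e. bytes = 512 << field.  A minimal userland sketch of
	 * that arithmetic (example only, not driver code; the field
	 * values are hypothetical):
	 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned int bytecnt = 3;	/* hypothetical BIOS setting: 4096 */
	unsigned int maxb = 1;		/* hypothetical device max: 1024 */

	if (bytecnt > maxb) {
		/* Mirrors the "resetting PCI-X MMRBC" message above. */
		printf("resetting PCI-X MMRBC: %d -> %d\n",
		    512 << bytecnt, 512 << maxb);
		bytecnt = maxb;
	}
	return 0;
}
#endif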
1570
1571 /*
1572 * Allocate the control data structures, and create and load the
1573 * DMA map for it.
1574 *
1575 * NOTE: All Tx descriptors must be in the same 4G segment of
1576 * memory. So must Rx descriptors. We simplify by allocating
1577 * both sets within the same 4G segment.
1578 */
1579 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1580 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1581 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1582 sizeof(struct wm_control_data_82542) :
1583 sizeof(struct wm_control_data_82544);
1584 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1585 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1586 &sc->sc_cd_rseg, 0)) != 0) {
1587 aprint_error_dev(sc->sc_dev,
1588 "unable to allocate control data, error = %d\n",
1589 error);
1590 goto fail_0;
1591 }
1592
1593 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1594 sc->sc_cd_rseg, sc->sc_cd_size,
1595 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1596 aprint_error_dev(sc->sc_dev,
1597 "unable to map control data, error = %d\n", error);
1598 goto fail_1;
1599 }
1600
1601 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1602 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1603 aprint_error_dev(sc->sc_dev,
1604 "unable to create control data DMA map, error = %d\n",
1605 error);
1606 goto fail_2;
1607 }
1608
1609 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1610 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1611 aprint_error_dev(sc->sc_dev,
1612 "unable to load control data DMA map, error = %d\n",
1613 error);
1614 goto fail_3;
1615 }
1616
1617 /* Create the transmit buffer DMA maps. */
1618 WM_TXQUEUELEN(sc) =
1619 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1620 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1621 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1622 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1623 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1624 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1625 aprint_error_dev(sc->sc_dev,
1626 "unable to create Tx DMA map %d, error = %d\n",
1627 i, error);
1628 goto fail_4;
1629 }
1630 }
1631
1632 /* Create the receive buffer DMA maps. */
1633 for (i = 0; i < WM_NRXDESC; i++) {
1634 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1635 MCLBYTES, 0, 0,
1636 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1637 aprint_error_dev(sc->sc_dev,
1638 "unable to create Rx DMA map %d error = %d\n",
1639 i, error);
1640 goto fail_5;
1641 }
1642 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1643 }
1644
1645 /* clear interesting stat counters */
1646 CSR_READ(sc, WMREG_COLC);
1647 CSR_READ(sc, WMREG_RXERRC);
1648
1649 	/* Move PHY control from SMBus to PCIe */
1650 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1651 || (sc->sc_type == WM_T_PCH_LPT))
1652 wm_smbustopci(sc);
1653
1654 /* Reset the chip to a known state. */
1655 wm_reset(sc);
1656
1657 /* Get some information about the EEPROM. */
1658 switch (sc->sc_type) {
1659 case WM_T_82542_2_0:
1660 case WM_T_82542_2_1:
1661 case WM_T_82543:
1662 case WM_T_82544:
1663 /* Microwire */
1664 sc->sc_nvm_wordsize = 64;
1665 sc->sc_nvm_addrbits = 6;
1666 break;
1667 case WM_T_82540:
1668 case WM_T_82545:
1669 case WM_T_82545_3:
1670 case WM_T_82546:
1671 case WM_T_82546_3:
1672 /* Microwire */
1673 reg = CSR_READ(sc, WMREG_EECD);
1674 if (reg & EECD_EE_SIZE) {
1675 sc->sc_nvm_wordsize = 256;
1676 sc->sc_nvm_addrbits = 8;
1677 } else {
1678 sc->sc_nvm_wordsize = 64;
1679 sc->sc_nvm_addrbits = 6;
1680 }
1681 sc->sc_flags |= WM_F_LOCK_EECD;
1682 break;
1683 case WM_T_82541:
1684 case WM_T_82541_2:
1685 case WM_T_82547:
1686 case WM_T_82547_2:
1687 reg = CSR_READ(sc, WMREG_EECD);
1688 if (reg & EECD_EE_TYPE) {
1689 /* SPI */
1690 sc->sc_flags |= WM_F_EEPROM_SPI;
1691 wm_nvm_set_addrbits_size_eecd(sc);
1692 } else {
1693 /* Microwire */
1694 if ((reg & EECD_EE_ABITS) != 0) {
1695 sc->sc_nvm_wordsize = 256;
1696 sc->sc_nvm_addrbits = 8;
1697 } else {
1698 sc->sc_nvm_wordsize = 64;
1699 sc->sc_nvm_addrbits = 6;
1700 }
1701 }
1702 sc->sc_flags |= WM_F_LOCK_EECD;
1703 break;
1704 case WM_T_82571:
1705 case WM_T_82572:
1706 /* SPI */
1707 sc->sc_flags |= WM_F_EEPROM_SPI;
1708 wm_nvm_set_addrbits_size_eecd(sc);
1709 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1710 break;
1711 case WM_T_82573:
1712 sc->sc_flags |= WM_F_LOCK_SWSM;
1713 /* FALLTHROUGH */
1714 case WM_T_82574:
1715 case WM_T_82583:
1716 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1717 sc->sc_flags |= WM_F_EEPROM_FLASH;
1718 sc->sc_nvm_wordsize = 2048;
1719 } else {
1720 /* SPI */
1721 sc->sc_flags |= WM_F_EEPROM_SPI;
1722 wm_nvm_set_addrbits_size_eecd(sc);
1723 }
1724 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1725 break;
1726 case WM_T_82575:
1727 case WM_T_82576:
1728 case WM_T_82580:
1729 case WM_T_82580ER:
1730 case WM_T_I350:
1731 case WM_T_I354:
1732 case WM_T_80003:
1733 /* SPI */
1734 sc->sc_flags |= WM_F_EEPROM_SPI;
1735 wm_nvm_set_addrbits_size_eecd(sc);
1736 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1737 | WM_F_LOCK_SWSM;
1738 break;
1739 case WM_T_ICH8:
1740 case WM_T_ICH9:
1741 case WM_T_ICH10:
1742 case WM_T_PCH:
1743 case WM_T_PCH2:
1744 case WM_T_PCH_LPT:
1745 /* FLASH */
1746 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1747 sc->sc_nvm_wordsize = 2048;
1748 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1749 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1750 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1751 aprint_error_dev(sc->sc_dev,
1752 "can't map FLASH registers\n");
1753 goto fail_5;
1754 }
1755 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1756 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1757 ICH_FLASH_SECTOR_SIZE;
1758 sc->sc_ich8_flash_bank_size =
1759 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1760 sc->sc_ich8_flash_bank_size -=
1761 (reg & ICH_GFPREG_BASE_MASK);
1762 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1763 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1764 break;
1765 case WM_T_I210:
1766 case WM_T_I211:
1767 wm_nvm_set_addrbits_size_eecd(sc);
1768 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1769 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1770 break;
1771 default:
1772 break;
1773 }
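
	/*
	 * The ICH/PCH GFPREG decode above packs two 13-bit sector numbers
	 * into one register: bits 12:0 give the first sector of the flash
	 * region, bits 28:16 the last.  A minimal sketch of the same
	 * arithmetic (example only; the mask, sector size and register
	 * value are illustrative, not read from hardware):
	 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_GFPREG_BASE_MASK	0x1fff	/* 13-bit sector number */
#define EX_FLASH_SECTOR_SIZE	4096	/* bytes per flash sector */

int
main(void)
{
	uint32_t reg = (0x001f << 16) | 0x0010;	/* hypothetical GFPREG */
	uint32_t base, banksize;

	base = (reg & EX_GFPREG_BASE_MASK) * EX_FLASH_SECTOR_SIZE;
	banksize = ((reg >> 16) & EX_GFPREG_BASE_MASK) + 1
	    - (reg & EX_GFPREG_BASE_MASK);	/* sectors in both banks */
	banksize *= EX_FLASH_SECTOR_SIZE;	/* ... in bytes */
	banksize /= 2 * sizeof(uint16_t);	/* 16-bit words per bank */
	printf("flash base %u, bank size %u words\n",
	    (unsigned)base, (unsigned)banksize);
	return 0;
}
#endif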
1774
1775 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1776 switch (sc->sc_type) {
1777 case WM_T_82571:
1778 case WM_T_82572:
1779 reg = CSR_READ(sc, WMREG_SWSM2);
1780 if ((reg & SWSM2_LOCK) != 0) {
1781 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1782 force_clear_smbi = true;
1783 } else
1784 force_clear_smbi = false;
1785 break;
1786 case WM_T_82573:
1787 case WM_T_82574:
1788 case WM_T_82583:
1789 force_clear_smbi = true;
1790 break;
1791 default:
1792 force_clear_smbi = false;
1793 break;
1794 }
1795 if (force_clear_smbi) {
1796 reg = CSR_READ(sc, WMREG_SWSM);
1797 if ((reg & SWSM_SMBI) != 0)
1798 aprint_error_dev(sc->sc_dev,
1799 "Please update the Bootagent\n");
1800 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1801 }
1802
1803 /*
1804 	 * Defer printing the EEPROM type until after verifying the checksum.
1805 * This allows the EEPROM type to be printed correctly in the case
1806 * that no EEPROM is attached.
1807 */
1808 /*
1809 * Validate the EEPROM checksum. If the checksum fails, flag
1810 * this for later, so we can fail future reads from the EEPROM.
1811 */
1812 if (wm_nvm_validate_checksum(sc)) {
1813 /*
1814 		 * Validate the checksum a second time; some PCI-E parts
1815 		 * fail the first check while the link is in a sleep state.
1816 */
1817 if (wm_nvm_validate_checksum(sc))
1818 sc->sc_flags |= WM_F_EEPROM_INVALID;
1819 }
1820
1821 /* Set device properties (macflags) */
1822 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1823
1824 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1825 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1826 else {
1827 aprint_verbose_dev(sc->sc_dev, "%u words ",
1828 sc->sc_nvm_wordsize);
1829 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1830 aprint_verbose("FLASH(HW)\n");
1831 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1832 aprint_verbose("FLASH\n");
1833 } else {
1834 if (sc->sc_flags & WM_F_EEPROM_SPI)
1835 eetype = "SPI";
1836 else
1837 eetype = "MicroWire";
1838 aprint_verbose("(%d address bits) %s EEPROM\n",
1839 sc->sc_nvm_addrbits, eetype);
1840 }
1841 }
1842
1843 switch (sc->sc_type) {
1844 case WM_T_82571:
1845 case WM_T_82572:
1846 case WM_T_82573:
1847 case WM_T_82574:
1848 case WM_T_82583:
1849 case WM_T_80003:
1850 case WM_T_ICH8:
1851 case WM_T_ICH9:
1852 case WM_T_ICH10:
1853 case WM_T_PCH:
1854 case WM_T_PCH2:
1855 case WM_T_PCH_LPT:
1856 if (wm_check_mng_mode(sc) != 0)
1857 wm_get_hw_control(sc);
1858 break;
1859 default:
1860 break;
1861 }
1862 wm_get_wakeup(sc);
1863 /*
1864 	 * Read the Ethernet address from the EEPROM if it was not
1865 	 * found first in the device properties.
1866 */
1867 ea = prop_dictionary_get(dict, "mac-address");
1868 if (ea != NULL) {
1869 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1870 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1871 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1872 } else {
1873 if (wm_read_mac_addr(sc, enaddr) != 0) {
1874 aprint_error_dev(sc->sc_dev,
1875 "unable to read Ethernet address\n");
1876 goto fail_5;
1877 }
1878 }
1879
1880 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1881 ether_sprintf(enaddr));
1882
1883 /*
1884 * Read the config info from the EEPROM, and set up various
1885 * bits in the control registers based on their contents.
1886 */
1887 pn = prop_dictionary_get(dict, "i82543-cfg1");
1888 if (pn != NULL) {
1889 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1890 cfg1 = (uint16_t) prop_number_integer_value(pn);
1891 } else {
1892 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1893 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1894 goto fail_5;
1895 }
1896 }
1897
1898 pn = prop_dictionary_get(dict, "i82543-cfg2");
1899 if (pn != NULL) {
1900 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1901 cfg2 = (uint16_t) prop_number_integer_value(pn);
1902 } else {
1903 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1904 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1905 goto fail_5;
1906 }
1907 }
1908
1909 /* check for WM_F_WOL */
1910 switch (sc->sc_type) {
1911 case WM_T_82542_2_0:
1912 case WM_T_82542_2_1:
1913 case WM_T_82543:
1914 /* dummy? */
1915 eeprom_data = 0;
1916 apme_mask = NVM_CFG3_APME;
1917 break;
1918 case WM_T_82544:
1919 apme_mask = NVM_CFG2_82544_APM_EN;
1920 eeprom_data = cfg2;
1921 break;
1922 case WM_T_82546:
1923 case WM_T_82546_3:
1924 case WM_T_82571:
1925 case WM_T_82572:
1926 case WM_T_82573:
1927 case WM_T_82574:
1928 case WM_T_82583:
1929 case WM_T_80003:
1930 default:
1931 apme_mask = NVM_CFG3_APME;
1932 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
1933 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
1934 break;
1935 case WM_T_82575:
1936 case WM_T_82576:
1937 case WM_T_82580:
1938 case WM_T_82580ER:
1939 case WM_T_I350:
1940 case WM_T_I354: /* XXX ok? */
1941 case WM_T_ICH8:
1942 case WM_T_ICH9:
1943 case WM_T_ICH10:
1944 case WM_T_PCH:
1945 case WM_T_PCH2:
1946 case WM_T_PCH_LPT:
1947 /* XXX The funcid should be checked on some devices */
1948 apme_mask = WUC_APME;
1949 eeprom_data = CSR_READ(sc, WMREG_WUC);
1950 break;
1951 }
1952
1953 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1954 if ((eeprom_data & apme_mask) != 0)
1955 sc->sc_flags |= WM_F_WOL;
1956 #ifdef WM_DEBUG
1957 if ((sc->sc_flags & WM_F_WOL) != 0)
1958 printf("WOL\n");
1959 #endif
1960
1961 /*
1962 * XXX need special handling for some multiple port cards
1963 	 * to disable a particular port.
1964 */
1965
1966 if (sc->sc_type >= WM_T_82544) {
1967 pn = prop_dictionary_get(dict, "i82543-swdpin");
1968 if (pn != NULL) {
1969 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1970 swdpin = (uint16_t) prop_number_integer_value(pn);
1971 } else {
1972 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
1973 aprint_error_dev(sc->sc_dev,
1974 "unable to read SWDPIN\n");
1975 goto fail_5;
1976 }
1977 }
1978 }
1979
1980 if (cfg1 & NVM_CFG1_ILOS)
1981 sc->sc_ctrl |= CTRL_ILOS;
1982 if (sc->sc_type >= WM_T_82544) {
1983 sc->sc_ctrl |=
1984 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1985 CTRL_SWDPIO_SHIFT;
1986 sc->sc_ctrl |=
1987 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1988 CTRL_SWDPINS_SHIFT;
1989 } else {
1990 sc->sc_ctrl |=
1991 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1992 CTRL_SWDPIO_SHIFT;
1993 }
1994
1995 #if 0
1996 if (sc->sc_type >= WM_T_82544) {
1997 if (cfg1 & NVM_CFG1_IPS0)
1998 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1999 if (cfg1 & NVM_CFG1_IPS1)
2000 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2001 sc->sc_ctrl_ext |=
2002 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2003 CTRL_EXT_SWDPIO_SHIFT;
2004 sc->sc_ctrl_ext |=
2005 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2006 CTRL_EXT_SWDPINS_SHIFT;
2007 } else {
2008 sc->sc_ctrl_ext |=
2009 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2010 CTRL_EXT_SWDPIO_SHIFT;
2011 }
2012 #endif
2013
2014 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2015 #if 0
2016 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2017 #endif
2018
2019 /*
2020 * Set up some register offsets that are different between
2021 * the i82542 and the i82543 and later chips.
2022 */
2023 if (sc->sc_type < WM_T_82543) {
2024 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2025 sc->sc_tdt_reg = WMREG_OLD_TDT;
2026 } else {
2027 sc->sc_rdt_reg = WMREG_RDT;
2028 sc->sc_tdt_reg = WMREG_TDT;
2029 }
2030
2031 if (sc->sc_type == WM_T_PCH) {
2032 uint16_t val;
2033
2034 /* Save the NVM K1 bit setting */
2035 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2036
2037 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2038 sc->sc_nvm_k1_enabled = 1;
2039 else
2040 sc->sc_nvm_k1_enabled = 0;
2041 }
2042
2043 /*
2044 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
2045 * media structures accordingly.
2046 */
2047 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2048 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2049 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2050 || sc->sc_type == WM_T_82573
2051 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2052 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2053 wm_gmii_mediainit(sc, wmp->wmp_product);
2054 } else if (sc->sc_type < WM_T_82543 ||
2055 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2056 if (sc->sc_mediatype & WMP_F_COPPER) {
2057 aprint_error_dev(sc->sc_dev,
2058 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2059 sc->sc_mediatype = WMP_F_FIBER;
2060 }
2061 wm_tbi_mediainit(sc);
2062 } else {
2063 switch (sc->sc_type) {
2064 case WM_T_82575:
2065 case WM_T_82576:
2066 case WM_T_82580:
2067 case WM_T_82580ER:
2068 case WM_T_I350:
2069 case WM_T_I354:
2070 case WM_T_I210:
2071 case WM_T_I211:
2072 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2073 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2074 switch (link_mode) {
2075 case CTRL_EXT_LINK_MODE_1000KX:
2076 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2077 sc->sc_mediatype = WMP_F_SERDES;
2078 break;
2079 case CTRL_EXT_LINK_MODE_SGMII:
2080 if (wm_sgmii_uses_mdio(sc)) {
2081 aprint_verbose_dev(sc->sc_dev,
2082 "SGMII(MDIO)\n");
2083 sc->sc_flags |= WM_F_SGMII;
2084 sc->sc_mediatype = WMP_F_COPPER;
2085 break;
2086 }
2087 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2088 /*FALLTHROUGH*/
2089 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2090 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2091 if (sc->sc_mediatype == WMP_F_UNKNOWN) {
2092 if (link_mode
2093 == CTRL_EXT_LINK_MODE_SGMII) {
2094 sc->sc_mediatype
2095 = WMP_F_COPPER;
2096 sc->sc_flags |= WM_F_SGMII;
2097 } else {
2098 sc->sc_mediatype
2099 = WMP_F_SERDES;
2100 aprint_verbose_dev(sc->sc_dev,
2101 "SERDES\n");
2102 }
2103 break;
2104 }
2105 if (sc->sc_mediatype == WMP_F_SERDES)
2106 aprint_verbose_dev(sc->sc_dev,
2107 "SERDES\n");
2108
2109 /* Change current link mode setting */
2110 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2111 switch (sc->sc_mediatype) {
2112 case WMP_F_COPPER:
2113 reg |= CTRL_EXT_LINK_MODE_SGMII;
2114 break;
2115 case WMP_F_SERDES:
2116 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2117 break;
2118 default:
2119 break;
2120 }
2121 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2122 break;
2123 case CTRL_EXT_LINK_MODE_GMII:
2124 default:
2125 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2126 sc->sc_mediatype = WMP_F_COPPER;
2127 break;
2128 }
2129
2130 			/* Enable the I2C interface only when using SGMII */
2131 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2132 				reg |= CTRL_EXT_I2C_ENA;
2133 			else
2134 				reg &= ~CTRL_EXT_I2C_ENA;
2135 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2136
2137 if (sc->sc_mediatype == WMP_F_COPPER)
2138 wm_gmii_mediainit(sc, wmp->wmp_product);
2139 else
2140 wm_tbi_mediainit(sc);
2141 break;
2142 default:
2143 if (sc->sc_mediatype & WMP_F_FIBER)
2144 aprint_error_dev(sc->sc_dev,
2145 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2146 sc->sc_mediatype = WMP_F_COPPER;
2147 wm_gmii_mediainit(sc, wmp->wmp_product);
2148 }
2149 }
2150
2151 ifp = &sc->sc_ethercom.ec_if;
2152 xname = device_xname(sc->sc_dev);
2153 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2154 ifp->if_softc = sc;
2155 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2156 ifp->if_ioctl = wm_ioctl;
2157 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2158 ifp->if_start = wm_nq_start;
2159 else
2160 ifp->if_start = wm_start;
2161 ifp->if_watchdog = wm_watchdog;
2162 ifp->if_init = wm_init;
2163 ifp->if_stop = wm_stop;
2164 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2165 IFQ_SET_READY(&ifp->if_snd);
2166
2167 /* Check for jumbo frame */
2168 switch (sc->sc_type) {
2169 case WM_T_82573:
2170 /* XXX limited to 9234 if ASPM is disabled */
2171 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2172 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2173 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2174 break;
2175 case WM_T_82571:
2176 case WM_T_82572:
2177 case WM_T_82574:
2178 case WM_T_82575:
2179 case WM_T_82576:
2180 case WM_T_82580:
2181 case WM_T_82580ER:
2182 case WM_T_I350:
2183 case WM_T_I354: /* XXXX ok? */
2184 case WM_T_I210:
2185 case WM_T_I211:
2186 case WM_T_80003:
2187 case WM_T_ICH9:
2188 case WM_T_ICH10:
2189 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2190 case WM_T_PCH_LPT:
2191 /* XXX limited to 9234 */
2192 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2193 break;
2194 case WM_T_PCH:
2195 /* XXX limited to 4096 */
2196 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2197 break;
2198 case WM_T_82542_2_0:
2199 case WM_T_82542_2_1:
2200 case WM_T_82583:
2201 case WM_T_ICH8:
2202 /* No support for jumbo frame */
2203 break;
2204 default:
2205 /* ETHER_MAX_LEN_JUMBO */
2206 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2207 break;
2208 }
2209
2210 	/* If we're an i82543 or greater, we can support VLANs. */
2211 if (sc->sc_type >= WM_T_82543)
2212 sc->sc_ethercom.ec_capabilities |=
2213 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2214
2215 /*
2216 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2217 * on i82543 and later.
2218 */
2219 if (sc->sc_type >= WM_T_82543) {
2220 ifp->if_capabilities |=
2221 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2222 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2223 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2224 IFCAP_CSUM_TCPv6_Tx |
2225 IFCAP_CSUM_UDPv6_Tx;
2226 }
2227
2228 /*
2229 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2230 *
2231 * 82541GI (8086:1076) ... no
2232 * 82572EI (8086:10b9) ... yes
2233 */
2234 if (sc->sc_type >= WM_T_82571) {
2235 ifp->if_capabilities |=
2236 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2237 }
2238
2239 /*
2240 	 * If we're an i82544 or greater (except i82547), we can do
2241 * TCP segmentation offload.
2242 */
2243 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2244 ifp->if_capabilities |= IFCAP_TSOv4;
2245 }
2246
2247 if (sc->sc_type >= WM_T_82571) {
2248 ifp->if_capabilities |= IFCAP_TSOv6;
2249 }
2250
2251 #ifdef WM_MPSAFE
2252 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2253 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2254 #else
2255 sc->sc_tx_lock = NULL;
2256 sc->sc_rx_lock = NULL;
2257 #endif
2258
2259 /* Attach the interface. */
2260 if_attach(ifp);
2261 ether_ifattach(ifp, enaddr);
2262 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2263 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2264 RND_FLAG_DEFAULT);
2265
2266 #ifdef WM_EVENT_COUNTERS
2267 /* Attach event counters. */
2268 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2269 NULL, xname, "txsstall");
2270 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2271 NULL, xname, "txdstall");
2272 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2273 NULL, xname, "txfifo_stall");
2274 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2275 NULL, xname, "txdw");
2276 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2277 NULL, xname, "txqe");
2278 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2279 NULL, xname, "rxintr");
2280 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2281 NULL, xname, "linkintr");
2282
2283 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2284 NULL, xname, "rxipsum");
2285 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2286 NULL, xname, "rxtusum");
2287 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2288 NULL, xname, "txipsum");
2289 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2290 NULL, xname, "txtusum");
2291 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2292 NULL, xname, "txtusum6");
2293
2294 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2295 NULL, xname, "txtso");
2296 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2297 NULL, xname, "txtso6");
2298 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2299 NULL, xname, "txtsopain");
2300
2301 for (i = 0; i < WM_NTXSEGS; i++) {
2302 snprintf(wm_txseg_evcnt_names[i],
2303 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2304 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2305 NULL, xname, wm_txseg_evcnt_names[i]);
2306 }
2307
2308 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2309 NULL, xname, "txdrop");
2310
2311 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2312 NULL, xname, "tu");
2313
2314 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2315 NULL, xname, "tx_xoff");
2316 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2317 NULL, xname, "tx_xon");
2318 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2319 NULL, xname, "rx_xoff");
2320 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2321 NULL, xname, "rx_xon");
2322 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2323 NULL, xname, "rx_macctl");
2324 #endif /* WM_EVENT_COUNTERS */
2325
2326 if (pmf_device_register(self, wm_suspend, wm_resume))
2327 pmf_class_network_register(self, ifp);
2328 else
2329 aprint_error_dev(self, "couldn't establish power handler\n");
2330
2331 sc->sc_flags |= WM_F_ATTACHED;
2332 return;
2333
2334 /*
2335 * Free any resources we've allocated during the failed attach
2336 * attempt. Do this in reverse order and fall through.
2337 */
2338 fail_5:
2339 for (i = 0; i < WM_NRXDESC; i++) {
2340 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2341 bus_dmamap_destroy(sc->sc_dmat,
2342 sc->sc_rxsoft[i].rxs_dmamap);
2343 }
2344 fail_4:
2345 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2346 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2347 bus_dmamap_destroy(sc->sc_dmat,
2348 sc->sc_txsoft[i].txs_dmamap);
2349 }
2350 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2351 fail_3:
2352 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2353 fail_2:
2354 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2355 sc->sc_cd_size);
2356 fail_1:
2357 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2358 fail_0:
2359 return;
2360 }
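
/*
 * The fail_0..fail_5 labels above are the classic goto-based unwinding
 * idiom: each allocation step jumps to the label that releases everything
 * acquired before it, and the labels fall through in reverse order.  A
 * minimal self-contained sketch of the idiom (example only, with
 * placeholder malloc resources instead of bus_dma objects):
 */
#if 0
#include <stdlib.h>

int
attach_example(void)
{
	void *a, *b, *c;

	if ((a = malloc(64)) == NULL)
		goto fail_0;
	if ((b = malloc(64)) == NULL)
		goto fail_1;
	if ((c = malloc(64)) == NULL)
		goto fail_2;
	/* ... attach succeeded; later teardown frees c, b, a ... */
	free(c);
	free(b);
	free(a);
	return 0;

	/* Unwind in reverse order, falling through. */
 fail_2:
	free(b);
 fail_1:
	free(a);
 fail_0:
	return -1;
}
#endif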
2361
2362 /* The detach function (ca_detach) */
2363 static int
2364 wm_detach(device_t self, int flags __unused)
2365 {
2366 struct wm_softc *sc = device_private(self);
2367 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2368 int i;
2369 #ifndef WM_MPSAFE
2370 int s;
2371 #endif
2372
2373 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2374 return 0;
2375
2376 #ifndef WM_MPSAFE
2377 s = splnet();
2378 #endif
2379 /* Stop the interface. Callouts are stopped in it. */
2380 wm_stop(ifp, 1);
2381
2382 #ifndef WM_MPSAFE
2383 splx(s);
2384 #endif
2385
2386 pmf_device_deregister(self);
2387
2388 /* Tell the firmware about the release */
2389 WM_BOTH_LOCK(sc);
2390 wm_release_manageability(sc);
2391 wm_release_hw_control(sc);
2392 WM_BOTH_UNLOCK(sc);
2393
2394 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2395
2396 /* Delete all remaining media. */
2397 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2398
2399 ether_ifdetach(ifp);
2400 if_detach(ifp);
2401
2402
2403 /* Unload RX dmamaps and free mbufs */
2404 WM_RX_LOCK(sc);
2405 wm_rxdrain(sc);
2406 WM_RX_UNLOCK(sc);
2407 /* Must unlock here */
2408
2409 	/* Free the DMA maps; this mirrors the cleanup at the end of wm_attach() */
2410 for (i = 0; i < WM_NRXDESC; i++) {
2411 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2412 bus_dmamap_destroy(sc->sc_dmat,
2413 sc->sc_rxsoft[i].rxs_dmamap);
2414 }
2415 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2416 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2417 bus_dmamap_destroy(sc->sc_dmat,
2418 sc->sc_txsoft[i].txs_dmamap);
2419 }
2420 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2421 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2422 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2423 sc->sc_cd_size);
2424 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2425
2426 /* Disestablish the interrupt handler */
2427 if (sc->sc_ih != NULL) {
2428 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2429 sc->sc_ih = NULL;
2430 }
2431
2432 /* Unmap the registers */
2433 if (sc->sc_ss) {
2434 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2435 sc->sc_ss = 0;
2436 }
2437
2438 if (sc->sc_ios) {
2439 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2440 sc->sc_ios = 0;
2441 }
2442
2443 if (sc->sc_tx_lock)
2444 mutex_obj_free(sc->sc_tx_lock);
2445 if (sc->sc_rx_lock)
2446 mutex_obj_free(sc->sc_rx_lock);
2447
2448 return 0;
2449 }
2450
2451 static bool
2452 wm_suspend(device_t self, const pmf_qual_t *qual)
2453 {
2454 struct wm_softc *sc = device_private(self);
2455
2456 wm_release_manageability(sc);
2457 wm_release_hw_control(sc);
2458 #ifdef WM_WOL
2459 wm_enable_wakeup(sc);
2460 #endif
2461
2462 return true;
2463 }
2464
2465 static bool
2466 wm_resume(device_t self, const pmf_qual_t *qual)
2467 {
2468 struct wm_softc *sc = device_private(self);
2469
2470 wm_init_manageability(sc);
2471
2472 return true;
2473 }
2474
2475 /*
2476 * wm_watchdog: [ifnet interface function]
2477 *
2478 * Watchdog timer handler.
2479 */
2480 static void
2481 wm_watchdog(struct ifnet *ifp)
2482 {
2483 struct wm_softc *sc = ifp->if_softc;
2484
2485 /*
2486 * Since we're using delayed interrupts, sweep up
2487 * before we report an error.
2488 */
2489 WM_TX_LOCK(sc);
2490 wm_txintr(sc);
2491 WM_TX_UNLOCK(sc);
2492
2493 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2494 #ifdef WM_DEBUG
2495 int i, j;
2496 struct wm_txsoft *txs;
2497 #endif
2498 log(LOG_ERR,
2499 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2500 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2501 sc->sc_txnext);
2502 ifp->if_oerrors++;
2503 #ifdef WM_DEBUG
2504 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2505 i = WM_NEXTTXS(sc, i)) {
2506 txs = &sc->sc_txsoft[i];
2507 printf("txs %d tx %d -> %d\n",
2508 i, txs->txs_firstdesc, txs->txs_lastdesc);
2509 for (j = txs->txs_firstdesc; ;
2510 j = WM_NEXTTX(sc, j)) {
2511 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2512 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2513 printf("\t %#08x%08x\n",
2514 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2515 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2516 if (j == txs->txs_lastdesc)
2517 break;
2518 }
2519 }
2520 #endif
2521 /* Reset the interface. */
2522 (void) wm_init(ifp);
2523 }
2524
2525 /* Try to get more packets going. */
2526 ifp->if_start(ifp);
2527 }
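
/*
 * The debug walk above steps through the Tx ring with WM_NEXTTX(), which
 * advances an index modulo the ring size so it wraps back to descriptor 0.
 * A minimal sketch of that modular advance (example only; the ring size
 * is arbitrary, chosen as a power of two so a mask works):
 */
#if 0
#include <stdio.h>

#define EX_NTXDESC	256
#define EX_NEXTTX(x)	(((x) + 1) & (EX_NTXDESC - 1))

int
main(void)
{
	int j;

	/* Walk from descriptor 254 and wrap past the end of the ring. */
	for (j = 254; j != 2; j = EX_NEXTTX(j))
		printf("desc %d\n", j);	/* prints 254, 255, 0, 1 */
	return 0;
}
#endif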
2528
2529 /*
2530 * wm_tick:
2531 *
2532 * One second timer, used to check link status, sweep up
2533 * completed transmit jobs, etc.
2534 */
2535 static void
2536 wm_tick(void *arg)
2537 {
2538 struct wm_softc *sc = arg;
2539 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2540 #ifndef WM_MPSAFE
2541 int s;
2542
2543 s = splnet();
2544 #endif
2545
2546 WM_TX_LOCK(sc);
2547
2548 if (sc->sc_stopping)
2549 goto out;
2550
2551 if (sc->sc_type >= WM_T_82542_2_1) {
2552 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2553 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2554 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2555 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2556 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2557 }
2558
2559 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2560 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2561 + CSR_READ(sc, WMREG_CRCERRS)
2562 + CSR_READ(sc, WMREG_ALGNERRC)
2563 + CSR_READ(sc, WMREG_SYMERRC)
2564 + CSR_READ(sc, WMREG_RXERRC)
2565 + CSR_READ(sc, WMREG_SEC)
2566 + CSR_READ(sc, WMREG_CEXTERR)
2567 + CSR_READ(sc, WMREG_RLEC);
2568 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2569
2570 if (sc->sc_flags & WM_F_HAS_MII)
2571 mii_tick(&sc->sc_mii);
2572 else
2573 wm_tbi_check_link(sc);
2574
2575 out:
2576 WM_TX_UNLOCK(sc);
2577 #ifndef WM_MPSAFE
2578 splx(s);
2579 #endif
2580
2581 if (!sc->sc_stopping)
2582 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2583 }
2584
2585 static int
2586 wm_ifflags_cb(struct ethercom *ec)
2587 {
2588 struct ifnet *ifp = &ec->ec_if;
2589 struct wm_softc *sc = ifp->if_softc;
2590 int change = ifp->if_flags ^ sc->sc_if_flags;
2591 int rc = 0;
2592
2593 WM_BOTH_LOCK(sc);
2594
2595 if (change != 0)
2596 sc->sc_if_flags = ifp->if_flags;
2597
2598 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2599 rc = ENETRESET;
2600 goto out;
2601 }
2602
2603 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2604 wm_set_filter(sc);
2605
2606 wm_set_vlan(sc);
2607
2608 out:
2609 WM_BOTH_UNLOCK(sc);
2610
2611 return rc;
2612 }
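
/*
 * wm_ifflags_cb() finds the flags that changed with a single XOR against
 * the cached copy.  A minimal sketch of the idiom (example only, with
 * made-up flag values):
 */
#if 0
#include <stdio.h>

#define EX_IFF_PROMISC	0x01
#define EX_IFF_DEBUG	0x02

int
main(void)
{
	int cached = EX_IFF_DEBUG;			/* previous flags */
	int current = EX_IFF_DEBUG | EX_IFF_PROMISC;	/* new flags */
	int change = current ^ cached;			/* bits that differ */

	if (change & EX_IFF_PROMISC)
		printf("promiscuous mode toggled\n");
	return 0;
}
#endif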
2613
2614 /*
2615 * wm_ioctl: [ifnet interface function]
2616 *
2617 * Handle control requests from the operator.
2618 */
2619 static int
2620 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2621 {
2622 struct wm_softc *sc = ifp->if_softc;
2623 struct ifreq *ifr = (struct ifreq *) data;
2624 struct ifaddr *ifa = (struct ifaddr *)data;
2625 struct sockaddr_dl *sdl;
2626 int s, error;
2627
2628 #ifndef WM_MPSAFE
2629 s = splnet();
2630 #endif
2631 WM_BOTH_LOCK(sc);
2632
2633 switch (cmd) {
2634 case SIOCSIFMEDIA:
2635 case SIOCGIFMEDIA:
2636 /* Flow control requires full-duplex mode. */
2637 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2638 (ifr->ifr_media & IFM_FDX) == 0)
2639 ifr->ifr_media &= ~IFM_ETH_FMASK;
2640 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2641 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2642 /* We can do both TXPAUSE and RXPAUSE. */
2643 ifr->ifr_media |=
2644 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2645 }
2646 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2647 }
2648 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2649 break;
2650 case SIOCINITIFADDR:
2651 if (ifa->ifa_addr->sa_family == AF_LINK) {
2652 sdl = satosdl(ifp->if_dl->ifa_addr);
2653 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2654 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2655 /* unicast address is first multicast entry */
2656 wm_set_filter(sc);
2657 error = 0;
2658 break;
2659 }
2660 /*FALLTHROUGH*/
2661 default:
2662 WM_BOTH_UNLOCK(sc);
2663 #ifdef WM_MPSAFE
2664 s = splnet();
2665 #endif
2666 /* It may call wm_start, so unlock here */
2667 error = ether_ioctl(ifp, cmd, data);
2668 #ifdef WM_MPSAFE
2669 splx(s);
2670 #endif
2671 WM_BOTH_LOCK(sc);
2672
2673 if (error != ENETRESET)
2674 break;
2675
2676 error = 0;
2677
2678 if (cmd == SIOCSIFCAP) {
2679 WM_BOTH_UNLOCK(sc);
2680 error = (*ifp->if_init)(ifp);
2681 WM_BOTH_LOCK(sc);
2682 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2683 ;
2684 else if (ifp->if_flags & IFF_RUNNING) {
2685 /*
2686 * Multicast list has changed; set the hardware filter
2687 * accordingly.
2688 */
2689 wm_set_filter(sc);
2690 }
2691 break;
2692 }
2693
2694 WM_BOTH_UNLOCK(sc);
2695
2696 /* Try to get more packets going. */
2697 ifp->if_start(ifp);
2698
2699 #ifndef WM_MPSAFE
2700 splx(s);
2701 #endif
2702 return error;
2703 }
2704
2705 /* MAC address related */
2706
2707 static int
2708 wm_check_alt_mac_addr(struct wm_softc *sc)
2709 {
2710 uint16_t myea[ETHER_ADDR_LEN / 2];
2711 uint16_t offset = NVM_OFF_MACADDR;
2712
2713 /* Try to read alternative MAC address pointer */
2714 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2715 return -1;
2716
2717 /* Check pointer */
2718 if (offset == 0xffff)
2719 return -1;
2720
2721 /*
2722 	 * Check whether the alternative MAC address is valid. Some
2723 	 * cards have a non-0xffff pointer but don't actually use an
2724 	 * alternative MAC address.
2725 	 *
2726 	 * A valid address must have the multicast/broadcast bit clear.
2727 */
2728 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2729 if (((myea[0] & 0xff) & 0x01) == 0)
2730 return 0; /* found! */
2731
2732 /* not found */
2733 return -1;
2734 }
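
/*
 * The validity test above uses the IEEE 802 rule that the least
 * significant bit of the first octet marks group (multicast/broadcast)
 * addresses; a real station address must have it clear.  A minimal
 * sketch (example only; the addresses are illustrative):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int
is_valid_station_addr(const uint8_t *ea)
{
	return (ea[0] & 0x01) == 0;	/* group bit must be clear */
}

int
main(void)
{
	uint8_t ucast[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };
	uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("%d %d\n", is_valid_station_addr(ucast),
	    is_valid_station_addr(bcast));	/* prints "1 0" */
	return 0;
}
#endif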
2735
2736 static int
2737 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2738 {
2739 uint16_t myea[ETHER_ADDR_LEN / 2];
2740 uint16_t offset = NVM_OFF_MACADDR;
2741 int do_invert = 0;
2742
2743 switch (sc->sc_type) {
2744 case WM_T_82580:
2745 case WM_T_82580ER:
2746 case WM_T_I350:
2747 case WM_T_I354:
2748 switch (sc->sc_funcid) {
2749 case 0:
2750 /* default value (== NVM_OFF_MACADDR) */
2751 break;
2752 case 1:
2753 offset = NVM_OFF_LAN1;
2754 break;
2755 case 2:
2756 offset = NVM_OFF_LAN2;
2757 break;
2758 case 3:
2759 offset = NVM_OFF_LAN3;
2760 break;
2761 default:
2762 goto bad;
2763 /* NOTREACHED */
2764 break;
2765 }
2766 break;
2767 case WM_T_82571:
2768 case WM_T_82575:
2769 case WM_T_82576:
2770 case WM_T_80003:
2771 case WM_T_I210:
2772 case WM_T_I211:
2773 if (wm_check_alt_mac_addr(sc) != 0) {
2774 /* reset the offset to LAN0 */
2775 offset = NVM_OFF_MACADDR;
2776 if ((sc->sc_funcid & 0x01) == 1)
2777 do_invert = 1;
2778 goto do_read;
2779 }
2780 switch (sc->sc_funcid) {
2781 case 0:
2782 /*
2783 * The offset is the value in NVM_OFF_ALT_MAC_ADDR_PTR
2784 * itself.
2785 */
2786 break;
2787 case 1:
2788 offset += NVM_OFF_MACADDR_LAN1;
2789 break;
2790 case 2:
2791 offset += NVM_OFF_MACADDR_LAN2;
2792 break;
2793 case 3:
2794 offset += NVM_OFF_MACADDR_LAN3;
2795 break;
2796 default:
2797 goto bad;
2798 /* NOTREACHED */
2799 break;
2800 }
2801 break;
2802 default:
2803 if ((sc->sc_funcid & 0x01) == 1)
2804 do_invert = 1;
2805 break;
2806 }
2807
2808 do_read:
2809 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2810 myea) != 0) {
2811 goto bad;
2812 }
2813
2814 enaddr[0] = myea[0] & 0xff;
2815 enaddr[1] = myea[0] >> 8;
2816 enaddr[2] = myea[1] & 0xff;
2817 enaddr[3] = myea[1] >> 8;
2818 enaddr[4] = myea[2] & 0xff;
2819 enaddr[5] = myea[2] >> 8;
2820
2821 /*
2822 * Toggle the LSB of the MAC address on the second port
2823 * of some dual port cards.
2824 */
2825 if (do_invert != 0)
2826 enaddr[5] ^= 1;
2827
2828 return 0;
2829
2830 bad:
2831 return -1;
2832 }
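
/*
 * wm_read_mac_addr() reads the station address as three little-endian
 * 16-bit NVM words and unpacks them low byte first; on the second port
 * of some dual port cards the low bit of the final octet is then
 * toggled.  A minimal sketch of the unpacking (example only; the NVM
 * words are made up):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint16_t myea[3] = { 0x1b00, 0x0021, 0x0100 };	/* hypothetical */
	uint8_t enaddr[6];
	int i;

	for (i = 0; i < 3; i++) {
		enaddr[i * 2] = myea[i] & 0xff;		/* low byte first */
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}
	enaddr[5] ^= 1;		/* second port of a dual port card */
	for (i = 0; i < 6; i++)
		printf("%02x%c", enaddr[i], i == 5 ? '\n' : ':');
	return 0;
}
#endif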
2833
2834 /*
2835 * wm_set_ral:
2836 *
2837 	 * Set an entry in the receive address list.
2838 */
2839 static void
2840 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2841 {
2842 uint32_t ral_lo, ral_hi;
2843
2844 if (enaddr != NULL) {
2845 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2846 (enaddr[3] << 24);
2847 ral_hi = enaddr[4] | (enaddr[5] << 8);
2848 ral_hi |= RAL_AV;
2849 } else {
2850 ral_lo = 0;
2851 ral_hi = 0;
2852 }
2853
2854 if (sc->sc_type >= WM_T_82544) {
2855 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2856 ral_lo);
2857 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2858 ral_hi);
2859 } else {
2860 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2861 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2862 }
2863 }
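
/*
 * wm_set_ral() splits the six octets across the RAL low/high register
 * pair, low octets first, and sets the address-valid bit in the high
 * word.  A minimal sketch of the packing (example only; the bit
 * position of EX_RAL_AV and the address are illustrative):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_RAL_AV	0x80000000U	/* address-valid bit */

int
main(void)
{
	uint8_t ea[6] = { 0x00, 0x1b, 0x21, 0x0a, 0x0b, 0x0c };
	uint32_t ral_lo, ral_hi;

	ral_lo = ea[0] | (ea[1] << 8) | (ea[2] << 16) |
	    ((uint32_t)ea[3] << 24);
	ral_hi = ea[4] | (ea[5] << 8) | EX_RAL_AV;
	printf("RAL_LO %08x RAL_HI %08x\n",
	    (unsigned)ral_lo, (unsigned)ral_hi);
	return 0;
}
#endif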
2864
2865 /*
2866 * wm_mchash:
2867 *
2868 * Compute the hash of the multicast address for the 4096-bit
2869 * multicast filter.
2870 */
2871 static uint32_t
2872 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2873 {
2874 static const int lo_shift[4] = { 4, 3, 2, 0 };
2875 static const int hi_shift[4] = { 4, 5, 6, 8 };
2876 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2877 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2878 uint32_t hash;
2879
2880 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2881 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2882 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2883 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2884 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2885 return (hash & 0x3ff);
2886 }
2887 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2888 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2889
2890 return (hash & 0xfff);
2891 }
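
/*
 * A minimal sketch of how the hash above selects a filter bit: the
 * 12-bit hash indexes one of 4096 bits, of which the upper bits pick a
 * 32-bit MTA word and the low five bits pick the bit within it, as
 * wm_set_filter() does below.  (Example only; the shift amounts follow
 * the sc_mchash_type == 0 case and the address is made up.)
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint8_t ea[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t hash, word, bit;

	hash = ((ea[4] >> 4) | ((uint16_t)ea[5] << 4)) & 0xfff;
	word = hash >> 5;	/* which 32-bit MTA register */
	bit = hash & 0x1f;	/* which bit within that register */
	printf("hash %03x -> MTA[%u] bit %u\n",
	    (unsigned)hash, (unsigned)word, (unsigned)bit);
	return 0;
}
#endif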
2892
2893 /*
2894 * wm_set_filter:
2895 *
2896 * Set up the receive filter.
2897 */
2898 static void
2899 wm_set_filter(struct wm_softc *sc)
2900 {
2901 struct ethercom *ec = &sc->sc_ethercom;
2902 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2903 struct ether_multi *enm;
2904 struct ether_multistep step;
2905 bus_addr_t mta_reg;
2906 uint32_t hash, reg, bit;
2907 int i, size;
2908
2909 if (sc->sc_type >= WM_T_82544)
2910 mta_reg = WMREG_CORDOVA_MTA;
2911 else
2912 mta_reg = WMREG_MTA;
2913
2914 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2915
2916 if (ifp->if_flags & IFF_BROADCAST)
2917 sc->sc_rctl |= RCTL_BAM;
2918 if (ifp->if_flags & IFF_PROMISC) {
2919 sc->sc_rctl |= RCTL_UPE;
2920 goto allmulti;
2921 }
2922
2923 /*
2924 * Set the station address in the first RAL slot, and
2925 * clear the remaining slots.
2926 */
2927 if (sc->sc_type == WM_T_ICH8)
2928 		size = WM_RAL_TABSIZE_ICH8 - 1;
2929 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2930 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2931 || (sc->sc_type == WM_T_PCH_LPT))
2932 size = WM_RAL_TABSIZE_ICH8;
2933 else if (sc->sc_type == WM_T_82575)
2934 size = WM_RAL_TABSIZE_82575;
2935 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2936 size = WM_RAL_TABSIZE_82576;
2937 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2938 size = WM_RAL_TABSIZE_I350;
2939 else
2940 size = WM_RAL_TABSIZE;
2941 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2942 for (i = 1; i < size; i++)
2943 wm_set_ral(sc, NULL, i);
2944
2945 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2946 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2947 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2948 size = WM_ICH8_MC_TABSIZE;
2949 else
2950 size = WM_MC_TABSIZE;
2951 /* Clear out the multicast table. */
2952 for (i = 0; i < size; i++)
2953 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2954
2955 ETHER_FIRST_MULTI(step, ec, enm);
2956 while (enm != NULL) {
2957 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2958 /*
2959 * We must listen to a range of multicast addresses.
2960 * For now, just accept all multicasts, rather than
2961 * trying to set only those filter bits needed to match
2962 * the range. (At this time, the only use of address
2963 * ranges is for IP multicast routing, for which the
2964 * range is big enough to require all bits set.)
2965 */
2966 goto allmulti;
2967 }
2968
2969 hash = wm_mchash(sc, enm->enm_addrlo);
2970
2971 reg = (hash >> 5);
2972 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2973 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2974 || (sc->sc_type == WM_T_PCH2)
2975 || (sc->sc_type == WM_T_PCH_LPT))
2976 reg &= 0x1f;
2977 else
2978 reg &= 0x7f;
2979 bit = hash & 0x1f;
2980
2981 hash = CSR_READ(sc, mta_reg + (reg << 2));
2982 hash |= 1U << bit;
2983
2984 /* XXX Hardware bug?? */
2985 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2986 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2987 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2988 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2989 } else
2990 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2991
2992 ETHER_NEXT_MULTI(step, enm);
2993 }
2994
2995 ifp->if_flags &= ~IFF_ALLMULTI;
2996 goto setit;
2997
2998 allmulti:
2999 ifp->if_flags |= IFF_ALLMULTI;
3000 sc->sc_rctl |= RCTL_MPE;
3001
3002 setit:
3003 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3004 }
3005
3006 /* Reset and init related */
3007
3008 static void
3009 wm_set_vlan(struct wm_softc *sc)
3010 {
3011 /* Deal with VLAN enables. */
3012 if (VLAN_ATTACHED(&sc->sc_ethercom))
3013 sc->sc_ctrl |= CTRL_VME;
3014 else
3015 sc->sc_ctrl &= ~CTRL_VME;
3016
3017 /* Write the control registers. */
3018 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3019 }
3020
3021 static void
3022 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3023 {
3024 uint32_t gcr;
3025 pcireg_t ctrl2;
3026
3027 gcr = CSR_READ(sc, WMREG_GCR);
3028
3029 /* Only take action if timeout value is defaulted to 0 */
3030 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3031 goto out;
3032
3033 if ((gcr & GCR_CAP_VER2) == 0) {
3034 gcr |= GCR_CMPL_TMOUT_10MS;
3035 goto out;
3036 }
3037
3038 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3039 sc->sc_pcixe_capoff + PCIE_DCSR2);
3040 ctrl2 |= WM_PCIE_DCSR2_16MS;
3041 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3042 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3043
3044 out:
3045 /* Disable completion timeout resend */
3046 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3047
3048 CSR_WRITE(sc, WMREG_GCR, gcr);
3049 }
3050
3051 void
3052 wm_get_auto_rd_done(struct wm_softc *sc)
3053 {
3054 int i;
3055
3056 /* wait for eeprom to reload */
3057 switch (sc->sc_type) {
3058 case WM_T_82571:
3059 case WM_T_82572:
3060 case WM_T_82573:
3061 case WM_T_82574:
3062 case WM_T_82583:
3063 case WM_T_82575:
3064 case WM_T_82576:
3065 case WM_T_82580:
3066 case WM_T_82580ER:
3067 case WM_T_I350:
3068 case WM_T_I354:
3069 case WM_T_I210:
3070 case WM_T_I211:
3071 case WM_T_80003:
3072 case WM_T_ICH8:
3073 case WM_T_ICH9:
3074 for (i = 0; i < 10; i++) {
3075 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3076 break;
3077 delay(1000);
3078 }
3079 if (i == 10) {
3080 log(LOG_ERR, "%s: auto read from eeprom failed to "
3081 "complete\n", device_xname(sc->sc_dev));
3082 }
3083 break;
3084 default:
3085 break;
3086 }
3087 }
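
/*
 * wm_get_auto_rd_done() uses the driver's standard polling pattern: read
 * a status register up to N times with a fixed delay and report failure
 * if the loop runs out.  A minimal userland sketch of the pattern
 * (example only; the fake register stands in for CSR_READ and delay()):
 */
#if 0
#include <stdio.h>

static int fake_reg;		/* stands in for a status register */

static int
poll_until_set(int mask, int tries)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (fake_reg & mask)
			return 0;		/* bit came up; done */
		fake_reg |= (i == 2) ? mask : 0; /* simulate the hardware */
	}
	return -1;				/* timed out */
}

int
main(void)
{
	printf("%s\n", poll_until_set(0x1, 10) == 0 ? "done" : "timeout");
	return 0;
}
#endif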
3088
3089 void
3090 wm_lan_init_done(struct wm_softc *sc)
3091 {
3092 uint32_t reg = 0;
3093 int i;
3094
3095 /* wait for eeprom to reload */
3096 switch (sc->sc_type) {
3097 case WM_T_ICH10:
3098 case WM_T_PCH:
3099 case WM_T_PCH2:
3100 case WM_T_PCH_LPT:
3101 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3102 reg = CSR_READ(sc, WMREG_STATUS);
3103 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3104 break;
3105 delay(100);
3106 }
3107 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3108 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3109 "complete\n", device_xname(sc->sc_dev), __func__);
3110 }
3111 break;
3112 default:
3113 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3114 __func__);
3115 break;
3116 }
3117
3118 reg &= ~STATUS_LAN_INIT_DONE;
3119 CSR_WRITE(sc, WMREG_STATUS, reg);
3120 }
3121
3122 void
3123 wm_get_cfg_done(struct wm_softc *sc)
3124 {
3125 int mask;
3126 uint32_t reg;
3127 int i;
3128
3129 /* wait for eeprom to reload */
3130 switch (sc->sc_type) {
3131 case WM_T_82542_2_0:
3132 case WM_T_82542_2_1:
3133 /* null */
3134 break;
3135 case WM_T_82543:
3136 case WM_T_82544:
3137 case WM_T_82540:
3138 case WM_T_82545:
3139 case WM_T_82545_3:
3140 case WM_T_82546:
3141 case WM_T_82546_3:
3142 case WM_T_82541:
3143 case WM_T_82541_2:
3144 case WM_T_82547:
3145 case WM_T_82547_2:
3146 case WM_T_82573:
3147 case WM_T_82574:
3148 case WM_T_82583:
3149 /* generic */
3150 delay(10*1000);
3151 break;
3152 case WM_T_80003:
3153 case WM_T_82571:
3154 case WM_T_82572:
3155 case WM_T_82575:
3156 case WM_T_82576:
3157 case WM_T_82580:
3158 case WM_T_82580ER:
3159 case WM_T_I350:
3160 case WM_T_I354:
3161 case WM_T_I210:
3162 case WM_T_I211:
3163 if (sc->sc_type == WM_T_82571) {
3164 /* Only 82571 shares port 0 */
3165 mask = EEMNGCTL_CFGDONE_0;
3166 } else
3167 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3168 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3169 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3170 break;
3171 delay(1000);
3172 }
3173 if (i >= WM_PHY_CFG_TIMEOUT) {
3174 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3175 device_xname(sc->sc_dev), __func__));
3176 }
3177 break;
3178 case WM_T_ICH8:
3179 case WM_T_ICH9:
3180 case WM_T_ICH10:
3181 case WM_T_PCH:
3182 case WM_T_PCH2:
3183 case WM_T_PCH_LPT:
3184 delay(10*1000);
3185 if (sc->sc_type >= WM_T_ICH10)
3186 wm_lan_init_done(sc);
3187 else
3188 wm_get_auto_rd_done(sc);
3189
3190 reg = CSR_READ(sc, WMREG_STATUS);
3191 if ((reg & STATUS_PHYRA) != 0)
3192 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3193 break;
3194 default:
3195 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3196 __func__);
3197 break;
3198 }
3199 }
3200
3201 /*
3202 * wm_reset:
3203 *
3204 	 * Reset the chip to a known state.
3205 */
3206 static void
3207 wm_reset(struct wm_softc *sc)
3208 {
3209 int phy_reset = 0;
3210 int error = 0;
3211 uint32_t reg, mask;
3212
3213 /*
3214 * Allocate on-chip memory according to the MTU size.
3215 * The Packet Buffer Allocation register must be written
3216 * before the chip is reset.
3217 */
3218 switch (sc->sc_type) {
3219 case WM_T_82547:
3220 case WM_T_82547_2:
3221 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3222 PBA_22K : PBA_30K;
3223 sc->sc_txfifo_head = 0;
3224 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3225 sc->sc_txfifo_size =
3226 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3227 sc->sc_txfifo_stall = 0;
3228 break;
3229 case WM_T_82571:
3230 case WM_T_82572:
3231 case WM_T_82575: /* XXX need special handing for jumbo frames */
3232 case WM_T_I350:
3233 case WM_T_I354:
3234 case WM_T_80003:
3235 sc->sc_pba = PBA_32K;
3236 break;
3237 case WM_T_82580:
3238 case WM_T_82580ER:
3239 sc->sc_pba = PBA_35K;
3240 break;
3241 case WM_T_I210:
3242 case WM_T_I211:
3243 sc->sc_pba = PBA_34K;
3244 break;
3245 case WM_T_82576:
3246 sc->sc_pba = PBA_64K;
3247 break;
3248 case WM_T_82573:
3249 sc->sc_pba = PBA_12K;
3250 break;
3251 case WM_T_82574:
3252 case WM_T_82583:
3253 sc->sc_pba = PBA_20K;
3254 break;
3255 case WM_T_ICH8:
3256 sc->sc_pba = PBA_8K;
3257 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3258 break;
3259 case WM_T_ICH9:
3260 case WM_T_ICH10:
3261 sc->sc_pba = PBA_10K;
3262 break;
3263 case WM_T_PCH:
3264 case WM_T_PCH2:
3265 case WM_T_PCH_LPT:
3266 sc->sc_pba = PBA_26K;
3267 break;
3268 default:
3269 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3270 PBA_40K : PBA_48K;
3271 break;
3272 }
3273 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3274
3275 /* Prevent the PCI-E bus from sticking */
3276 if (sc->sc_flags & WM_F_PCIE) {
3277 int timeout = 800;
3278
3279 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3280 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3281
3282 while (timeout--) {
3283 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3284 == 0)
3285 break;
3286 delay(100);
3287 }
3288 }
3289
3290 /* Set the completion timeout for interface */
3291 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3292 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
3293 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3294 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3295 wm_set_pcie_completion_timeout(sc);
3296
3297 /* Clear interrupt */
3298 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3299
3300 /* Stop the transmit and receive processes. */
3301 CSR_WRITE(sc, WMREG_RCTL, 0);
3302 sc->sc_rctl &= ~RCTL_EN;
3303 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3304 CSR_WRITE_FLUSH(sc);
3305
3306 /* XXX set_tbi_sbp_82543() */
3307
3308 delay(10*1000);
3309
3310 /* Must acquire the MDIO ownership before MAC reset */
3311 switch (sc->sc_type) {
3312 case WM_T_82573:
3313 case WM_T_82574:
3314 case WM_T_82583:
3315 error = wm_get_hw_semaphore_82573(sc);
3316 break;
3317 default:
3318 break;
3319 }
3320
3321 /*
3322 * 82541 Errata 29? & 82547 Errata 28?
3323 * See also the description about PHY_RST bit in CTRL register
3324 * in 8254x_GBe_SDM.pdf.
3325 */
3326 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3327 CSR_WRITE(sc, WMREG_CTRL,
3328 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3329 CSR_WRITE_FLUSH(sc);
3330 delay(5000);
3331 }
3332
3333 switch (sc->sc_type) {
3334 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3335 case WM_T_82541:
3336 case WM_T_82541_2:
3337 case WM_T_82547:
3338 case WM_T_82547_2:
3339 /*
3340 * On some chipsets, a reset through a memory-mapped write
3341 * cycle can cause the chip to reset before completing the
3342 		 * write cycle. This causes major headaches that can be
3343 * avoided by issuing the reset via indirect register writes
3344 * through I/O space.
3345 *
3346 * So, if we successfully mapped the I/O BAR at attach time,
3347 * use that. Otherwise, try our luck with a memory-mapped
3348 * reset.
3349 */
3350 if (sc->sc_flags & WM_F_IOH_VALID)
3351 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3352 else
3353 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3354 break;
3355 case WM_T_82545_3:
3356 case WM_T_82546_3:
3357 /* Use the shadow control register on these chips. */
3358 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3359 break;
3360 case WM_T_80003:
3361 mask = swfwphysem[sc->sc_funcid];
3362 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3363 wm_get_swfw_semaphore(sc, mask);
3364 CSR_WRITE(sc, WMREG_CTRL, reg);
3365 wm_put_swfw_semaphore(sc, mask);
3366 break;
3367 case WM_T_ICH8:
3368 case WM_T_ICH9:
3369 case WM_T_ICH10:
3370 case WM_T_PCH:
3371 case WM_T_PCH2:
3372 case WM_T_PCH_LPT:
3373 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3374 if (wm_check_reset_block(sc) == 0) {
3375 /*
3376 * Gate automatic PHY configuration by hardware on
3377 * non-managed 82579
3378 */
3379 if ((sc->sc_type == WM_T_PCH2)
3380 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3381 != 0))
3382 wm_gate_hw_phy_config_ich8lan(sc, 1);
3383
3384
3385 reg |= CTRL_PHY_RESET;
3386 phy_reset = 1;
3387 }
3388 wm_get_swfwhw_semaphore(sc);
3389 CSR_WRITE(sc, WMREG_CTRL, reg);
3390 		/* Don't insert a completion barrier during reset */
3391 delay(20*1000);
3392 wm_put_swfwhw_semaphore(sc);
3393 break;
3394 case WM_T_82542_2_0:
3395 case WM_T_82542_2_1:
3396 case WM_T_82543:
3397 case WM_T_82540:
3398 case WM_T_82545:
3399 case WM_T_82546:
3400 case WM_T_82571:
3401 case WM_T_82572:
3402 case WM_T_82573:
3403 case WM_T_82574:
3404 case WM_T_82575:
3405 case WM_T_82576:
3406 case WM_T_82580:
3407 case WM_T_82580ER:
3408 case WM_T_82583:
3409 case WM_T_I350:
3410 case WM_T_I354:
3411 case WM_T_I210:
3412 case WM_T_I211:
3413 default:
3414 /* Everything else can safely use the documented method. */
3415 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3416 break;
3417 }
3418
3419 /* Must release the MDIO ownership after MAC reset */
3420 switch (sc->sc_type) {
3421 case WM_T_82573:
3422 case WM_T_82574:
3423 case WM_T_82583:
3424 if (error == 0)
3425 wm_put_hw_semaphore_82573(sc);
3426 break;
3427 default:
3428 break;
3429 }
3430
3431 if (phy_reset != 0)
3432 wm_get_cfg_done(sc);
3433
3434 /* reload EEPROM */
3435 switch (sc->sc_type) {
3436 case WM_T_82542_2_0:
3437 case WM_T_82542_2_1:
3438 case WM_T_82543:
3439 case WM_T_82544:
3440 delay(10);
3441 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3442 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3443 CSR_WRITE_FLUSH(sc);
3444 delay(2000);
3445 break;
3446 case WM_T_82540:
3447 case WM_T_82545:
3448 case WM_T_82545_3:
3449 case WM_T_82546:
3450 case WM_T_82546_3:
3451 delay(5*1000);
3452 /* XXX Disable HW ARPs on ASF enabled adapters */
3453 break;
3454 case WM_T_82541:
3455 case WM_T_82541_2:
3456 case WM_T_82547:
3457 case WM_T_82547_2:
3458 delay(20000);
3459 /* XXX Disable HW ARPs on ASF enabled adapters */
3460 break;
3461 case WM_T_82571:
3462 case WM_T_82572:
3463 case WM_T_82573:
3464 case WM_T_82574:
3465 case WM_T_82583:
3466 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3467 delay(10);
3468 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3469 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3470 CSR_WRITE_FLUSH(sc);
3471 }
3472 /* check EECD_EE_AUTORD */
3473 wm_get_auto_rd_done(sc);
3474 /*
3475 		 * PHY configuration from the NVM starts only after
3476 		 * EECD_AUTO_RD is set.
3477 */
3478 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3479 || (sc->sc_type == WM_T_82583))
3480 delay(25*1000);
3481 break;
3482 case WM_T_82575:
3483 case WM_T_82576:
3484 case WM_T_82580:
3485 case WM_T_82580ER:
3486 case WM_T_I350:
3487 case WM_T_I354:
3488 case WM_T_I210:
3489 case WM_T_I211:
3490 case WM_T_80003:
3491 /* check EECD_EE_AUTORD */
3492 wm_get_auto_rd_done(sc);
3493 break;
3494 case WM_T_ICH8:
3495 case WM_T_ICH9:
3496 case WM_T_ICH10:
3497 case WM_T_PCH:
3498 case WM_T_PCH2:
3499 case WM_T_PCH_LPT:
3500 break;
3501 default:
3502 panic("%s: unknown type\n", __func__);
3503 }
3504
3505 /* Check whether EEPROM is present or not */
3506 switch (sc->sc_type) {
3507 case WM_T_82575:
3508 case WM_T_82576:
3509 #if 0 /* XXX */
3510 case WM_T_82580:
3511 case WM_T_82580ER:
3512 #endif
3513 case WM_T_I350:
3514 case WM_T_I354:
3515 case WM_T_ICH8:
3516 case WM_T_ICH9:
3517 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3518 /* Not found */
3519 sc->sc_flags |= WM_F_EEPROM_INVALID;
3520 if ((sc->sc_type == WM_T_82575)
3521 || (sc->sc_type == WM_T_82576)
3522 || (sc->sc_type == WM_T_82580)
3523 || (sc->sc_type == WM_T_82580ER)
3524 || (sc->sc_type == WM_T_I350)
3525 || (sc->sc_type == WM_T_I354))
3526 wm_reset_init_script_82575(sc);
3527 }
3528 break;
3529 default:
3530 break;
3531 }
3532
3533 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
3534 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3535 /* clear global device reset status bit */
3536 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3537 }
3538
3539 /* Clear any pending interrupt events. */
3540 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3541 reg = CSR_READ(sc, WMREG_ICR);
3542
3543 /* reload sc_ctrl */
3544 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3545
3546 if (sc->sc_type == WM_T_I350)
3547 wm_set_eee_i350(sc);
3548
3549 /* dummy read from WUC */
3550 if (sc->sc_type == WM_T_PCH)
3551 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3552 /*
3553 * For PCH, this write will make sure that any noise will be detected
3554 * as a CRC error and be dropped rather than show up as a bad packet
3555 	 * to the DMA engine.
3556 */
3557 if (sc->sc_type == WM_T_PCH)
3558 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3559
3560 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3561 CSR_WRITE(sc, WMREG_WUC, 0);
3562
3563 /* XXX need special handling for 82580 */
3564 }
3565
3566 /*
3567 * wm_add_rxbuf:
3568 *
3569 	 * Add a receive buffer to the indicated descriptor.
3570 */
3571 static int
3572 wm_add_rxbuf(struct wm_softc *sc, int idx)
3573 {
3574 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3575 struct mbuf *m;
3576 int error;
3577
3578 KASSERT(WM_RX_LOCKED(sc));
3579
3580 MGETHDR(m, M_DONTWAIT, MT_DATA);
3581 if (m == NULL)
3582 return ENOBUFS;
3583
3584 MCLGET(m, M_DONTWAIT);
3585 if ((m->m_flags & M_EXT) == 0) {
3586 m_freem(m);
3587 return ENOBUFS;
3588 }
3589
3590 if (rxs->rxs_mbuf != NULL)
3591 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3592
3593 rxs->rxs_mbuf = m;
3594
3595 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3596 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3597 BUS_DMA_READ|BUS_DMA_NOWAIT);
3598 if (error) {
3599 /* XXX XXX XXX */
3600 aprint_error_dev(sc->sc_dev,
3601 "unable to load rx DMA map %d, error = %d\n",
3602 idx, error);
3603 panic("wm_add_rxbuf");
3604 }
3605
3606 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3607 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3608
3609 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3610 if ((sc->sc_rctl & RCTL_EN) != 0)
3611 WM_INIT_RXDESC(sc, idx);
3612 } else
3613 WM_INIT_RXDESC(sc, idx);
3614
3615 return 0;
3616 }
3617
3618 /*
3619 * wm_rxdrain:
3620 *
3621 * Drain the receive queue.
3622 */
3623 static void
3624 wm_rxdrain(struct wm_softc *sc)
3625 {
3626 struct wm_rxsoft *rxs;
3627 int i;
3628
3629 KASSERT(WM_RX_LOCKED(sc));
3630
3631 for (i = 0; i < WM_NRXDESC; i++) {
3632 rxs = &sc->sc_rxsoft[i];
3633 if (rxs->rxs_mbuf != NULL) {
3634 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3635 m_freem(rxs->rxs_mbuf);
3636 rxs->rxs_mbuf = NULL;
3637 }
3638 }
3639 }
3640
3641 /*
3642 * wm_init: [ifnet interface function]
3643 *
3644 * Initialize the interface.
3645 */
3646 static int
3647 wm_init(struct ifnet *ifp)
3648 {
3649 struct wm_softc *sc = ifp->if_softc;
3650 int ret;
3651
3652 WM_BOTH_LOCK(sc);
3653 ret = wm_init_locked(ifp);
3654 WM_BOTH_UNLOCK(sc);
3655
3656 return ret;
3657 }
3658
3659 static int
3660 wm_init_locked(struct ifnet *ifp)
3661 {
3662 struct wm_softc *sc = ifp->if_softc;
3663 struct wm_rxsoft *rxs;
3664 int i, j, trynum, error = 0;
3665 uint32_t reg;
3666
3667 KASSERT(WM_BOTH_LOCKED(sc));
3668 /*
3669 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3670 	 * There is a small but measurable benefit to avoiding the adjustment
3671 * of the descriptor so that the headers are aligned, for normal mtu,
3672 * on such platforms. One possibility is that the DMA itself is
3673 * slightly more efficient if the front of the entire packet (instead
3674 * of the front of the headers) is aligned.
3675 *
3676 * Note we must always set align_tweak to 0 if we are using
3677 * jumbo frames.
3678 */
3679 #ifdef __NO_STRICT_ALIGNMENT
3680 sc->sc_align_tweak = 0;
3681 #else
3682 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3683 sc->sc_align_tweak = 0;
3684 else
3685 sc->sc_align_tweak = 2;
3686 #endif /* __NO_STRICT_ALIGNMENT */
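
	/*
	 * For example, with the usual 2 KB mbuf clusters a standard
	 * 1500-byte MTU gives 1500 + ETHER_HDR_LEN (14) +
	 * ETHER_CRC_LEN (4) = 1518 <= MCLBYTES - 2, so sc_align_tweak
	 * is 2: the 2-byte shift makes the 14-byte Ethernet header end
	 * on a 4-byte boundary, leaving the IP header 32-bit aligned
	 * on strict-alignment platforms.
	 */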
3687
3688 /* Cancel any pending I/O. */
3689 wm_stop_locked(ifp, 0);
3690
3691 /* update statistics before reset */
3692 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3693 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3694
3695 /* Reset the chip to a known state. */
3696 wm_reset(sc);
3697
3698 switch (sc->sc_type) {
3699 case WM_T_82571:
3700 case WM_T_82572:
3701 case WM_T_82573:
3702 case WM_T_82574:
3703 case WM_T_82583:
3704 case WM_T_80003:
3705 case WM_T_ICH8:
3706 case WM_T_ICH9:
3707 case WM_T_ICH10:
3708 case WM_T_PCH:
3709 case WM_T_PCH2:
3710 case WM_T_PCH_LPT:
3711 if (wm_check_mng_mode(sc) != 0)
3712 wm_get_hw_control(sc);
3713 break;
3714 default:
3715 break;
3716 }
3717
3718 /* Reset the PHY. */
3719 if (sc->sc_flags & WM_F_HAS_MII)
3720 wm_gmii_reset(sc);
3721
3722 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3723 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3724 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3725 || (sc->sc_type == WM_T_PCH_LPT))
3726 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3727
3728 /* Initialize the transmit descriptor ring. */
3729 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3730 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3731 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3732 sc->sc_txfree = WM_NTXDESC(sc);
3733 sc->sc_txnext = 0;
3734
3735 if (sc->sc_type < WM_T_82543) {
3736 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3737 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3738 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3739 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3740 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3741 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3742 } else {
3743 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3744 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3745 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3746 CSR_WRITE(sc, WMREG_TDH, 0);
3747 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3748 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3749
3750 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3751 /*
3752 			 * Don't write TDT before TCTL.EN is set.
3753 			 * See the Intel documentation.
3754 */
3755 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3756 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3757 | TXDCTL_WTHRESH(0));
3758 else {
3759 CSR_WRITE(sc, WMREG_TDT, 0);
3760 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3761 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3762 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3763 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3764 }
3765 }
3766 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3767 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3768
3769 /* Initialize the transmit job descriptors. */
3770 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3771 sc->sc_txsoft[i].txs_mbuf = NULL;
3772 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3773 sc->sc_txsnext = 0;
3774 sc->sc_txsdirty = 0;
3775
3776 /*
3777 * Initialize the receive descriptor and receive job
3778 * descriptor rings.
3779 */
3780 if (sc->sc_type < WM_T_82543) {
3781 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3782 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3783 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3784 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3785 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3786 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3787
3788 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3789 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3790 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3791 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3792 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3793 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3794 } else {
3795 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3796 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3797 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3798 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3799 CSR_WRITE(sc, WMREG_EITR(0), 450);
3800 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3801 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3802 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3803 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
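			/*
			 * SRRCTL.BSIZEPKT counts the buffer size in 1 KB
			 * units, so e.g. the usual MCLBYTES of 2048
			 * programs a value of 2; the panic above catches
			 * cluster sizes that are not a whole number of KB.
			 */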
3804 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3805 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3806 | RXDCTL_WTHRESH(1));
3807 } else {
3808 CSR_WRITE(sc, WMREG_RDH, 0);
3809 CSR_WRITE(sc, WMREG_RDT, 0);
3810 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3811 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3812 }
3813 }
3814 for (i = 0; i < WM_NRXDESC; i++) {
3815 rxs = &sc->sc_rxsoft[i];
3816 if (rxs->rxs_mbuf == NULL) {
3817 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3818 log(LOG_ERR, "%s: unable to allocate or map "
3819 "rx buffer %d, error = %d\n",
3820 device_xname(sc->sc_dev), i, error);
3821 /*
3822 * XXX Should attempt to run with fewer receive
3823 * XXX buffers instead of just failing.
3824 */
3825 wm_rxdrain(sc);
3826 goto out;
3827 }
3828 } else {
3829 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3830 WM_INIT_RXDESC(sc, i);
3831 /*
3832 			 * For 82575 and newer devices, the RX descriptors
3833 * must be initialized after the setting of RCTL.EN in
3834 * wm_set_filter()
3835 */
3836 }
3837 }
3838 sc->sc_rxptr = 0;
3839 sc->sc_rxdiscard = 0;
3840 WM_RXCHAIN_RESET(sc);
3841
3842 /*
3843 * Clear out the VLAN table -- we don't use it (yet).
3844 */
3845 CSR_WRITE(sc, WMREG_VET, 0);
3846 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3847 trynum = 10; /* Due to hw errata */
3848 else
3849 trynum = 1;
3850 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3851 for (j = 0; j < trynum; j++)
3852 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3853
3854 /*
3855 * Set up flow-control parameters.
3856 *
3857 * XXX Values could probably stand some tuning.
3858 */
3859 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3860 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3861 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3862 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3863 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3864 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3865 }
3866
3867 sc->sc_fcrtl = FCRTL_DFLT;
3868 if (sc->sc_type < WM_T_82543) {
3869 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3870 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3871 } else {
3872 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3873 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3874 }
3875
3876 if (sc->sc_type == WM_T_80003)
3877 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3878 else
3879 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3880
3881 /* Writes the control register. */
3882 wm_set_vlan(sc);
3883
3884 if (sc->sc_flags & WM_F_HAS_MII) {
3885 int val;
3886
3887 switch (sc->sc_type) {
3888 case WM_T_80003:
3889 case WM_T_ICH8:
3890 case WM_T_ICH9:
3891 case WM_T_ICH10:
3892 case WM_T_PCH:
3893 case WM_T_PCH2:
3894 case WM_T_PCH_LPT:
3895 /*
3896 * Set the mac to wait the maximum time between each
3897 * iteration and increase the max iterations when
3898 * polling the phy; this fixes erroneous timeouts at
3899 * 10Mbps.
3900 */
3901 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3902 0xFFFF);
3903 val = wm_kmrn_readreg(sc,
3904 KUMCTRLSTA_OFFSET_INB_PARAM);
3905 val |= 0x3F;
3906 wm_kmrn_writereg(sc,
3907 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3908 break;
3909 default:
3910 break;
3911 }
3912
3913 if (sc->sc_type == WM_T_80003) {
3914 val = CSR_READ(sc, WMREG_CTRL_EXT);
3915 val &= ~CTRL_EXT_LINK_MODE_MASK;
3916 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3917
3918 /* Bypass RX and TX FIFO's */
3919 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3920 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3921 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3922 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3923 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3924 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3925 }
3926 }
3927 #if 0
3928 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3929 #endif
3930
3931 /* Set up checksum offload parameters. */
3932 reg = CSR_READ(sc, WMREG_RXCSUM);
3933 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3934 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3935 reg |= RXCSUM_IPOFL;
3936 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3937 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3938 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3939 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3940 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3941
3942 /* Set up the interrupt registers. */
3943 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3944 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3945 ICR_RXO | ICR_RXT0;
3946 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3947
3948 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3949 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3950 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3951 reg = CSR_READ(sc, WMREG_KABGTXD);
3952 reg |= KABGTXD_BGSQLBIAS;
3953 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3954 }
3955
3956 /* Set up the inter-packet gap. */
3957 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3958
3959 if (sc->sc_type >= WM_T_82543) {
3960 /*
3961 * Set up the interrupt throttling register (units of 256ns)
3962 * Note that a footnote in Intel's documentation says this
3963 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3964 * or 10Mbit mode. Empirically, it appears to be the case
3965 * that that is also true for the 1024ns units of the other
3966 * interrupt-related timer registers -- so, really, we ought
3967 * to divide this value by 4 when the link speed is low.
3968 *
3969 * XXX implement this division at link speed change!
3970 */
3971
3972 /*
3973 * For N interrupts/sec, set this value to:
3974 * 1000000000 / (N * 256). Note that we set the
3975 * absolute and packet timer values to this value
3976 * divided by 4 to get "simple timer" behavior.
3977 */
3978
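		/*
		 * For example, the value 1500 used below yields
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
		 * and the TIDV/TADV values of 375 programmed earlier
		 * are 1500 / 4, per the "divided by 4" rule above.
		 */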
3979 sc->sc_itr = 1500; /* 2604 ints/sec */
3980 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3981 }
3982
3983 /* Set the VLAN ethernetype. */
3984 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3985
3986 /*
3987 * Set up the transmit control register; we start out with
3988 	 * a collision distance suitable for FDX, but update it when
3989 * we resolve the media type.
3990 */
3991 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3992 | TCTL_CT(TX_COLLISION_THRESHOLD)
3993 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3994 if (sc->sc_type >= WM_T_82571)
3995 sc->sc_tctl |= TCTL_MULR;
3996 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3997
3998 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3999 		/* Write TDT after TCTL.EN is set. See the Intel documentation. */
4000 CSR_WRITE(sc, WMREG_TDT, 0);
4001 }
4002
4003 if (sc->sc_type == WM_T_80003) {
4004 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4005 reg &= ~TCTL_EXT_GCEX_MASK;
4006 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4007 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4008 }
4009
4010 /* Set the media. */
4011 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4012 goto out;
4013
4014 /* Configure for OS presence */
4015 wm_init_manageability(sc);
4016
4017 /*
4018 * Set up the receive control register; we actually program
4019 * the register when we set the receive filter. Use multicast
4020 * address offset type 0.
4021 *
4022 * Only the i82544 has the ability to strip the incoming
4023 * CRC, so we don't enable that feature.
4024 */
4025 sc->sc_mchash_type = 0;
4026 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4027 | RCTL_MO(sc->sc_mchash_type);
4028
4029 /*
4030 	 * The I350 has a bug where it always strips the CRC whether
4031 	 * asked to or not. So ask for stripped CRC here and cope in wm_rxintr().
4032 */
4033 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4034 || (sc->sc_type == WM_T_I210))
4035 sc->sc_rctl |= RCTL_SECRC;
4036
4037 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4038 && (ifp->if_mtu > ETHERMTU)) {
4039 sc->sc_rctl |= RCTL_LPE;
4040 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4041 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4042 }
4043
4044 if (MCLBYTES == 2048) {
4045 sc->sc_rctl |= RCTL_2k;
4046 } else {
4047 if (sc->sc_type >= WM_T_82543) {
4048 switch (MCLBYTES) {
4049 case 4096:
4050 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4051 break;
4052 case 8192:
4053 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4054 break;
4055 case 16384:
4056 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4057 break;
4058 default:
4059 panic("wm_init: MCLBYTES %d unsupported",
4060 MCLBYTES);
4061 break;
4062 }
4063 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4064 }
4065
4066 /* Set the receive filter. */
4067 wm_set_filter(sc);
4068
4069 /* Enable ECC */
4070 switch (sc->sc_type) {
4071 case WM_T_82571:
4072 reg = CSR_READ(sc, WMREG_PBA_ECC);
4073 reg |= PBA_ECC_CORR_EN;
4074 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4075 break;
4076 case WM_T_PCH_LPT:
4077 reg = CSR_READ(sc, WMREG_PBECCSTS);
4078 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4079 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4080
4081 reg = CSR_READ(sc, WMREG_CTRL);
4082 reg |= CTRL_MEHE;
4083 CSR_WRITE(sc, WMREG_CTRL, reg);
4084 break;
4085 default:
4086 break;
4087 }
4088
4089 /* On 575 and later set RDT only if RX enabled */
4090 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4091 for (i = 0; i < WM_NRXDESC; i++)
4092 WM_INIT_RXDESC(sc, i);
4093
4094 sc->sc_stopping = false;
4095
4096 /* Start the one second link check clock. */
4097 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4098
4099 /* ...all done! */
4100 ifp->if_flags |= IFF_RUNNING;
4101 ifp->if_flags &= ~IFF_OACTIVE;
4102
4103 out:
4104 sc->sc_if_flags = ifp->if_flags;
4105 if (error)
4106 log(LOG_ERR, "%s: interface not running\n",
4107 device_xname(sc->sc_dev));
4108 return error;
4109 }
4110
4111 /*
4112 * wm_stop: [ifnet interface function]
4113 *
4114 * Stop transmission on the interface.
4115 */
4116 static void
4117 wm_stop(struct ifnet *ifp, int disable)
4118 {
4119 struct wm_softc *sc = ifp->if_softc;
4120
4121 WM_BOTH_LOCK(sc);
4122 wm_stop_locked(ifp, disable);
4123 WM_BOTH_UNLOCK(sc);
4124 }
4125
4126 static void
4127 wm_stop_locked(struct ifnet *ifp, int disable)
4128 {
4129 struct wm_softc *sc = ifp->if_softc;
4130 struct wm_txsoft *txs;
4131 int i;
4132
4133 KASSERT(WM_BOTH_LOCKED(sc));
4134
4135 sc->sc_stopping = true;
4136
4137 /* Stop the one second clock. */
4138 callout_stop(&sc->sc_tick_ch);
4139
4140 /* Stop the 82547 Tx FIFO stall check timer. */
4141 if (sc->sc_type == WM_T_82547)
4142 callout_stop(&sc->sc_txfifo_ch);
4143
4144 if (sc->sc_flags & WM_F_HAS_MII) {
4145 /* Down the MII. */
4146 mii_down(&sc->sc_mii);
4147 } else {
4148 #if 0
4149 /* Should we clear PHY's status properly? */
4150 wm_reset(sc);
4151 #endif
4152 }
4153
4154 /* Stop the transmit and receive processes. */
4155 CSR_WRITE(sc, WMREG_TCTL, 0);
4156 CSR_WRITE(sc, WMREG_RCTL, 0);
4157 sc->sc_rctl &= ~RCTL_EN;
4158
4159 /*
4160 * Clear the interrupt mask to ensure the device cannot assert its
4161 * interrupt line.
4162 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4163 * any currently pending or shared interrupt.
4164 */
4165 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4166 sc->sc_icr = 0;
4167
4168 /* Release any queued transmit buffers. */
4169 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4170 txs = &sc->sc_txsoft[i];
4171 if (txs->txs_mbuf != NULL) {
4172 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4173 m_freem(txs->txs_mbuf);
4174 txs->txs_mbuf = NULL;
4175 }
4176 }
4177
4178 /* Mark the interface as down and cancel the watchdog timer. */
4179 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4180 ifp->if_timer = 0;
4181
4182 if (disable)
4183 wm_rxdrain(sc);
4184
4185 #if 0 /* notyet */
4186 if (sc->sc_type >= WM_T_82544)
4187 CSR_WRITE(sc, WMREG_WUC, 0);
4188 #endif
4189 }
4190
4191 /*
4192 * wm_tx_offload:
4193 *
4194 * Set up TCP/IP checksumming parameters for the
4195 * specified packet.
4196 */
4197 static int
4198 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4199 uint8_t *fieldsp)
4200 {
4201 struct mbuf *m0 = txs->txs_mbuf;
4202 struct livengood_tcpip_ctxdesc *t;
4203 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4204 uint32_t ipcse;
4205 struct ether_header *eh;
4206 int offset, iphl;
4207 uint8_t fields;
4208
4209 /*
4210 * XXX It would be nice if the mbuf pkthdr had offset
4211 * fields for the protocol headers.
4212 */
4213
4214 eh = mtod(m0, struct ether_header *);
4215 switch (htons(eh->ether_type)) {
4216 case ETHERTYPE_IP:
4217 case ETHERTYPE_IPV6:
4218 offset = ETHER_HDR_LEN;
4219 break;
4220
4221 case ETHERTYPE_VLAN:
4222 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4223 break;
4224
4225 default:
4226 /*
4227 * Don't support this protocol or encapsulation.
4228 */
4229 *fieldsp = 0;
4230 *cmdp = 0;
4231 return 0;
4232 }
4233
4234 if ((m0->m_pkthdr.csum_flags &
4235 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4236 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4237 } else {
4238 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4239 }
4240 ipcse = offset + iphl - 1;
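	/*
	 * For example, an untagged IPv4 frame with a minimal 20-byte
	 * header has offset = ETHER_HDR_LEN (14) and iphl = 20, so
	 * ipcse = 33, the offset of the last byte covered by the IP
	 * header checksum. (For TSO over IPv6, ipcse is cleared below.)
	 */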
4241
4242 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4243 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4244 seg = 0;
4245 fields = 0;
4246
4247 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4248 int hlen = offset + iphl;
4249 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4250
4251 if (__predict_false(m0->m_len <
4252 (hlen + sizeof(struct tcphdr)))) {
4253 /*
4254 * TCP/IP headers are not in the first mbuf; we need
4255 * to do this the slow and painful way. Let's just
4256 * hope this doesn't happen very often.
4257 */
4258 struct tcphdr th;
4259
4260 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4261
4262 m_copydata(m0, hlen, sizeof(th), &th);
4263 if (v4) {
4264 struct ip ip;
4265
4266 m_copydata(m0, offset, sizeof(ip), &ip);
4267 ip.ip_len = 0;
4268 m_copyback(m0,
4269 offset + offsetof(struct ip, ip_len),
4270 sizeof(ip.ip_len), &ip.ip_len);
4271 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4272 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4273 } else {
4274 struct ip6_hdr ip6;
4275
4276 m_copydata(m0, offset, sizeof(ip6), &ip6);
4277 ip6.ip6_plen = 0;
4278 m_copyback(m0,
4279 offset + offsetof(struct ip6_hdr, ip6_plen),
4280 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4281 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4282 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4283 }
4284 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4285 sizeof(th.th_sum), &th.th_sum);
4286
4287 hlen += th.th_off << 2;
4288 } else {
4289 /*
4290 * TCP/IP headers are in the first mbuf; we can do
4291 * this the easy way.
4292 */
4293 struct tcphdr *th;
4294
4295 if (v4) {
4296 struct ip *ip =
4297 (void *)(mtod(m0, char *) + offset);
4298 th = (void *)(mtod(m0, char *) + hlen);
4299
4300 ip->ip_len = 0;
4301 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4302 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4303 } else {
4304 struct ip6_hdr *ip6 =
4305 (void *)(mtod(m0, char *) + offset);
4306 th = (void *)(mtod(m0, char *) + hlen);
4307
4308 ip6->ip6_plen = 0;
4309 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4310 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4311 }
4312 hlen += th->th_off << 2;
4313 }
4314
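		/*
		 * In both paths above, ip_len/ip6_plen were zeroed and
		 * th_sum was seeded with a pseudo-header checksum
		 * computed over a zero length; during TSO the hardware
		 * inserts the per-segment lengths and completes the
		 * TCP checksum from that seed.
		 */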
4315 if (v4) {
4316 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4317 cmdlen |= WTX_TCPIP_CMD_IP;
4318 } else {
4319 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4320 ipcse = 0;
4321 }
4322 cmd |= WTX_TCPIP_CMD_TSE;
4323 cmdlen |= WTX_TCPIP_CMD_TSE |
4324 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4325 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4326 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4327 }
4328
4329 /*
4330 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4331 * offload feature, if we load the context descriptor, we
4332 * MUST provide valid values for IPCSS and TUCSS fields.
4333 */
4334
4335 ipcs = WTX_TCPIP_IPCSS(offset) |
4336 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4337 WTX_TCPIP_IPCSE(ipcse);
4338 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4339 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4340 fields |= WTX_IXSM;
4341 }
4342
4343 offset += iphl;
4344
4345 if (m0->m_pkthdr.csum_flags &
4346 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4347 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4348 fields |= WTX_TXSM;
4349 tucs = WTX_TCPIP_TUCSS(offset) |
4350 WTX_TCPIP_TUCSO(offset +
4351 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4352 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4353 } else if ((m0->m_pkthdr.csum_flags &
4354 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4355 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4356 fields |= WTX_TXSM;
4357 tucs = WTX_TCPIP_TUCSS(offset) |
4358 WTX_TCPIP_TUCSO(offset +
4359 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4360 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4361 } else {
4362 /* Just initialize it to a valid TCP context. */
4363 tucs = WTX_TCPIP_TUCSS(offset) |
4364 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4365 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4366 }
4367
4368 /* Fill in the context descriptor. */
4369 t = (struct livengood_tcpip_ctxdesc *)
4370 &sc->sc_txdescs[sc->sc_txnext];
4371 t->tcpip_ipcs = htole32(ipcs);
4372 t->tcpip_tucs = htole32(tucs);
4373 t->tcpip_cmdlen = htole32(cmdlen);
4374 t->tcpip_seg = htole32(seg);
4375 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4376
4377 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4378 txs->txs_ndesc++;
4379
4380 *cmdp = cmd;
4381 *fieldsp = fields;
4382
4383 return 0;
4384 }
4385
4386 static void
4387 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4388 {
4389 struct mbuf *m;
4390 int i;
4391
4392 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4393 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4394 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4395 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4396 m->m_data, m->m_len, m->m_flags);
4397 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4398 i, i == 1 ? "" : "s");
4399 }
4400
4401 /*
4402 * wm_82547_txfifo_stall:
4403 *
4404 * Callout used to wait for the 82547 Tx FIFO to drain,
4405 * reset the FIFO pointers, and restart packet transmission.
4406 */
4407 static void
4408 wm_82547_txfifo_stall(void *arg)
4409 {
4410 struct wm_softc *sc = arg;
4411 #ifndef WM_MPSAFE
4412 int s;
4413
4414 s = splnet();
4415 #endif
4416 WM_TX_LOCK(sc);
4417
4418 if (sc->sc_stopping)
4419 goto out;
4420
4421 if (sc->sc_txfifo_stall) {
4422 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4423 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4424 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4425 /*
4426 * Packets have drained. Stop transmitter, reset
4427 * FIFO pointers, restart transmitter, and kick
4428 * the packet queue.
4429 */
4430 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4431 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4432 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4433 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4434 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4435 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4436 CSR_WRITE(sc, WMREG_TCTL, tctl);
4437 CSR_WRITE_FLUSH(sc);
4438
4439 sc->sc_txfifo_head = 0;
4440 sc->sc_txfifo_stall = 0;
4441 wm_start_locked(&sc->sc_ethercom.ec_if);
4442 } else {
4443 /*
4444 * Still waiting for packets to drain; try again in
4445 * another tick.
4446 */
4447 callout_schedule(&sc->sc_txfifo_ch, 1);
4448 }
4449 }
4450
4451 out:
4452 WM_TX_UNLOCK(sc);
4453 #ifndef WM_MPSAFE
4454 splx(s);
4455 #endif
4456 }
4457
4458 /*
4459 * wm_82547_txfifo_bugchk:
4460 *
4461 * Check for bug condition in the 82547 Tx FIFO. We need to
4462 * prevent enqueueing a packet that would wrap around the end
4463  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4464 *
4465 * We do this by checking the amount of space before the end
4466 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4467 * the Tx FIFO, wait for all remaining packets to drain, reset
4468 * the internal FIFO pointers to the beginning, and restart
4469 * transmission on the interface.
4470 */
4471 #define WM_FIFO_HDR 0x10
4472 #define WM_82547_PAD_LEN 0x3e0
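/*
 * For example, with the constants above a full-sized 1514-byte frame
 * occupies roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of
 * FIFO, and WM_82547_PAD_LEN is 0x3e0 = 992 bytes, so the stall path
 * is taken when no more than len - 992 bytes remain before the end
 * of the FIFO.
 */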
4473 static int
4474 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4475 {
4476 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4477 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4478
4479 /* Just return if already stalled. */
4480 if (sc->sc_txfifo_stall)
4481 return 1;
4482
4483 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4484 /* Stall only occurs in half-duplex mode. */
4485 goto send_packet;
4486 }
4487
4488 if (len >= WM_82547_PAD_LEN + space) {
4489 sc->sc_txfifo_stall = 1;
4490 callout_schedule(&sc->sc_txfifo_ch, 1);
4491 return 1;
4492 }
4493
4494 send_packet:
4495 sc->sc_txfifo_head += len;
4496 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4497 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4498
4499 return 0;
4500 }
4501
4502 /*
4503 * wm_start: [ifnet interface function]
4504 *
4505 * Start packet transmission on the interface.
4506 */
4507 static void
4508 wm_start(struct ifnet *ifp)
4509 {
4510 struct wm_softc *sc = ifp->if_softc;
4511
4512 WM_TX_LOCK(sc);
4513 if (!sc->sc_stopping)
4514 wm_start_locked(ifp);
4515 WM_TX_UNLOCK(sc);
4516 }
4517
4518 static void
4519 wm_start_locked(struct ifnet *ifp)
4520 {
4521 struct wm_softc *sc = ifp->if_softc;
4522 struct mbuf *m0;
4523 struct m_tag *mtag;
4524 struct wm_txsoft *txs;
4525 bus_dmamap_t dmamap;
4526 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4527 bus_addr_t curaddr;
4528 bus_size_t seglen, curlen;
4529 uint32_t cksumcmd;
4530 uint8_t cksumfields;
4531
4532 KASSERT(WM_TX_LOCKED(sc));
4533
4534 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4535 return;
4536
4537 /* Remember the previous number of free descriptors. */
4538 ofree = sc->sc_txfree;
4539
4540 /*
4541 * Loop through the send queue, setting up transmit descriptors
4542 * until we drain the queue, or use up all available transmit
4543 * descriptors.
4544 */
4545 for (;;) {
4546 m0 = NULL;
4547
4548 /* Get a work queue entry. */
4549 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4550 wm_txintr(sc);
4551 if (sc->sc_txsfree == 0) {
4552 DPRINTF(WM_DEBUG_TX,
4553 ("%s: TX: no free job descriptors\n",
4554 device_xname(sc->sc_dev)));
4555 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4556 break;
4557 }
4558 }
4559
4560 /* Grab a packet off the queue. */
4561 IFQ_DEQUEUE(&ifp->if_snd, m0);
4562 if (m0 == NULL)
4563 break;
4564
4565 DPRINTF(WM_DEBUG_TX,
4566 ("%s: TX: have packet to transmit: %p\n",
4567 device_xname(sc->sc_dev), m0));
4568
4569 txs = &sc->sc_txsoft[sc->sc_txsnext];
4570 dmamap = txs->txs_dmamap;
4571
4572 use_tso = (m0->m_pkthdr.csum_flags &
4573 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4574
4575 /*
4576 * So says the Linux driver:
4577 * The controller does a simple calculation to make sure
4578 * there is enough room in the FIFO before initiating the
4579 * DMA for each buffer. The calc is:
4580 * 4 = ceil(buffer len / MSS)
4581 * To make sure we don't overrun the FIFO, adjust the max
4582 * buffer len if the MSS drops.
4583 */
4584 dmamap->dm_maxsegsz =
4585 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4586 ? m0->m_pkthdr.segsz << 2
4587 : WTX_MAX_LEN;
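		/*
		 * For example, a typical MSS of 1448 caps each DMA
		 * segment at 1448 << 2 = 5792 bytes (assuming that is
		 * below WTX_MAX_LEN); non-TSO packets simply use
		 * WTX_MAX_LEN.
		 */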
4588
4589 /*
4590 * Load the DMA map. If this fails, the packet either
4591 * didn't fit in the allotted number of segments, or we
4592 * were short on resources. For the too-many-segments
4593 * case, we simply report an error and drop the packet,
4594 * since we can't sanely copy a jumbo packet to a single
4595 * buffer.
4596 */
4597 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4598 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4599 if (error) {
4600 if (error == EFBIG) {
4601 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4602 log(LOG_ERR, "%s: Tx packet consumes too many "
4603 "DMA segments, dropping...\n",
4604 device_xname(sc->sc_dev));
4605 wm_dump_mbuf_chain(sc, m0);
4606 m_freem(m0);
4607 continue;
4608 }
4609 /* Short on resources, just stop for now. */
4610 DPRINTF(WM_DEBUG_TX,
4611 ("%s: TX: dmamap load failed: %d\n",
4612 device_xname(sc->sc_dev), error));
4613 break;
4614 }
4615
4616 segs_needed = dmamap->dm_nsegs;
4617 if (use_tso) {
4618 /* For sentinel descriptor; see below. */
4619 segs_needed++;
4620 }
4621
4622 /*
4623 * Ensure we have enough descriptors free to describe
4624 * the packet. Note, we always reserve one descriptor
4625 * at the end of the ring due to the semantics of the
4626 * TDT register, plus one more in the event we need
4627 * to load offload context.
4628 */
4629 if (segs_needed > sc->sc_txfree - 2) {
4630 /*
4631 * Not enough free descriptors to transmit this
4632 * packet. We haven't committed anything yet,
4633 * so just unload the DMA map, put the packet
4634 			 * back on the queue, and punt. Notify the upper
4635 * layer that there are no more slots left.
4636 */
4637 DPRINTF(WM_DEBUG_TX,
4638 ("%s: TX: need %d (%d) descriptors, have %d\n",
4639 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4640 segs_needed, sc->sc_txfree - 1));
4641 ifp->if_flags |= IFF_OACTIVE;
4642 bus_dmamap_unload(sc->sc_dmat, dmamap);
4643 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4644 break;
4645 }
4646
4647 /*
4648 * Check for 82547 Tx FIFO bug. We need to do this
4649 * once we know we can transmit the packet, since we
4650 * do some internal FIFO space accounting here.
4651 */
4652 if (sc->sc_type == WM_T_82547 &&
4653 wm_82547_txfifo_bugchk(sc, m0)) {
4654 DPRINTF(WM_DEBUG_TX,
4655 ("%s: TX: 82547 Tx FIFO bug detected\n",
4656 device_xname(sc->sc_dev)));
4657 ifp->if_flags |= IFF_OACTIVE;
4658 bus_dmamap_unload(sc->sc_dmat, dmamap);
4659 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4660 break;
4661 }
4662
4663 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4664
4665 DPRINTF(WM_DEBUG_TX,
4666 ("%s: TX: packet has %d (%d) DMA segments\n",
4667 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4668
4669 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4670
4671 /*
4672 * Store a pointer to the packet so that we can free it
4673 * later.
4674 *
4675 * Initially, we consider the number of descriptors the
4676 		 * packet uses to be the number of DMA segments. This may be
4677 * incremented by 1 if we do checksum offload (a descriptor
4678 * is used to set the checksum context).
4679 */
4680 txs->txs_mbuf = m0;
4681 txs->txs_firstdesc = sc->sc_txnext;
4682 txs->txs_ndesc = segs_needed;
4683
4684 /* Set up offload parameters for this packet. */
4685 if (m0->m_pkthdr.csum_flags &
4686 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4687 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4688 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4689 if (wm_tx_offload(sc, txs, &cksumcmd,
4690 &cksumfields) != 0) {
4691 /* Error message already displayed. */
4692 bus_dmamap_unload(sc->sc_dmat, dmamap);
4693 continue;
4694 }
4695 } else {
4696 cksumcmd = 0;
4697 cksumfields = 0;
4698 }
4699
4700 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4701
4702 /* Sync the DMA map. */
4703 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4704 BUS_DMASYNC_PREWRITE);
4705
4706 /* Initialize the transmit descriptor. */
4707 for (nexttx = sc->sc_txnext, seg = 0;
4708 seg < dmamap->dm_nsegs; seg++) {
4709 for (seglen = dmamap->dm_segs[seg].ds_len,
4710 curaddr = dmamap->dm_segs[seg].ds_addr;
4711 seglen != 0;
4712 curaddr += curlen, seglen -= curlen,
4713 nexttx = WM_NEXTTX(sc, nexttx)) {
4714 curlen = seglen;
4715
4716 /*
4717 * So says the Linux driver:
4718 * Work around for premature descriptor
4719 * write-backs in TSO mode. Append a
4720 * 4-byte sentinel descriptor.
4721 */
4722 if (use_tso &&
4723 seg == dmamap->dm_nsegs - 1 &&
4724 curlen > 8)
4725 curlen -= 4;
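				/*
				 * Trimming 4 bytes here leaves
				 * seglen == 4 after the loop's update
				 * step, so one extra 4-byte descriptor
				 * is emitted: the sentinel reserved by
				 * segs_needed++ above.
				 */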
4726
4727 wm_set_dma_addr(
4728 &sc->sc_txdescs[nexttx].wtx_addr,
4729 curaddr);
4730 sc->sc_txdescs[nexttx].wtx_cmdlen =
4731 htole32(cksumcmd | curlen);
4732 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4733 0;
4734 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4735 cksumfields;
4736 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4737 lasttx = nexttx;
4738
4739 DPRINTF(WM_DEBUG_TX,
4740 ("%s: TX: desc %d: low %#" PRIx64 ", "
4741 "len %#04zx\n",
4742 device_xname(sc->sc_dev), nexttx,
4743 (uint64_t)curaddr, curlen));
4744 }
4745 }
4746
4747 KASSERT(lasttx != -1);
4748
4749 /*
4750 * Set up the command byte on the last descriptor of
4751 * the packet. If we're in the interrupt delay window,
4752 * delay the interrupt.
4753 */
4754 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4755 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4756
4757 /*
4758 * If VLANs are enabled and the packet has a VLAN tag, set
4759 * up the descriptor to encapsulate the packet for us.
4760 *
4761 * This is only valid on the last descriptor of the packet.
4762 */
4763 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4764 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4765 htole32(WTX_CMD_VLE);
4766 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4767 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4768 }
4769
4770 txs->txs_lastdesc = lasttx;
4771
4772 DPRINTF(WM_DEBUG_TX,
4773 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4774 device_xname(sc->sc_dev),
4775 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4776
4777 /* Sync the descriptors we're using. */
4778 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4779 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4780
4781 /* Give the packet to the chip. */
4782 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4783
4784 DPRINTF(WM_DEBUG_TX,
4785 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4786
4787 DPRINTF(WM_DEBUG_TX,
4788 ("%s: TX: finished transmitting packet, job %d\n",
4789 device_xname(sc->sc_dev), sc->sc_txsnext));
4790
4791 /* Advance the tx pointer. */
4792 sc->sc_txfree -= txs->txs_ndesc;
4793 sc->sc_txnext = nexttx;
4794
4795 sc->sc_txsfree--;
4796 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4797
4798 /* Pass the packet to any BPF listeners. */
4799 bpf_mtap(ifp, m0);
4800 }
4801
4802 if (m0 != NULL) {
4803 ifp->if_flags |= IFF_OACTIVE;
4804 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4805 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4806 m_freem(m0);
4807 }
4808
4809 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4810 /* No more slots; notify upper layer. */
4811 ifp->if_flags |= IFF_OACTIVE;
4812 }
4813
4814 if (sc->sc_txfree != ofree) {
4815 /* Set a watchdog timer in case the chip flakes out. */
4816 ifp->if_timer = 5;
4817 }
4818 }
4819
4820 /*
4821 * wm_nq_tx_offload:
4822 *
4823 * Set up TCP/IP checksumming parameters for the
4824 * specified packet, for NEWQUEUE devices
4825 */
4826 static int
4827 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4828 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4829 {
4830 struct mbuf *m0 = txs->txs_mbuf;
4831 struct m_tag *mtag;
4832 uint32_t vl_len, mssidx, cmdc;
4833 struct ether_header *eh;
4834 int offset, iphl;
4835
4836 /*
4837 * XXX It would be nice if the mbuf pkthdr had offset
4838 * fields for the protocol headers.
4839 */
4840 *cmdlenp = 0;
4841 *fieldsp = 0;
4842
4843 eh = mtod(m0, struct ether_header *);
4844 switch (htons(eh->ether_type)) {
4845 case ETHERTYPE_IP:
4846 case ETHERTYPE_IPV6:
4847 offset = ETHER_HDR_LEN;
4848 break;
4849
4850 case ETHERTYPE_VLAN:
4851 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4852 break;
4853
4854 default:
4855 /* Don't support this protocol or encapsulation. */
4856 *do_csum = false;
4857 return 0;
4858 }
4859 *do_csum = true;
4860 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4861 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4862
4863 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4864 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4865
4866 if ((m0->m_pkthdr.csum_flags &
4867 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4868 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4869 } else {
4870 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4871 }
4872 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4873 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
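
	/*
	 * For example, an untagged IPv4/TCP frame packs MACLEN = 14
	 * and IPLEN = 20 into vl_len at this point; the VLAN field is
	 * added just below when an outgoing tag is present.
	 */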
4874
4875 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4876 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4877 << NQTXC_VLLEN_VLAN_SHIFT);
4878 *cmdlenp |= NQTX_CMD_VLE;
4879 }
4880
4881 mssidx = 0;
4882
4883 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4884 int hlen = offset + iphl;
4885 int tcp_hlen;
4886 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4887
4888 if (__predict_false(m0->m_len <
4889 (hlen + sizeof(struct tcphdr)))) {
4890 /*
4891 * TCP/IP headers are not in the first mbuf; we need
4892 * to do this the slow and painful way. Let's just
4893 * hope this doesn't happen very often.
4894 */
4895 struct tcphdr th;
4896
4897 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4898
4899 m_copydata(m0, hlen, sizeof(th), &th);
4900 if (v4) {
4901 struct ip ip;
4902
4903 m_copydata(m0, offset, sizeof(ip), &ip);
4904 ip.ip_len = 0;
4905 m_copyback(m0,
4906 offset + offsetof(struct ip, ip_len),
4907 sizeof(ip.ip_len), &ip.ip_len);
4908 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4909 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4910 } else {
4911 struct ip6_hdr ip6;
4912
4913 m_copydata(m0, offset, sizeof(ip6), &ip6);
4914 ip6.ip6_plen = 0;
4915 m_copyback(m0,
4916 offset + offsetof(struct ip6_hdr, ip6_plen),
4917 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4918 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4919 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4920 }
4921 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4922 sizeof(th.th_sum), &th.th_sum);
4923
4924 tcp_hlen = th.th_off << 2;
4925 } else {
4926 /*
4927 * TCP/IP headers are in the first mbuf; we can do
4928 * this the easy way.
4929 */
4930 struct tcphdr *th;
4931
4932 if (v4) {
4933 struct ip *ip =
4934 (void *)(mtod(m0, char *) + offset);
4935 th = (void *)(mtod(m0, char *) + hlen);
4936
4937 ip->ip_len = 0;
4938 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4939 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4940 } else {
4941 struct ip6_hdr *ip6 =
4942 (void *)(mtod(m0, char *) + offset);
4943 th = (void *)(mtod(m0, char *) + hlen);
4944
4945 ip6->ip6_plen = 0;
4946 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4947 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4948 }
4949 tcp_hlen = th->th_off << 2;
4950 }
4951 hlen += tcp_hlen;
4952 *cmdlenp |= NQTX_CMD_TSE;
4953
4954 if (v4) {
4955 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4956 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
4957 } else {
4958 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4959 *fieldsp |= NQTXD_FIELDS_TUXSM;
4960 }
4961 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
4962 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4963 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
4964 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
4965 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
4966 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
4967 } else {
4968 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
4969 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4970 }
4971
4972 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
4973 *fieldsp |= NQTXD_FIELDS_IXSM;
4974 cmdc |= NQTXC_CMD_IP4;
4975 }
4976
4977 if (m0->m_pkthdr.csum_flags &
4978 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
4979 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4980 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
4981 cmdc |= NQTXC_CMD_TCP;
4982 } else {
4983 cmdc |= NQTXC_CMD_UDP;
4984 }
4985 cmdc |= NQTXC_CMD_IP4;
4986 *fieldsp |= NQTXD_FIELDS_TUXSM;
4987 }
4988 if (m0->m_pkthdr.csum_flags &
4989 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
4990 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4991 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
4992 cmdc |= NQTXC_CMD_TCP;
4993 } else {
4994 cmdc |= NQTXC_CMD_UDP;
4995 }
4996 cmdc |= NQTXC_CMD_IP6;
4997 *fieldsp |= NQTXD_FIELDS_TUXSM;
4998 }
4999
5000 /* Fill in the context descriptor. */
5001 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5002 htole32(vl_len);
5003 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5004 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5005 htole32(cmdc);
5006 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5007 htole32(mssidx);
5008 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5009 DPRINTF(WM_DEBUG_TX,
5010 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5011 sc->sc_txnext, 0, vl_len));
5012 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5013 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5014 txs->txs_ndesc++;
5015 return 0;
5016 }
5017
5018 /*
5019 * wm_nq_start: [ifnet interface function]
5020 *
5021 * Start packet transmission on the interface for NEWQUEUE devices
5022 */
5023 static void
5024 wm_nq_start(struct ifnet *ifp)
5025 {
5026 struct wm_softc *sc = ifp->if_softc;
5027
5028 WM_TX_LOCK(sc);
5029 if (!sc->sc_stopping)
5030 wm_nq_start_locked(ifp);
5031 WM_TX_UNLOCK(sc);
5032 }
5033
5034 static void
5035 wm_nq_start_locked(struct ifnet *ifp)
5036 {
5037 struct wm_softc *sc = ifp->if_softc;
5038 struct mbuf *m0;
5039 struct m_tag *mtag;
5040 struct wm_txsoft *txs;
5041 bus_dmamap_t dmamap;
5042 int error, nexttx, lasttx = -1, seg, segs_needed;
5043 bool do_csum, sent;
5044
5045 KASSERT(WM_TX_LOCKED(sc));
5046
5047 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5048 return;
5049
5050 sent = false;
5051
5052 /*
5053 * Loop through the send queue, setting up transmit descriptors
5054 * until we drain the queue, or use up all available transmit
5055 * descriptors.
5056 */
5057 for (;;) {
5058 m0 = NULL;
5059
5060 /* Get a work queue entry. */
5061 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5062 wm_txintr(sc);
5063 if (sc->sc_txsfree == 0) {
5064 DPRINTF(WM_DEBUG_TX,
5065 ("%s: TX: no free job descriptors\n",
5066 device_xname(sc->sc_dev)));
5067 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5068 break;
5069 }
5070 }
5071
5072 /* Grab a packet off the queue. */
5073 IFQ_DEQUEUE(&ifp->if_snd, m0);
5074 if (m0 == NULL)
5075 break;
5076
5077 DPRINTF(WM_DEBUG_TX,
5078 ("%s: TX: have packet to transmit: %p\n",
5079 device_xname(sc->sc_dev), m0));
5080
5081 txs = &sc->sc_txsoft[sc->sc_txsnext];
5082 dmamap = txs->txs_dmamap;
5083
5084 /*
5085 * Load the DMA map. If this fails, the packet either
5086 * didn't fit in the allotted number of segments, or we
5087 * were short on resources. For the too-many-segments
5088 * case, we simply report an error and drop the packet,
5089 * since we can't sanely copy a jumbo packet to a single
5090 * buffer.
5091 */
5092 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5093 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5094 if (error) {
5095 if (error == EFBIG) {
5096 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5097 log(LOG_ERR, "%s: Tx packet consumes too many "
5098 "DMA segments, dropping...\n",
5099 device_xname(sc->sc_dev));
5100 wm_dump_mbuf_chain(sc, m0);
5101 m_freem(m0);
5102 continue;
5103 }
5104 /* Short on resources, just stop for now. */
5105 DPRINTF(WM_DEBUG_TX,
5106 ("%s: TX: dmamap load failed: %d\n",
5107 device_xname(sc->sc_dev), error));
5108 break;
5109 }
5110
5111 segs_needed = dmamap->dm_nsegs;
5112
5113 /*
5114 * Ensure we have enough descriptors free to describe
5115 * the packet. Note, we always reserve one descriptor
5116 * at the end of the ring due to the semantics of the
5117 * TDT register, plus one more in the event we need
5118 * to load offload context.
5119 */
5120 if (segs_needed > sc->sc_txfree - 2) {
5121 /*
5122 * Not enough free descriptors to transmit this
5123 * packet. We haven't committed anything yet,
5124 * so just unload the DMA map, put the packet
5125 			 * back on the queue, and punt. Notify the upper
5126 * layer that there are no more slots left.
5127 */
5128 DPRINTF(WM_DEBUG_TX,
5129 ("%s: TX: need %d (%d) descriptors, have %d\n",
5130 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5131 segs_needed, sc->sc_txfree - 1));
5132 ifp->if_flags |= IFF_OACTIVE;
5133 bus_dmamap_unload(sc->sc_dmat, dmamap);
5134 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5135 break;
5136 }
5137
5138 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5139
5140 DPRINTF(WM_DEBUG_TX,
5141 ("%s: TX: packet has %d (%d) DMA segments\n",
5142 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5143
5144 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5145
5146 /*
5147 * Store a pointer to the packet so that we can free it
5148 * later.
5149 *
5150 * Initially, we consider the number of descriptors the
5151 		 * packet uses to be the number of DMA segments. This may be
5152 * incremented by 1 if we do checksum offload (a descriptor
5153 * is used to set the checksum context).
5154 */
5155 txs->txs_mbuf = m0;
5156 txs->txs_firstdesc = sc->sc_txnext;
5157 txs->txs_ndesc = segs_needed;
5158
5159 /* Set up offload parameters for this packet. */
5160 uint32_t cmdlen, fields, dcmdlen;
5161 if (m0->m_pkthdr.csum_flags &
5162 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5163 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5164 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5165 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5166 &do_csum) != 0) {
5167 /* Error message already displayed. */
5168 bus_dmamap_unload(sc->sc_dmat, dmamap);
5169 continue;
5170 }
5171 } else {
5172 do_csum = false;
5173 cmdlen = 0;
5174 fields = 0;
5175 }
5176
5177 /* Sync the DMA map. */
5178 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5179 BUS_DMASYNC_PREWRITE);
5180
5181 /* Initialize the first transmit descriptor. */
5182 nexttx = sc->sc_txnext;
5183 if (!do_csum) {
5184 /* setup a legacy descriptor */
5185 wm_set_dma_addr(
5186 &sc->sc_txdescs[nexttx].wtx_addr,
5187 dmamap->dm_segs[0].ds_addr);
5188 sc->sc_txdescs[nexttx].wtx_cmdlen =
5189 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5190 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5191 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5192 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5193 NULL) {
5194 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5195 htole32(WTX_CMD_VLE);
5196 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5197 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5198 } else {
5199 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5200 }
5201 dcmdlen = 0;
5202 } else {
5203 /* setup an advanced data descriptor */
5204 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5205 htole64(dmamap->dm_segs[0].ds_addr);
5206 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5207 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5208 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
5209 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5210 htole32(fields);
5211 DPRINTF(WM_DEBUG_TX,
5212 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5213 device_xname(sc->sc_dev), nexttx,
5214 (uint64_t)dmamap->dm_segs[0].ds_addr));
5215 DPRINTF(WM_DEBUG_TX,
5216 ("\t 0x%08x%08x\n", fields,
5217 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5218 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5219 }
5220
5221 lasttx = nexttx;
5222 nexttx = WM_NEXTTX(sc, nexttx);
5223 /*
5224 		 * Fill in the remaining descriptors. The legacy and
5225 		 * advanced formats are identical from here on.
5226 */
5227 for (seg = 1; seg < dmamap->dm_nsegs;
5228 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5229 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5230 htole64(dmamap->dm_segs[seg].ds_addr);
5231 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5232 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5233 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5234 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5235 lasttx = nexttx;
5236
5237 DPRINTF(WM_DEBUG_TX,
5238 ("%s: TX: desc %d: %#" PRIx64 ", "
5239 "len %#04zx\n",
5240 device_xname(sc->sc_dev), nexttx,
5241 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5242 dmamap->dm_segs[seg].ds_len));
5243 }
5244
5245 KASSERT(lasttx != -1);
5246
5247 /*
5248 * Set up the command byte on the last descriptor of
5249 * the packet. If we're in the interrupt delay window,
5250 * delay the interrupt.
5251 */
5252 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5253 (NQTX_CMD_EOP | NQTX_CMD_RS));
5254 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5255 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5256
5257 txs->txs_lastdesc = lasttx;
5258
5259 DPRINTF(WM_DEBUG_TX,
5260 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5261 device_xname(sc->sc_dev),
5262 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5263
5264 /* Sync the descriptors we're using. */
5265 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5266 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5267
5268 /* Give the packet to the chip. */
5269 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5270 sent = true;
5271
5272 DPRINTF(WM_DEBUG_TX,
5273 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5274
5275 DPRINTF(WM_DEBUG_TX,
5276 ("%s: TX: finished transmitting packet, job %d\n",
5277 device_xname(sc->sc_dev), sc->sc_txsnext));
5278
5279 /* Advance the tx pointer. */
5280 sc->sc_txfree -= txs->txs_ndesc;
5281 sc->sc_txnext = nexttx;
5282
5283 sc->sc_txsfree--;
5284 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5285
5286 /* Pass the packet to any BPF listeners. */
5287 bpf_mtap(ifp, m0);
5288 }
5289
5290 if (m0 != NULL) {
5291 ifp->if_flags |= IFF_OACTIVE;
5292 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5293 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5294 m_freem(m0);
5295 }
5296
5297 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5298 /* No more slots; notify upper layer. */
5299 ifp->if_flags |= IFF_OACTIVE;
5300 }
5301
5302 if (sent) {
5303 /* Set a watchdog timer in case the chip flakes out. */
5304 ifp->if_timer = 5;
5305 }
5306 }
5307
5308 /* Interrupt */
5309
5310 /*
5311 * wm_txintr:
5312 *
5313 * Helper; handle transmit interrupts.
5314 */
5315 static void
5316 wm_txintr(struct wm_softc *sc)
5317 {
5318 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5319 struct wm_txsoft *txs;
5320 uint8_t status;
5321 int i;
5322
5323 if (sc->sc_stopping)
5324 return;
5325
5326 ifp->if_flags &= ~IFF_OACTIVE;
5327
5328 /*
5329 * Go through the Tx list and free mbufs for those
5330 * frames which have been transmitted.
5331 */
5332 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5333 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5334 txs = &sc->sc_txsoft[i];
5335
5336 DPRINTF(WM_DEBUG_TX,
5337 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5338
5339 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5340 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5341
5342 status =
5343 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5344 if ((status & WTX_ST_DD) == 0) {
5345 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5346 BUS_DMASYNC_PREREAD);
5347 break;
5348 }
5349
5350 DPRINTF(WM_DEBUG_TX,
5351 ("%s: TX: job %d done: descs %d..%d\n",
5352 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5353 txs->txs_lastdesc));
5354
5355 /*
5356 * XXX We should probably be using the statistics
5357 * XXX registers, but I don't know if they exist
5358 * XXX on chips before the i82544.
5359 */
5360
5361 #ifdef WM_EVENT_COUNTERS
5362 if (status & WTX_ST_TU)
5363 WM_EVCNT_INCR(&sc->sc_ev_tu);
5364 #endif /* WM_EVENT_COUNTERS */
5365
5366 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5367 ifp->if_oerrors++;
5368 if (status & WTX_ST_LC)
5369 log(LOG_WARNING, "%s: late collision\n",
5370 device_xname(sc->sc_dev));
5371 else if (status & WTX_ST_EC) {
5372 ifp->if_collisions += 16;
5373 log(LOG_WARNING, "%s: excessive collisions\n",
5374 device_xname(sc->sc_dev));
5375 }
5376 } else
5377 ifp->if_opackets++;
5378
5379 sc->sc_txfree += txs->txs_ndesc;
5380 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5381 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5382 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5383 m_freem(txs->txs_mbuf);
5384 txs->txs_mbuf = NULL;
5385 }
5386
5387 /* Update the dirty transmit buffer pointer. */
5388 sc->sc_txsdirty = i;
5389 DPRINTF(WM_DEBUG_TX,
5390 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5391
5392 /*
5393 * If there are no more pending transmissions, cancel the watchdog
5394 * timer.
5395 */
5396 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5397 ifp->if_timer = 0;
5398 }
5399
5400 /*
5401 * wm_rxintr:
5402 *
5403 * Helper; handle receive interrupts.
5404 */
5405 static void
5406 wm_rxintr(struct wm_softc *sc)
5407 {
5408 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5409 struct wm_rxsoft *rxs;
5410 struct mbuf *m;
5411 int i, len;
5412 uint8_t status, errors;
5413 uint16_t vlantag;
5414
5415 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5416 rxs = &sc->sc_rxsoft[i];
5417
5418 DPRINTF(WM_DEBUG_RX,
5419 ("%s: RX: checking descriptor %d\n",
5420 device_xname(sc->sc_dev), i));
5421
5422 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5423
5424 status = sc->sc_rxdescs[i].wrx_status;
5425 errors = sc->sc_rxdescs[i].wrx_errors;
5426 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5427 vlantag = sc->sc_rxdescs[i].wrx_special;
5428
5429 if ((status & WRX_ST_DD) == 0) {
5430 /* We have processed all of the receive descriptors. */
5431 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5432 break;
5433 }
5434
5435 if (__predict_false(sc->sc_rxdiscard)) {
5436 DPRINTF(WM_DEBUG_RX,
5437 ("%s: RX: discarding contents of descriptor %d\n",
5438 device_xname(sc->sc_dev), i));
5439 WM_INIT_RXDESC(sc, i);
5440 if (status & WRX_ST_EOP) {
5441 /* Reset our state. */
5442 DPRINTF(WM_DEBUG_RX,
5443 ("%s: RX: resetting rxdiscard -> 0\n",
5444 device_xname(sc->sc_dev)));
5445 sc->sc_rxdiscard = 0;
5446 }
5447 continue;
5448 }
5449
5450 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5451 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5452
5453 m = rxs->rxs_mbuf;
5454
5455 /*
5456 * Add a new receive buffer to the ring, unless of
5457 * course the length is zero. Treat the latter as a
5458 * failed mapping.
5459 */
5460 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5461 /*
5462 * Failed, throw away what we've done so
5463 * far, and discard the rest of the packet.
5464 */
5465 ifp->if_ierrors++;
5466 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5467 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5468 WM_INIT_RXDESC(sc, i);
5469 if ((status & WRX_ST_EOP) == 0)
5470 sc->sc_rxdiscard = 1;
5471 if (sc->sc_rxhead != NULL)
5472 m_freem(sc->sc_rxhead);
5473 WM_RXCHAIN_RESET(sc);
5474 DPRINTF(WM_DEBUG_RX,
5475 ("%s: RX: Rx buffer allocation failed, "
5476 "dropping packet%s\n", device_xname(sc->sc_dev),
5477 sc->sc_rxdiscard ? " (discard)" : ""));
5478 continue;
5479 }
5480
5481 m->m_len = len;
5482 sc->sc_rxlen += len;
5483 DPRINTF(WM_DEBUG_RX,
5484 ("%s: RX: buffer at %p len %d\n",
5485 device_xname(sc->sc_dev), m->m_data, len));
5486
5487 /* If this is not the end of the packet, keep looking. */
5488 if ((status & WRX_ST_EOP) == 0) {
5489 WM_RXCHAIN_LINK(sc, m);
5490 DPRINTF(WM_DEBUG_RX,
5491 ("%s: RX: not yet EOP, rxlen -> %d\n",
5492 device_xname(sc->sc_dev), sc->sc_rxlen));
5493 continue;
5494 }
5495
		/*
		 * Okay, we have the entire packet now.  Except on the
		 * I350, I354, I210 and I211, the chip is configured to
		 * include the FCS (not all chips can be configured to
		 * strip it), so we need to trim it off here.  We may
		 * also need to shorten the previous mbuf in the chain
		 * if the current mbuf is too short to hold the whole
		 * FCS.  Due to an erratum, the RCTL_SECRC bit in the
		 * RCTL register is always set on the I350, so the FCS
		 * is already stripped there and we must not trim it.
		 */
5506 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5507 && (sc->sc_type != WM_T_I210)
5508 && (sc->sc_type != WM_T_I211)) {
5509 if (m->m_len < ETHER_CRC_LEN) {
5510 sc->sc_rxtail->m_len
5511 -= (ETHER_CRC_LEN - m->m_len);
5512 m->m_len = 0;
5513 } else
5514 m->m_len -= ETHER_CRC_LEN;
5515 len = sc->sc_rxlen - ETHER_CRC_LEN;
5516 } else
5517 len = sc->sc_rxlen;
5518
5519 WM_RXCHAIN_LINK(sc, m);
5520
5521 *sc->sc_rxtailp = NULL;
5522 m = sc->sc_rxhead;
5523
5524 WM_RXCHAIN_RESET(sc);
5525
5526 DPRINTF(WM_DEBUG_RX,
5527 ("%s: RX: have entire packet, len -> %d\n",
5528 device_xname(sc->sc_dev), len));
5529
5530 /* If an error occurred, update stats and drop the packet. */
5531 if (errors &
5532 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5533 if (errors & WRX_ER_SE)
5534 log(LOG_WARNING, "%s: symbol error\n",
5535 device_xname(sc->sc_dev));
5536 else if (errors & WRX_ER_SEQ)
5537 log(LOG_WARNING, "%s: receive sequence error\n",
5538 device_xname(sc->sc_dev));
5539 else if (errors & WRX_ER_CE)
5540 log(LOG_WARNING, "%s: CRC error\n",
5541 device_xname(sc->sc_dev));
5542 m_freem(m);
5543 continue;
5544 }
5545
5546 /* No errors. Receive the packet. */
5547 m->m_pkthdr.rcvif = ifp;
5548 m->m_pkthdr.len = len;
5549
5550 /*
5551 * If VLANs are enabled, VLAN packets have been unwrapped
5552 * for us. Associate the tag with the packet.
5553 */
		/* XXX Should check for I350 and I354 */
5555 if ((status & WRX_ST_VP) != 0) {
5556 VLAN_INPUT_TAG(ifp, m,
5557 le16toh(vlantag),
5558 continue);
5559 }
5560
5561 /* Set up checksum info for this packet. */
5562 if ((status & WRX_ST_IXSM) == 0) {
5563 if (status & WRX_ST_IPCS) {
5564 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5565 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5566 if (errors & WRX_ER_IPE)
5567 m->m_pkthdr.csum_flags |=
5568 M_CSUM_IPv4_BAD;
5569 }
5570 if (status & WRX_ST_TCPCS) {
5571 /*
5572 * Note: we don't know if this was TCP or UDP,
5573 * so we just set both bits, and expect the
5574 * upper layers to deal.
5575 */
5576 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5577 m->m_pkthdr.csum_flags |=
5578 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5579 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5580 if (errors & WRX_ER_TCPE)
5581 m->m_pkthdr.csum_flags |=
5582 M_CSUM_TCP_UDP_BAD;
5583 }
5584 }
5585
5586 ifp->if_ipackets++;
5587
5588 WM_RX_UNLOCK(sc);
5589
5590 /* Pass this up to any BPF listeners. */
5591 bpf_mtap(ifp, m);
5592
5593 /* Pass it on. */
5594 (*ifp->if_input)(ifp, m);
5595
5596 WM_RX_LOCK(sc);
5597
5598 if (sc->sc_stopping)
5599 break;
5600 }
5601
5602 /* Update the receive pointer. */
5603 sc->sc_rxptr = i;
5604
5605 DPRINTF(WM_DEBUG_RX,
5606 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5607 }
5608
5609 /*
5610 * wm_linkintr_gmii:
5611 *
5612 * Helper; handle link interrupts for GMII.
5613 */
5614 static void
5615 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5616 {
5617
5618 KASSERT(WM_TX_LOCKED(sc));
5619
5620 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5621 __func__));
5622
5623 if (icr & ICR_LSC) {
5624 DPRINTF(WM_DEBUG_LINK,
5625 ("%s: LINK: LSC -> mii_pollstat\n",
5626 device_xname(sc->sc_dev)));
5627 mii_pollstat(&sc->sc_mii);
5628 if (sc->sc_type == WM_T_82543) {
5629 int miistatus, active;
5630
5631 /*
5632 * With 82543, we need to force speed and
5633 * duplex on the MAC equal to what the PHY
5634 * speed and duplex configuration is.
5635 */
5636 miistatus = sc->sc_mii.mii_media_status;
5637
5638 if (miistatus & IFM_ACTIVE) {
5639 active = sc->sc_mii.mii_media_active;
5640 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5641 switch (IFM_SUBTYPE(active)) {
5642 case IFM_10_T:
5643 sc->sc_ctrl |= CTRL_SPEED_10;
5644 break;
5645 case IFM_100_TX:
5646 sc->sc_ctrl |= CTRL_SPEED_100;
5647 break;
5648 case IFM_1000_T:
5649 sc->sc_ctrl |= CTRL_SPEED_1000;
5650 break;
5651 default:
					/*
					 * Fiber?
					 * Should not enter here.
					 */
5656 printf("unknown media (%x)\n",
5657 active);
5658 break;
5659 }
5660 if (active & IFM_FDX)
5661 sc->sc_ctrl |= CTRL_FD;
5662 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5663 }
5664 } else if ((sc->sc_type == WM_T_ICH8)
5665 && (sc->sc_phytype == WMPHY_IGP_3)) {
5666 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5667 } else if (sc->sc_type == WM_T_PCH) {
5668 wm_k1_gig_workaround_hv(sc,
5669 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5670 }
5671
5672 if ((sc->sc_phytype == WMPHY_82578)
5673 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5674 == IFM_1000_T)) {
5675
5676 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5677 delay(200*1000); /* XXX too big */
5678
5679 /* Link stall fix for link up */
5680 wm_gmii_hv_writereg(sc->sc_dev, 1,
5681 HV_MUX_DATA_CTRL,
5682 HV_MUX_DATA_CTRL_GEN_TO_MAC
5683 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5684 wm_gmii_hv_writereg(sc->sc_dev, 1,
5685 HV_MUX_DATA_CTRL,
5686 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5687 }
5688 }
5689 } else if (icr & ICR_RXSEQ) {
5690 DPRINTF(WM_DEBUG_LINK,
5691 ("%s: LINK Receive sequence error\n",
5692 device_xname(sc->sc_dev)));
5693 }
5694 }
5695
5696 /*
5697 * wm_linkintr_tbi:
5698 *
5699 * Helper; handle link interrupts for TBI mode.
5700 */
5701 static void
5702 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5703 {
5704 uint32_t status;
5705
5706 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5707 __func__));
5708
5709 status = CSR_READ(sc, WMREG_STATUS);
5710 if (icr & ICR_LSC) {
5711 if (status & STATUS_LU) {
5712 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5713 device_xname(sc->sc_dev),
5714 (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: the hardware updates TFCE and RFCE in
			 * CTRL automatically, so re-read CTRL to keep
			 * sc->sc_ctrl in sync.
			 */
5719
5720 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5721 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5722 sc->sc_fcrtl &= ~FCRTL_XONE;
5723 if (status & STATUS_FD)
5724 sc->sc_tctl |=
5725 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5726 else
5727 sc->sc_tctl |=
5728 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5729 if (sc->sc_ctrl & CTRL_TFCE)
5730 sc->sc_fcrtl |= FCRTL_XONE;
5731 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5732 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5733 WMREG_OLD_FCRTL : WMREG_FCRTL,
5734 sc->sc_fcrtl);
5735 sc->sc_tbi_linkup = 1;
5736 } else {
5737 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5738 device_xname(sc->sc_dev)));
5739 sc->sc_tbi_linkup = 0;
5740 }
5741 wm_tbi_set_linkled(sc);
5742 } else if (icr & ICR_RXSEQ) {
5743 DPRINTF(WM_DEBUG_LINK,
5744 ("%s: LINK: Receive sequence error\n",
5745 device_xname(sc->sc_dev)));
5746 }
5747 }
5748
5749 /*
5750 * wm_linkintr:
5751 *
5752 * Helper; handle link interrupts.
5753 */
5754 static void
5755 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5756 {
5757
5758 if (sc->sc_flags & WM_F_HAS_MII)
5759 wm_linkintr_gmii(sc, icr);
5760 else
5761 wm_linkintr_tbi(sc, icr);
5762 }
5763
5764 /*
5765 * wm_intr:
5766 *
5767 * Interrupt service routine.
5768 */
5769 static int
5770 wm_intr(void *arg)
5771 {
5772 struct wm_softc *sc = arg;
5773 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5774 uint32_t icr;
5775 int handled = 0;
5776
5777 while (1 /* CONSTCOND */) {
5778 icr = CSR_READ(sc, WMREG_ICR);
5779 if ((icr & sc->sc_icr) == 0)
5780 break;
5781 rnd_add_uint32(&sc->rnd_source, icr);
5782
5783 WM_RX_LOCK(sc);
5784
5785 if (sc->sc_stopping) {
5786 WM_RX_UNLOCK(sc);
5787 break;
5788 }
5789
5790 handled = 1;
5791
5792 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5793 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5794 DPRINTF(WM_DEBUG_RX,
5795 ("%s: RX: got Rx intr 0x%08x\n",
5796 device_xname(sc->sc_dev),
5797 icr & (ICR_RXDMT0|ICR_RXT0)));
5798 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5799 }
5800 #endif
5801 wm_rxintr(sc);
5802
5803 WM_RX_UNLOCK(sc);
5804 WM_TX_LOCK(sc);
5805
5806 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5807 if (icr & ICR_TXDW) {
5808 DPRINTF(WM_DEBUG_TX,
5809 ("%s: TX: got TXDW interrupt\n",
5810 device_xname(sc->sc_dev)));
5811 WM_EVCNT_INCR(&sc->sc_ev_txdw);
5812 }
5813 #endif
5814 wm_txintr(sc);
5815
5816 if (icr & (ICR_LSC|ICR_RXSEQ)) {
5817 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5818 wm_linkintr(sc, icr);
5819 }
5820
5821 WM_TX_UNLOCK(sc);
5822
5823 if (icr & ICR_RXO) {
5824 #if defined(WM_DEBUG)
5825 log(LOG_WARNING, "%s: Receive overrun\n",
5826 device_xname(sc->sc_dev));
5827 #endif /* defined(WM_DEBUG) */
5828 }
5829 }
5830
5831 if (handled) {
5832 /* Try to get more packets going. */
5833 ifp->if_start(ifp);
5834 }
5835
5836 return handled;
5837 }
5838
5839 /*
5840 * Media related.
5841 * GMII, SGMII, TBI (and SERDES)
5842 */
5843
5844 /* GMII related */
5845
5846 /*
5847 * wm_gmii_reset:
5848 *
5849 * Reset the PHY.
5850 */
5851 static void
5852 wm_gmii_reset(struct wm_softc *sc)
5853 {
5854 uint32_t reg;
5855 int rv;
5856
5857 /* get phy semaphore */
5858 switch (sc->sc_type) {
5859 case WM_T_82571:
5860 case WM_T_82572:
5861 case WM_T_82573:
5862 case WM_T_82574:
5863 case WM_T_82583:
5864 /* XXX should get sw semaphore, too */
5865 rv = wm_get_swsm_semaphore(sc);
5866 break;
5867 case WM_T_82575:
5868 case WM_T_82576:
5869 case WM_T_82580:
5870 case WM_T_82580ER:
5871 case WM_T_I350:
5872 case WM_T_I354:
5873 case WM_T_I210:
5874 case WM_T_I211:
5875 case WM_T_80003:
5876 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5877 break;
5878 case WM_T_ICH8:
5879 case WM_T_ICH9:
5880 case WM_T_ICH10:
5881 case WM_T_PCH:
5882 case WM_T_PCH2:
5883 case WM_T_PCH_LPT:
5884 rv = wm_get_swfwhw_semaphore(sc);
5885 break;
5886 default:
		/* Nothing to do */
5888 rv = 0;
5889 break;
5890 }
5891 if (rv != 0) {
5892 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5893 __func__);
5894 return;
5895 }
5896
5897 switch (sc->sc_type) {
5898 case WM_T_82542_2_0:
5899 case WM_T_82542_2_1:
5900 /* null */
5901 break;
5902 case WM_T_82543:
5903 /*
5904 * With 82543, we need to force speed and duplex on the MAC
5905 * equal to what the PHY speed and duplex configuration is.
5906 * In addition, we need to perform a hardware reset on the PHY
5907 * to take it out of reset.
5908 */
5909 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5910 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5911
5912 /* The PHY reset pin is active-low. */
5913 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5914 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5915 CTRL_EXT_SWDPIN(4));
5916 reg |= CTRL_EXT_SWDPIO(4);
5917
5918 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5919 CSR_WRITE_FLUSH(sc);
5920 delay(10*1000);
5921
5922 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5923 CSR_WRITE_FLUSH(sc);
5924 delay(150);
5925 #if 0
5926 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5927 #endif
5928 delay(20*1000); /* XXX extra delay to get PHY ID? */
5929 break;
5930 case WM_T_82544: /* reset 10000us */
5931 case WM_T_82540:
5932 case WM_T_82545:
5933 case WM_T_82545_3:
5934 case WM_T_82546:
5935 case WM_T_82546_3:
5936 case WM_T_82541:
5937 case WM_T_82541_2:
5938 case WM_T_82547:
5939 case WM_T_82547_2:
5940 case WM_T_82571: /* reset 100us */
5941 case WM_T_82572:
5942 case WM_T_82573:
5943 case WM_T_82574:
5944 case WM_T_82575:
5945 case WM_T_82576:
5946 case WM_T_82580:
5947 case WM_T_82580ER:
5948 case WM_T_I350:
5949 case WM_T_I354:
5950 case WM_T_I210:
5951 case WM_T_I211:
5952 case WM_T_82583:
5953 case WM_T_80003:
5954 /* generic reset */
5955 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5956 CSR_WRITE_FLUSH(sc);
5957 delay(20000);
5958 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5959 CSR_WRITE_FLUSH(sc);
5960 delay(20000);
5961
5962 if ((sc->sc_type == WM_T_82541)
5963 || (sc->sc_type == WM_T_82541_2)
5964 || (sc->sc_type == WM_T_82547)
5965 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
5967 /* XXX add code to set LED after phy reset */
5968 }
5969 break;
5970 case WM_T_ICH8:
5971 case WM_T_ICH9:
5972 case WM_T_ICH10:
5973 case WM_T_PCH:
5974 case WM_T_PCH2:
5975 case WM_T_PCH_LPT:
5976 /* generic reset */
5977 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5978 CSR_WRITE_FLUSH(sc);
5979 delay(100);
5980 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5981 CSR_WRITE_FLUSH(sc);
5982 delay(150);
5983 break;
5984 default:
5985 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5986 __func__);
5987 break;
5988 }
5989
5990 /* release PHY semaphore */
5991 switch (sc->sc_type) {
5992 case WM_T_82571:
5993 case WM_T_82572:
5994 case WM_T_82573:
5995 case WM_T_82574:
5996 case WM_T_82583:
5997 /* XXX should put sw semaphore, too */
5998 wm_put_swsm_semaphore(sc);
5999 break;
6000 case WM_T_82575:
6001 case WM_T_82576:
6002 case WM_T_82580:
6003 case WM_T_82580ER:
6004 case WM_T_I350:
6005 case WM_T_I354:
6006 case WM_T_I210:
6007 case WM_T_I211:
6008 case WM_T_80003:
6009 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6010 break;
6011 case WM_T_ICH8:
6012 case WM_T_ICH9:
6013 case WM_T_ICH10:
6014 case WM_T_PCH:
6015 case WM_T_PCH2:
6016 case WM_T_PCH_LPT:
6017 wm_put_swfwhw_semaphore(sc);
6018 break;
	default:
		/* Nothing to do */
		break;
6023 }
6024
6025 /* get_cfg_done */
6026 wm_get_cfg_done(sc);
6027
6028 /* extra setup */
6029 switch (sc->sc_type) {
6030 case WM_T_82542_2_0:
6031 case WM_T_82542_2_1:
6032 case WM_T_82543:
6033 case WM_T_82544:
6034 case WM_T_82540:
6035 case WM_T_82545:
6036 case WM_T_82545_3:
6037 case WM_T_82546:
6038 case WM_T_82546_3:
6039 case WM_T_82541_2:
6040 case WM_T_82547_2:
6041 case WM_T_82571:
6042 case WM_T_82572:
6043 case WM_T_82573:
6044 case WM_T_82574:
6045 case WM_T_82575:
6046 case WM_T_82576:
6047 case WM_T_82580:
6048 case WM_T_82580ER:
6049 case WM_T_I350:
6050 case WM_T_I354:
6051 case WM_T_I210:
6052 case WM_T_I211:
6053 case WM_T_82583:
6054 case WM_T_80003:
6055 /* null */
6056 break;
6057 case WM_T_82541:
6058 case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
6060 break;
6061 case WM_T_ICH8:
6062 case WM_T_ICH9:
6063 case WM_T_ICH10:
6064 case WM_T_PCH:
6065 case WM_T_PCH2:
6066 case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
6068 delay(10*1000);
6069
6070 if (sc->sc_type == WM_T_PCH)
6071 wm_hv_phy_workaround_ich8lan(sc);
6072
6073 if (sc->sc_type == WM_T_PCH2)
6074 wm_lv_phy_workaround_ich8lan(sc);
6075
6076 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
			/*
			 * Dummy read to clear the PHY wakeup bit after
			 * LCD reset
			 */
6081 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6082 }
6083
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */
6088
6089 /* Configure the LCD with the OEM bits in NVM */
6090 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6091 || (sc->sc_type == WM_T_PCH_LPT)) {
6092 /*
6093 * Disable LPLU.
6094 * XXX It seems that 82567 has LPLU, too.
6095 */
6096 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6097 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6098 reg |= HV_OEM_BITS_ANEGNOW;
6099 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6100 }
6101 break;
6102 default:
6103 panic("%s: unknown type\n", __func__);
6104 break;
6105 }
6106 }
6107
6108 /*
6109 * wm_get_phy_id_82575:
6110 *
 *	Return the PHY ID, or -1 on failure.
6112 */
6113 static int
6114 wm_get_phy_id_82575(struct wm_softc *sc)
6115 {
6116 uint32_t reg;
6117 int phyid = -1;
6118
6119 /* XXX */
6120 if ((sc->sc_flags & WM_F_SGMII) == 0)
6121 return -1;
6122
6123 if (wm_sgmii_uses_mdio(sc)) {
6124 switch (sc->sc_type) {
6125 case WM_T_82575:
6126 case WM_T_82576:
6127 reg = CSR_READ(sc, WMREG_MDIC);
6128 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6129 break;
6130 case WM_T_82580:
6131 case WM_T_I350:
6132 case WM_T_I354:
6133 case WM_T_I210:
6134 case WM_T_I211:
6135 reg = CSR_READ(sc, WMREG_MDICNFG);
6136 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6137 break;
6138 default:
6139 return -1;
6140 }
6141 }
6142
6143 return phyid;
6144 }
6145
6146
6147 /*
6148 * wm_gmii_mediainit:
6149 *
6150 * Initialize media for use on 1000BASE-T devices.
6151 */
6152 static void
6153 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6154 {
6155 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6156 struct mii_data *mii = &sc->sc_mii;
6157 uint32_t reg;
6158
6159 /* We have GMII. */
6160 sc->sc_flags |= WM_F_HAS_MII;
6161
6162 if (sc->sc_type == WM_T_80003)
6163 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6164 else
6165 sc->sc_tipg = TIPG_1000T_DFLT;
6166
6167 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6168 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6169 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6170 || (sc->sc_type == WM_T_I211)) {
6171 reg = CSR_READ(sc, WMREG_PHPM);
6172 reg &= ~PHPM_GO_LINK_D;
6173 CSR_WRITE(sc, WMREG_PHPM, reg);
6174 }
6175
6176 /*
6177 * Let the chip set speed/duplex on its own based on
6178 * signals from the PHY.
6179 * XXXbouyer - I'm not sure this is right for the 80003,
6180 * the em driver only sets CTRL_SLU here - but it seems to work.
6181 */
6182 sc->sc_ctrl |= CTRL_SLU;
6183 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6184
6185 /* Initialize our media structures and probe the GMII. */
6186 mii->mii_ifp = ifp;
6187
6188 /*
6189 * Determine the PHY access method.
6190 *
6191 * For SGMII, use SGMII specific method.
6192 *
6193 * For some devices, we can determine the PHY access method
6194 * from sc_type.
6195 *
	 * For ICH8 variants, it's difficult to determine the PHY access
	 * method from sc_type, so use the PCI product ID for some devices.
	 * For other ICH8 variants, try the IGP method first; if the PHY
	 * can't be detected, fall back to the BM method.
	 */
6201 switch (prodid) {
6202 case PCI_PRODUCT_INTEL_PCH_M_LM:
6203 case PCI_PRODUCT_INTEL_PCH_M_LC:
6204 /* 82577 */
6205 sc->sc_phytype = WMPHY_82577;
6206 mii->mii_readreg = wm_gmii_hv_readreg;
6207 mii->mii_writereg = wm_gmii_hv_writereg;
6208 break;
6209 case PCI_PRODUCT_INTEL_PCH_D_DM:
6210 case PCI_PRODUCT_INTEL_PCH_D_DC:
6211 /* 82578 */
6212 sc->sc_phytype = WMPHY_82578;
6213 mii->mii_readreg = wm_gmii_hv_readreg;
6214 mii->mii_writereg = wm_gmii_hv_writereg;
6215 break;
6216 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6217 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6218 /* 82579 */
6219 sc->sc_phytype = WMPHY_82579;
6220 mii->mii_readreg = wm_gmii_hv_readreg;
6221 mii->mii_writereg = wm_gmii_hv_writereg;
6222 break;
6223 case PCI_PRODUCT_INTEL_I217_LM:
6224 case PCI_PRODUCT_INTEL_I217_V:
6225 case PCI_PRODUCT_INTEL_I218_LM:
6226 case PCI_PRODUCT_INTEL_I218_V:
6227 /* I21[78] */
6228 mii->mii_readreg = wm_gmii_hv_readreg;
6229 mii->mii_writereg = wm_gmii_hv_writereg;
6230 break;
6231 case PCI_PRODUCT_INTEL_82801I_BM:
6232 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6233 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6234 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6235 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6236 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6237 /* 82567 */
6238 sc->sc_phytype = WMPHY_BM;
6239 mii->mii_readreg = wm_gmii_bm_readreg;
6240 mii->mii_writereg = wm_gmii_bm_writereg;
6241 break;
6242 default:
6243 if (((sc->sc_flags & WM_F_SGMII) != 0)
6244 && !wm_sgmii_uses_mdio(sc)){
6245 mii->mii_readreg = wm_sgmii_readreg;
6246 mii->mii_writereg = wm_sgmii_writereg;
6247 } else if (sc->sc_type >= WM_T_80003) {
6248 mii->mii_readreg = wm_gmii_i80003_readreg;
6249 mii->mii_writereg = wm_gmii_i80003_writereg;
6250 } else if (sc->sc_type >= WM_T_I210) {
6251 mii->mii_readreg = wm_gmii_i82544_readreg;
6252 mii->mii_writereg = wm_gmii_i82544_writereg;
6253 } else if (sc->sc_type >= WM_T_82580) {
6254 sc->sc_phytype = WMPHY_82580;
6255 mii->mii_readreg = wm_gmii_82580_readreg;
6256 mii->mii_writereg = wm_gmii_82580_writereg;
6257 } else if (sc->sc_type >= WM_T_82544) {
6258 mii->mii_readreg = wm_gmii_i82544_readreg;
6259 mii->mii_writereg = wm_gmii_i82544_writereg;
6260 } else {
6261 mii->mii_readreg = wm_gmii_i82543_readreg;
6262 mii->mii_writereg = wm_gmii_i82543_writereg;
6263 }
6264 break;
6265 }
6266 mii->mii_statchg = wm_gmii_statchg;
6267
6268 wm_gmii_reset(sc);
6269
6270 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6271 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6272 wm_gmii_mediastatus);
6273
6274 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6275 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6276 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6277 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6278 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6279 /* Attach only one port */
6280 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6281 MII_OFFSET_ANY, MIIF_DOPAUSE);
6282 } else {
6283 int i, id;
6284 uint32_t ctrl_ext;
6285
6286 id = wm_get_phy_id_82575(sc);
6287 if (id != -1) {
6288 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6289 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6290 }
6291 if ((id == -1)
6292 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
6294 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6295 CSR_WRITE(sc, WMREG_CTRL_EXT,
6296 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6297 CSR_WRITE_FLUSH(sc);
6298 delay(300*1000); /* XXX too long */
6299
				/* Try PHY addresses 1 through 7 */
6301 for (i = 1; i < 8; i++)
6302 mii_attach(sc->sc_dev, &sc->sc_mii,
6303 0xffffffff, i, MII_OFFSET_ANY,
6304 MIIF_DOPAUSE);
6305
				/* Restore the previous SFP cage power state */
6307 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6308 }
6309 }
6310 } else {
6311 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6312 MII_OFFSET_ANY, MIIF_DOPAUSE);
6313 }
6314
	/*
	 * If the MAC is a PCH2 or PCH_LPT and no MII PHY was detected,
	 * apply the wm_set_mdio_slow_mode_hv() workaround and retry.
	 */
6319 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6320 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6321 wm_set_mdio_slow_mode_hv(sc);
6322 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6323 MII_OFFSET_ANY, MIIF_DOPAUSE);
6324 }
6325
6326 /*
6327 * (For ICH8 variants)
6328 * If PHY detection failed, use BM's r/w function and retry.
6329 */
6330 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6331 /* if failed, retry with *_bm_* */
6332 mii->mii_readreg = wm_gmii_bm_readreg;
6333 mii->mii_writereg = wm_gmii_bm_writereg;
6334
6335 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6336 MII_OFFSET_ANY, MIIF_DOPAUSE);
6337 }
6338
6339 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
6341 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6342 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6343 sc->sc_phytype = WMPHY_NONE;
6344 } else {
6345 /*
6346 * PHY Found!
6347 * Check PHY type.
6348 */
6349 uint32_t model;
6350 struct mii_softc *child;
6351
6352 child = LIST_FIRST(&mii->mii_phys);
6353 if (device_is_a(child->mii_dev, "igphy")) {
6354 struct igphy_softc *isc = (struct igphy_softc *)child;
6355
6356 model = isc->sc_mii.mii_mpd_model;
6357 if (model == MII_MODEL_yyINTEL_I82566)
6358 sc->sc_phytype = WMPHY_IGP_3;
6359 }
6360
6361 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6362 }
6363 }
6364
6365 /*
6366 * wm_gmii_mediastatus: [ifmedia interface function]
6367 *
6368 * Get the current interface media status on a 1000BASE-T device.
6369 */
6370 static void
6371 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6372 {
6373 struct wm_softc *sc = ifp->if_softc;
6374
6375 ether_mediastatus(ifp, ifmr);
6376 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6377 | sc->sc_flowflags;
6378 }
6379
6380 /*
6381 * wm_gmii_mediachange: [ifmedia interface function]
6382 *
6383 * Set hardware to newly-selected media on a 1000BASE-T device.
6384 */
6385 static int
6386 wm_gmii_mediachange(struct ifnet *ifp)
6387 {
6388 struct wm_softc *sc = ifp->if_softc;
6389 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6390 int rc;
6391
6392 if ((ifp->if_flags & IFF_UP) == 0)
6393 return 0;
6394
6395 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6396 sc->sc_ctrl |= CTRL_SLU;
6397 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6398 || (sc->sc_type > WM_T_82543)) {
6399 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6400 } else {
6401 sc->sc_ctrl &= ~CTRL_ASDE;
6402 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6403 if (ife->ifm_media & IFM_FDX)
6404 sc->sc_ctrl |= CTRL_FD;
6405 switch (IFM_SUBTYPE(ife->ifm_media)) {
6406 case IFM_10_T:
6407 sc->sc_ctrl |= CTRL_SPEED_10;
6408 break;
6409 case IFM_100_TX:
6410 sc->sc_ctrl |= CTRL_SPEED_100;
6411 break;
6412 case IFM_1000_T:
6413 sc->sc_ctrl |= CTRL_SPEED_1000;
6414 break;
6415 default:
6416 panic("wm_gmii_mediachange: bad media 0x%x",
6417 ife->ifm_media);
6418 }
6419 }
6420 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6421 if (sc->sc_type <= WM_T_82543)
6422 wm_gmii_reset(sc);
6423
6424 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6425 return 0;
6426 return rc;
6427 }
6428
6429 #define MDI_IO CTRL_SWDPIN(2)
6430 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6431 #define MDI_CLK CTRL_SWDPIN(3)
6432
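/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang `nbits' bits of `data' out to the PHY, MSB first,
 *	using the 82543's software-defined pins: each bit is driven
 *	on MDI_IO and latched by pulsing MDI_CLK.
 */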
6433 static void
6434 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6435 {
6436 uint32_t i, v;
6437
6438 v = CSR_READ(sc, WMREG_CTRL);
6439 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6440 v |= MDI_DIR | CTRL_SWDPIO(3);
6441
6442 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6443 if (data & i)
6444 v |= MDI_IO;
6445 else
6446 v &= ~MDI_IO;
6447 CSR_WRITE(sc, WMREG_CTRL, v);
6448 CSR_WRITE_FLUSH(sc);
6449 delay(10);
6450 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6451 CSR_WRITE_FLUSH(sc);
6452 delay(10);
6453 CSR_WRITE(sc, WMREG_CTRL, v);
6454 CSR_WRITE_FLUSH(sc);
6455 delay(10);
6456 }
6457 }
6458
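/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang a 16-bit value in from the PHY, MSB first, clocking
 *	MDI_CLK around the turnaround cycles before and after the
 *	data bits.
 */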
6459 static uint32_t
6460 wm_i82543_mii_recvbits(struct wm_softc *sc)
6461 {
6462 uint32_t v, i, data = 0;
6463
6464 v = CSR_READ(sc, WMREG_CTRL);
6465 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6466 v |= CTRL_SWDPIO(3);
6467
6468 CSR_WRITE(sc, WMREG_CTRL, v);
6469 CSR_WRITE_FLUSH(sc);
6470 delay(10);
6471 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6472 CSR_WRITE_FLUSH(sc);
6473 delay(10);
6474 CSR_WRITE(sc, WMREG_CTRL, v);
6475 CSR_WRITE_FLUSH(sc);
6476 delay(10);
6477
6478 for (i = 0; i < 16; i++) {
6479 data <<= 1;
6480 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6481 CSR_WRITE_FLUSH(sc);
6482 delay(10);
6483 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6484 data |= 1;
6485 CSR_WRITE(sc, WMREG_CTRL, v);
6486 CSR_WRITE_FLUSH(sc);
6487 delay(10);
6488 }
6489
6490 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6491 CSR_WRITE_FLUSH(sc);
6492 delay(10);
6493 CSR_WRITE(sc, WMREG_CTRL, v);
6494 CSR_WRITE_FLUSH(sc);
6495 delay(10);
6496
6497 return data;
6498 }
6499
6500 #undef MDI_IO
6501 #undef MDI_DIR
6502 #undef MDI_CLK
6503
6504 /*
6505 * wm_gmii_i82543_readreg: [mii interface function]
6506 *
6507 * Read a PHY register on the GMII (i82543 version).
6508 */
6509 static int
6510 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6511 {
6512 struct wm_softc *sc = device_private(self);
6513 int rv;
6514
6515 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6516 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6517 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6518 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6519
6520 DPRINTF(WM_DEBUG_GMII,
6521 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6522 device_xname(sc->sc_dev), phy, reg, rv));
6523
6524 return rv;
6525 }
6526
6527 /*
6528 * wm_gmii_i82543_writereg: [mii interface function]
6529 *
6530 * Write a PHY register on the GMII (i82543 version).
6531 */
6532 static void
6533 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6534 {
6535 struct wm_softc *sc = device_private(self);
6536
6537 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6538 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6539 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6540 (MII_COMMAND_START << 30), 32);
6541 }
6542
6543 /*
6544 * wm_gmii_i82544_readreg: [mii interface function]
6545 *
6546 * Read a PHY register on the GMII.
6547 */
6548 static int
6549 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6550 {
6551 struct wm_softc *sc = device_private(self);
6552 uint32_t mdic = 0;
6553 int i, rv;
6554
6555 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6556 MDIC_REGADD(reg));
6557
6558 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6559 mdic = CSR_READ(sc, WMREG_MDIC);
6560 if (mdic & MDIC_READY)
6561 break;
6562 delay(50);
6563 }
6564
6565 if ((mdic & MDIC_READY) == 0) {
6566 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6567 device_xname(sc->sc_dev), phy, reg);
6568 rv = 0;
6569 } else if (mdic & MDIC_E) {
6570 #if 0 /* This is normal if no PHY is present. */
6571 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6572 device_xname(sc->sc_dev), phy, reg);
6573 #endif
6574 rv = 0;
6575 } else {
6576 rv = MDIC_DATA(mdic);
6577 if (rv == 0xffff)
6578 rv = 0;
6579 }
6580
6581 return rv;
6582 }
6583
6584 /*
6585 * wm_gmii_i82544_writereg: [mii interface function]
6586 *
6587 * Write a PHY register on the GMII.
6588 */
6589 static void
6590 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6591 {
6592 struct wm_softc *sc = device_private(self);
6593 uint32_t mdic = 0;
6594 int i;
6595
6596 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6597 MDIC_REGADD(reg) | MDIC_DATA(val));
6598
6599 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6600 mdic = CSR_READ(sc, WMREG_MDIC);
6601 if (mdic & MDIC_READY)
6602 break;
6603 delay(50);
6604 }
6605
6606 if ((mdic & MDIC_READY) == 0)
6607 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6608 device_xname(sc->sc_dev), phy, reg);
6609 else if (mdic & MDIC_E)
6610 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6611 device_xname(sc->sc_dev), phy, reg);
6612 }
6613
6614 /*
6615 * wm_gmii_i80003_readreg: [mii interface function]
6616 *
 *	Read a PHY register on the kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6620 */
6621 static int
6622 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6623 {
6624 struct wm_softc *sc = device_private(self);
6625 int sem;
6626 int rv;
6627
6628 if (phy != 1) /* only one PHY on kumeran bus */
6629 return 0;
6630
6631 sem = swfwphysem[sc->sc_funcid];
6632 if (wm_get_swfw_semaphore(sc, sem)) {
6633 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6634 __func__);
6635 return 0;
6636 }
6637
6638 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6639 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6640 reg >> GG82563_PAGE_SHIFT);
6641 } else {
6642 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6643 reg >> GG82563_PAGE_SHIFT);
6644 }
	/* Wait an extra 200us to work around a bug with the MDIC ready bit */
6646 delay(200);
6647 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6648 delay(200);
6649
6650 wm_put_swfw_semaphore(sc, sem);
6651 return rv;
6652 }
6653
6654 /*
6655 * wm_gmii_i80003_writereg: [mii interface function]
6656 *
 *	Write a PHY register on the kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6660 */
6661 static void
6662 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6663 {
6664 struct wm_softc *sc = device_private(self);
6665 int sem;
6666
6667 if (phy != 1) /* only one PHY on kumeran bus */
6668 return;
6669
6670 sem = swfwphysem[sc->sc_funcid];
6671 if (wm_get_swfw_semaphore(sc, sem)) {
6672 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6673 __func__);
6674 return;
6675 }
6676
6677 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6678 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6679 reg >> GG82563_PAGE_SHIFT);
6680 } else {
6681 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6682 reg >> GG82563_PAGE_SHIFT);
6683 }
	/* Wait an extra 200us to work around a bug with the MDIC ready bit */
6685 delay(200);
6686 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6687 delay(200);
6688
6689 wm_put_swfw_semaphore(sc, sem);
6690 }
6691
6692 /*
6693 * wm_gmii_bm_readreg: [mii interface function]
6694 *
 *	Read a PHY register on the BM PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6698 */
6699 static int
6700 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6701 {
6702 struct wm_softc *sc = device_private(self);
6703 int sem;
6704 int rv;
6705
6706 sem = swfwphysem[sc->sc_funcid];
6707 if (wm_get_swfw_semaphore(sc, sem)) {
6708 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6709 __func__);
6710 return 0;
6711 }
6712
6713 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6714 if (phy == 1)
6715 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6716 reg);
6717 else
6718 wm_gmii_i82544_writereg(self, phy,
6719 GG82563_PHY_PAGE_SELECT,
6720 reg >> GG82563_PAGE_SHIFT);
6721 }
6722
6723 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6724 wm_put_swfw_semaphore(sc, sem);
6725 return rv;
6726 }
6727
6728 /*
6729 * wm_gmii_bm_writereg: [mii interface function]
6730 *
 *	Write a PHY register on the BM PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6734 */
6735 static void
6736 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6737 {
6738 struct wm_softc *sc = device_private(self);
6739 int sem;
6740
6741 sem = swfwphysem[sc->sc_funcid];
6742 if (wm_get_swfw_semaphore(sc, sem)) {
6743 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6744 __func__);
6745 return;
6746 }
6747
6748 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6749 if (phy == 1)
6750 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6751 reg);
6752 else
6753 wm_gmii_i82544_writereg(self, phy,
6754 GG82563_PHY_PAGE_SELECT,
6755 reg >> GG82563_PAGE_SHIFT);
6756 }
6757
6758 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6759 wm_put_swfw_semaphore(sc, sem);
6760 }
6761
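/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register on
 *	page 800: enable wakeup register access on page 769, select
 *	page 800, issue the address and data opcodes, then restore the
 *	original enable bits.
 */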
6762 static void
6763 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6764 {
6765 struct wm_softc *sc = device_private(self);
6766 uint16_t regnum = BM_PHY_REG_NUM(offset);
6767 uint16_t wuce;
6768
6769 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6770 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
6772 }
6773
6774 /* Set page 769 */
6775 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6776 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6777
6778 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6779
6780 wuce &= ~BM_WUC_HOST_WU_BIT;
6781 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6782 wuce | BM_WUC_ENABLE_BIT);
6783
6784 /* Select page 800 */
6785 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6786 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6787
6788 /* Write page 800 */
6789 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6790
6791 if (rd)
6792 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6793 else
6794 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6795
6796 /* Set page 769 */
6797 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6798 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6799
6800 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6801 }
6802
6803 /*
6804 * wm_gmii_hv_readreg: [mii interface function]
6805 *
 *	Read a PHY register on the HV (PCH) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6809 */
6810 static int
6811 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6812 {
6813 struct wm_softc *sc = device_private(self);
6814 uint16_t page = BM_PHY_REG_PAGE(reg);
6815 uint16_t regnum = BM_PHY_REG_NUM(reg);
6816 uint16_t val;
6817 int rv;
6818
6819 if (wm_get_swfwhw_semaphore(sc)) {
6820 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6821 __func__);
6822 return 0;
6823 }
6824
6825 /* XXX Workaround failure in MDIO access while cable is disconnected */
6826 if (sc->sc_phytype == WMPHY_82577) {
6827 /* XXX must write */
6828 }
6829
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
		return val;
	}
6835
	/*
	 * Pages other than 0 but lower than page 768 work differently
	 * than the rest, so they would need their own function; this is
	 * not implemented yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return 0;
	}
6844
6845 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6846 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6847 page << BME1000_PAGE_SHIFT);
6848 }
6849
6850 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6851 wm_put_swfwhw_semaphore(sc);
6852 return rv;
6853 }
6854
6855 /*
6856 * wm_gmii_hv_writereg: [mii interface function]
6857 *
 *	Write a PHY register on the HV (PCH) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6861 */
6862 static void
6863 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6864 {
6865 struct wm_softc *sc = device_private(self);
6866 uint16_t page = BM_PHY_REG_PAGE(reg);
6867 uint16_t regnum = BM_PHY_REG_NUM(reg);
6868
6869 if (wm_get_swfwhw_semaphore(sc)) {
6870 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6871 __func__);
6872 return;
6873 }
6874
6875 /* XXX Workaround failure in MDIO access while cable is disconnected */
6876
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
		return;
	}
6885
	/*
	 * Pages other than 0 but lower than page 768 work differently
	 * than the rest, so they would need their own function; this is
	 * not implemented yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return;
	}
6894
6895 /*
6896 * XXX Workaround MDIO accesses being disabled after entering IEEE
6897 * Power Down (whenever bit 11 of the PHY control register is set)
6898 */
6899
6900 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6901 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6902 page << BME1000_PAGE_SHIFT);
6903 }
6904
6905 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6906 wm_put_swfwhw_semaphore(sc);
6907 }
6908
6909 /*
6910 * wm_gmii_82580_readreg: [mii interface function]
6911 *
6912 * Read a PHY register on the 82580 and I350.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6915 */
6916 static int
6917 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6918 {
6919 struct wm_softc *sc = device_private(self);
6920 int sem;
6921 int rv;
6922
6923 sem = swfwphysem[sc->sc_funcid];
6924 if (wm_get_swfw_semaphore(sc, sem)) {
6925 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6926 __func__);
6927 return 0;
6928 }
6929
6930 rv = wm_gmii_i82544_readreg(self, phy, reg);
6931
6932 wm_put_swfw_semaphore(sc, sem);
6933 return rv;
6934 }
6935
6936 /*
6937 * wm_gmii_82580_writereg: [mii interface function]
6938 *
6939 * Write a PHY register on the 82580 and I350.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6942 */
6943 static void
6944 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6945 {
6946 struct wm_softc *sc = device_private(self);
6947 int sem;
6948
6949 sem = swfwphysem[sc->sc_funcid];
6950 if (wm_get_swfw_semaphore(sc, sem)) {
6951 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6952 __func__);
6953 return;
6954 }
6955
6956 wm_gmii_i82544_writereg(self, phy, reg, val);
6957
6958 wm_put_swfw_semaphore(sc, sem);
6959 }
6960
6961 /*
6962 * wm_gmii_statchg: [mii interface function]
6963 *
6964 * Callback from MII layer when media changes.
6965 */
6966 static void
6967 wm_gmii_statchg(struct ifnet *ifp)
6968 {
6969 struct wm_softc *sc = ifp->if_softc;
6970 struct mii_data *mii = &sc->sc_mii;
6971
6972 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6973 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6974 sc->sc_fcrtl &= ~FCRTL_XONE;
6975
6976 /*
6977 * Get flow control negotiation result.
6978 */
6979 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6980 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6981 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6982 mii->mii_media_active &= ~IFM_ETH_FMASK;
6983 }
6984
6985 if (sc->sc_flowflags & IFM_FLOW) {
6986 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6987 sc->sc_ctrl |= CTRL_TFCE;
6988 sc->sc_fcrtl |= FCRTL_XONE;
6989 }
6990 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6991 sc->sc_ctrl |= CTRL_RFCE;
6992 }
6993
6994 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6995 DPRINTF(WM_DEBUG_LINK,
6996 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
6997 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6998 } else {
6999 DPRINTF(WM_DEBUG_LINK,
7000 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7001 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7002 }
7003
7004 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7005 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7006 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7007 : WMREG_FCRTL, sc->sc_fcrtl);
7008 if (sc->sc_type == WM_T_80003) {
7009 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7010 case IFM_1000_T:
7011 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7012 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7013 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7014 break;
7015 default:
7016 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7017 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7018 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7019 break;
7020 }
7021 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7022 }
7023 }
7024
7025 /*
7026 * wm_kmrn_readreg:
7027 *
7028 * Read a kumeran register
7029 */
7030 static int
7031 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7032 {
7033 int rv;
7034
	/* sc_flags is a bit mask, so test the lock flags with & */
	if (sc->sc_flags & WM_F_LOCK_SWFW) {
		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n", __func__);
			return 0;
		}
	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
		if (wm_get_swfwhw_semaphore(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n", __func__);
			return 0;
		}
	}
7048
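	/* Issue the read by writing the offset with KUMCTRLSTA_REN set */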
7049 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7050 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7051 KUMCTRLSTA_REN);
7052 CSR_WRITE_FLUSH(sc);
7053 delay(2);
7054
7055 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7056
	if (sc->sc_flags & WM_F_LOCK_SWFW)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
		wm_put_swfwhw_semaphore(sc);
7061
7062 return rv;
7063 }
7064
7065 /*
7066 * wm_kmrn_writereg:
7067 *
7068 * Write a kumeran register
7069 */
7070 static void
7071 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7072 {
7073
	/* sc_flags is a bit mask, so test the lock flags with & */
	if (sc->sc_flags & WM_F_LOCK_SWFW) {
		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n", __func__);
			return;
		}
	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
		if (wm_get_swfwhw_semaphore(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n", __func__);
			return;
		}
	}
7087
7088 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7089 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7090 (val & KUMCTRLSTA_MASK));
7091
	if (sc->sc_flags & WM_F_LOCK_SWFW)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
		wm_put_swfwhw_semaphore(sc);
7096 }
7097
7098 /* SGMII related */
7099
7100 /*
7101 * wm_sgmii_uses_mdio
7102 *
7103 * Check whether the transaction is to the internal PHY or the external
7104 * MDIO interface. Return true if it's MDIO.
7105 */
7106 static bool
7107 wm_sgmii_uses_mdio(struct wm_softc *sc)
7108 {
7109 uint32_t reg;
7110 bool ismdio = false;
7111
7112 switch (sc->sc_type) {
7113 case WM_T_82575:
7114 case WM_T_82576:
7115 reg = CSR_READ(sc, WMREG_MDIC);
7116 ismdio = ((reg & MDIC_DEST) != 0);
7117 break;
7118 case WM_T_82580:
7119 case WM_T_82580ER:
7120 case WM_T_I350:
7121 case WM_T_I354:
7122 case WM_T_I210:
7123 case WM_T_I211:
7124 reg = CSR_READ(sc, WMREG_MDICNFG);
7125 ismdio = ((reg & MDICNFG_DEST) != 0);
7126 break;
7127 default:
7128 break;
7129 }
7130
7131 return ismdio;
7132 }
7133
7134 /*
7135 * wm_sgmii_readreg: [mii interface function]
7136 *
 *	Read a PHY register via the SGMII I2C interface.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
7140 */
7141 static int
7142 wm_sgmii_readreg(device_t self, int phy, int reg)
7143 {
7144 struct wm_softc *sc = device_private(self);
7145 uint32_t i2ccmd;
7146 int i, rv;
7147
7148 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7149 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7150 __func__);
7151 return 0;
7152 }
7153
7154 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7155 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7156 | I2CCMD_OPCODE_READ;
7157 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7158
7159 /* Poll the ready bit */
7160 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7161 delay(50);
7162 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7163 if (i2ccmd & I2CCMD_READY)
7164 break;
7165 }
7166 if ((i2ccmd & I2CCMD_READY) == 0)
7167 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7168 if ((i2ccmd & I2CCMD_ERROR) != 0)
7169 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7170
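	/* The 16-bit data in I2CCMD is byte-swapped; convert to host order */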
7171 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7172
7173 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7174 return rv;
7175 }
7176
7177 /*
7178 * wm_sgmii_writereg: [mii interface function]
7179 *
7180 * Write a PHY register on the SGMII.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
7183 */
7184 static void
7185 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7186 {
7187 struct wm_softc *sc = device_private(self);
7188 uint32_t i2ccmd;
7189 int i;
7190
7191 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7192 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7193 __func__);
7194 return;
7195 }
7196
7197 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7198 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7199 | I2CCMD_OPCODE_WRITE;
7200 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7201
7202 /* Poll the ready bit */
7203 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7204 delay(50);
7205 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7206 if (i2ccmd & I2CCMD_READY)
7207 break;
7208 }
7209 if ((i2ccmd & I2CCMD_READY) == 0)
7210 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7211 if ((i2ccmd & I2CCMD_ERROR) != 0)
7212 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7213
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7215 }
7216
7217 /* TBI related */
7218
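/*
 * wm_check_for_link:
 *
 *	Examine the signal-detect pin, STATUS_LU and the RXCW /C/
 *	ordered sets to decide whether to force link up or to go back
 *	to autonegotiation; see the truth table in the function body.
 */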
7219 /* XXX Currently TBI only */
7220 static int
7221 wm_check_for_link(struct wm_softc *sc)
7222 {
7223 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7224 uint32_t rxcw;
7225 uint32_t ctrl;
7226 uint32_t status;
7227 uint32_t sig;
7228
7229 if (sc->sc_mediatype & WMP_F_SERDES) {
7230 sc->sc_tbi_linkup = 1;
7231 return 0;
7232 }
7233
7234 rxcw = CSR_READ(sc, WMREG_RXCW);
7235 ctrl = CSR_READ(sc, WMREG_CTRL);
7236 status = CSR_READ(sc, WMREG_STATUS);
7237
7238 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7239
7240 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7241 device_xname(sc->sc_dev), __func__,
7242 ((ctrl & CTRL_SWDPIN(1)) == sig),
7243 ((status & STATUS_LU) != 0),
7244 ((rxcw & RXCW_C) != 0)
7245 ));
7246
7247 /*
7248 * SWDPIN LU RXCW
7249 * 0 0 0
7250 * 0 0 1 (should not happen)
7251 * 0 1 0 (should not happen)
7252 * 0 1 1 (should not happen)
7253 * 1 0 0 Disable autonego and force linkup
7254 * 1 0 1 got /C/ but not linkup yet
7255 * 1 1 0 (linkup)
7256 * 1 1 1 If IFM_AUTO, back to autonego
7257 *
7258 */
7259 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7260 && ((status & STATUS_LU) == 0)
7261 && ((rxcw & RXCW_C) == 0)) {
7262 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7263 __func__));
7264 sc->sc_tbi_linkup = 0;
7265 /* Disable auto-negotiation in the TXCW register */
7266 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7267
7268 /*
7269 * Force link-up and also force full-duplex.
7270 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so refresh sc->sc_ctrl from the value
		 * just read.
7273 */
7274 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7275 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7276 } else if (((status & STATUS_LU) != 0)
7277 && ((rxcw & RXCW_C) != 0)
7278 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7279 sc->sc_tbi_linkup = 1;
7280 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7281 __func__));
7282 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7283 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7284 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7285 && ((rxcw & RXCW_C) != 0)) {
7286 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7287 } else {
7288 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7289 status));
7290 }
7291
7292 return 0;
7293 }
7294
7295 /*
7296 * wm_tbi_mediainit:
7297 *
7298 * Initialize media for use on 1000BASE-X devices.
7299 */
7300 static void
7301 wm_tbi_mediainit(struct wm_softc *sc)
7302 {
7303 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7304 const char *sep = "";
7305
7306 if (sc->sc_type < WM_T_82543)
7307 sc->sc_tipg = TIPG_WM_DFLT;
7308 else
7309 sc->sc_tipg = TIPG_LG_DFLT;
7310
7311 sc->sc_tbi_anegticks = 5;
7312
7313 /* Initialize our media structures */
7314 sc->sc_mii.mii_ifp = ifp;
7315
7316 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7317 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7318 wm_tbi_mediastatus);
7319
7320 /*
7321 * SWD Pins:
7322 *
7323 * 0 = Link LED (output)
7324 * 1 = Loss Of Signal (input)
7325 */
7326 sc->sc_ctrl |= CTRL_SWDPIO(0);
7327 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7328 if (sc->sc_mediatype & WMP_F_SERDES)
7329 sc->sc_ctrl &= ~CTRL_LRST;
7330
7331 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7332
7333 #define ADD(ss, mm, dd) \
7334 do { \
7335 aprint_normal("%s%s", sep, ss); \
7336 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7337 sep = ", "; \
7338 } while (/*CONSTCOND*/0)
7339
7340 aprint_normal_dev(sc->sc_dev, "");
7341
7342 /* Only 82545 is LX */
7343 if (sc->sc_type == WM_T_82545) {
7344 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7345 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7346 } else {
7347 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7348 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7349 }
7350 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7351 aprint_normal("\n");
7352
7353 #undef ADD
7354
7355 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7356 }
7357
7358 /*
7359 * wm_tbi_mediastatus: [ifmedia interface function]
7360 *
7361 * Get the current interface media status on a 1000BASE-X device.
7362 */
7363 static void
7364 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7365 {
7366 struct wm_softc *sc = ifp->if_softc;
7367 uint32_t ctrl, status;
7368
7369 ifmr->ifm_status = IFM_AVALID;
7370 ifmr->ifm_active = IFM_ETHER;
7371
7372 status = CSR_READ(sc, WMREG_STATUS);
7373 if ((status & STATUS_LU) == 0) {
7374 ifmr->ifm_active |= IFM_NONE;
7375 return;
7376 }
7377
7378 ifmr->ifm_status |= IFM_ACTIVE;
7379 /* Only 82545 is LX */
7380 if (sc->sc_type == WM_T_82545)
7381 ifmr->ifm_active |= IFM_1000_LX;
7382 else
7383 ifmr->ifm_active |= IFM_1000_SX;
7384 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7385 ifmr->ifm_active |= IFM_FDX;
7386 else
7387 ifmr->ifm_active |= IFM_HDX;
7388 ctrl = CSR_READ(sc, WMREG_CTRL);
7389 if (ctrl & CTRL_RFCE)
7390 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7391 if (ctrl & CTRL_TFCE)
7392 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7393 }
7394
7395 /*
7396 * wm_tbi_mediachange: [ifmedia interface function]
7397 *
7398 * Set hardware to newly-selected media on a 1000BASE-X device.
7399 */
7400 static int
7401 wm_tbi_mediachange(struct ifnet *ifp)
7402 {
7403 struct wm_softc *sc = ifp->if_softc;
7404 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7405 uint32_t status;
7406 int i;
7407
7408 if (sc->sc_mediatype & WMP_F_SERDES)
7409 return 0;
7410
7411 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7412 || (sc->sc_type >= WM_T_82575))
7413 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7414
7415 /* XXX power_up_serdes_link_82575() */
7416
7417 sc->sc_ctrl &= ~CTRL_LRST;
7418 sc->sc_txcw = TXCW_ANE;
7419 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7420 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7421 else if (ife->ifm_media & IFM_FDX)
7422 sc->sc_txcw |= TXCW_FD;
7423 else
7424 sc->sc_txcw |= TXCW_HD;
7425
7426 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7427 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7428
7429 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7430 device_xname(sc->sc_dev), sc->sc_txcw));
7431 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7432 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7433 CSR_WRITE_FLUSH(sc);
7434 delay(1000);
7435
7436 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7437 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7438
	/*
	 * On chips newer than the 82544, the signal-detect sense of
	 * CTRL_SWDPIN(1) is active-high: the bit is set if the optics
	 * detect a signal.  On the 82544 and older it is active-low.
	 */
7443 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7444 /* Have signal; wait for the link to come up. */
7445 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7446 delay(10000);
7447 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7448 break;
7449 }
7450
7451 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7452 device_xname(sc->sc_dev),i));
7453
7454 status = CSR_READ(sc, WMREG_STATUS);
7455 DPRINTF(WM_DEBUG_LINK,
7456 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7457 device_xname(sc->sc_dev),status, STATUS_LU));
7458 if (status & STATUS_LU) {
7459 /* Link is up. */
7460 DPRINTF(WM_DEBUG_LINK,
7461 ("%s: LINK: set media -> link up %s\n",
7462 device_xname(sc->sc_dev),
7463 (status & STATUS_FD) ? "FDX" : "HDX"));
7464
			/*
			 * NOTE: the hardware updates TFCE and RFCE in
			 * CTRL automatically, so re-read CTRL to keep
			 * sc->sc_ctrl in sync.
			 */
7469 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7470 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7471 sc->sc_fcrtl &= ~FCRTL_XONE;
7472 if (status & STATUS_FD)
7473 sc->sc_tctl |=
7474 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7475 else
7476 sc->sc_tctl |=
7477 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7478 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7479 sc->sc_fcrtl |= FCRTL_XONE;
7480 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7481 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7482 WMREG_OLD_FCRTL : WMREG_FCRTL,
7483 sc->sc_fcrtl);
7484 sc->sc_tbi_linkup = 1;
7485 } else {
7486 if (i == WM_LINKUP_TIMEOUT)
7487 wm_check_for_link(sc);
7488 /* Link is down. */
7489 DPRINTF(WM_DEBUG_LINK,
7490 ("%s: LINK: set media -> link down\n",
7491 device_xname(sc->sc_dev)));
7492 sc->sc_tbi_linkup = 0;
7493 }
7494 } else {
7495 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7496 device_xname(sc->sc_dev)));
7497 sc->sc_tbi_linkup = 0;
7498 }
7499
7500 wm_tbi_set_linkled(sc);
7501
7502 return 0;
7503 }
7504
7505 /*
7506 * wm_tbi_set_linkled:
7507 *
7508 * Update the link LED on 1000BASE-X devices.
7509 */
7510 static void
7511 wm_tbi_set_linkled(struct wm_softc *sc)
7512 {
7513
7514 if (sc->sc_tbi_linkup)
7515 sc->sc_ctrl |= CTRL_SWDPIN(0);
7516 else
7517 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7518
7519 /* 82540 or newer devices are active low */
7520 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7521
7522 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7523 }
7524
7525 /*
7526 * wm_tbi_check_link:
7527 *
7528 * Check the link on 1000BASE-X devices.
7529 */
7530 static void
7531 wm_tbi_check_link(struct wm_softc *sc)
7532 {
7533 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7534 uint32_t status;
7535
7536 KASSERT(WM_TX_LOCKED(sc));
7537
7538 if (sc->sc_mediatype & WMP_F_SERDES) {
7539 sc->sc_tbi_linkup = 1;
7540 return;
7541 }
7542
7543 status = CSR_READ(sc, WMREG_STATUS);
7544
7545 /* XXX is this needed? */
7546 (void)CSR_READ(sc, WMREG_RXCW);
7547 (void)CSR_READ(sc, WMREG_CTRL);
7548
7549 /* set link status */
7550 if ((status & STATUS_LU) == 0) {
7551 DPRINTF(WM_DEBUG_LINK,
7552 ("%s: LINK: checklink -> down\n",
7553 device_xname(sc->sc_dev)));
7554 sc->sc_tbi_linkup = 0;
7555 } else if (sc->sc_tbi_linkup == 0) {
7556 DPRINTF(WM_DEBUG_LINK,
7557 ("%s: LINK: checklink -> up %s\n",
7558 device_xname(sc->sc_dev),
7559 (status & STATUS_FD) ? "FDX" : "HDX"));
7560 sc->sc_tbi_linkup = 1;
7561 }
7562
7563 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7564 && ((status & STATUS_LU) == 0)) {
7565 sc->sc_tbi_linkup = 0;
7566 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7567 /* If the timer expired, retry autonegotiation */
7568 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7569 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7570 sc->sc_tbi_ticks = 0;
7571 /*
7572 * Reset the link, and let autonegotiation do
7573 * its thing
7574 */
7575 sc->sc_ctrl |= CTRL_LRST;
7576 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7577 CSR_WRITE_FLUSH(sc);
7578 delay(1000);
7579 sc->sc_ctrl &= ~CTRL_LRST;
7580 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7581 CSR_WRITE_FLUSH(sc);
7582 delay(1000);
7583 CSR_WRITE(sc, WMREG_TXCW,
7584 sc->sc_txcw & ~TXCW_ANE);
7585 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7586 }
7587 }
7588 }
7589
7590 wm_tbi_set_linkled(sc);
7591 }
7592
7593 /* SFP related */
7594
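/*
 * wm_sfp_read_data_byte:
 *
 *	Read a single byte from the SFP module's EEPROM through the
 *	I2CCMD register, polling for command completion.
 */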
7595 static int
7596 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7597 {
7598 uint32_t i2ccmd;
7599 int i;
7600
7601 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7602 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7603
7604 /* Poll the ready bit */
7605 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7606 delay(50);
7607 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7608 if (i2ccmd & I2CCMD_READY)
7609 break;
7610 }
7611 if ((i2ccmd & I2CCMD_READY) == 0)
7612 return -1;
7613 if ((i2ccmd & I2CCMD_ERROR) != 0)
7614 return -1;
7615
7616 *data = i2ccmd & 0x00ff;
7617
7618 return 0;
7619 }
7620
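/*
 * wm_sfp_get_media_type:
 *
 *	Determine the media type (SERDES, SGMII copper or unknown) from
 *	the SFP module's identifier and Ethernet compliance code bytes.
 */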
7621 static uint32_t
7622 wm_sfp_get_media_type(struct wm_softc *sc)
7623 {
7624 uint32_t ctrl_ext;
7625 uint8_t val = 0;
7626 int timeout = 3;
7627 uint32_t mediatype = WMP_F_UNKNOWN;
7628 int rv = -1;
7629
7630 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7631 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7632 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7633 CSR_WRITE_FLUSH(sc);
7634
7635 /* Read SFP module data */
7636 while (timeout) {
7637 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7638 if (rv == 0)
7639 break;
7640 delay(100*1000); /* XXX too big */
7641 timeout--;
7642 }
7643 if (rv != 0)
7644 goto out;
7645 switch (val) {
7646 case SFF_SFP_ID_SFF:
7647 aprint_normal_dev(sc->sc_dev,
7648 "Module/Connector soldered to board\n");
7649 break;
7650 case SFF_SFP_ID_SFP:
7651 aprint_normal_dev(sc->sc_dev, "SFP\n");
7652 break;
7653 case SFF_SFP_ID_UNKNOWN:
7654 goto out;
7655 default:
7656 break;
7657 }
7658
7659 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7660 if (rv != 0) {
7661 goto out;
7662 }
7663
7664 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7665 mediatype = WMP_F_SERDES;
7666 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
7667 		sc->sc_flags |= WM_F_SGMII;
7668 		mediatype = WMP_F_COPPER;
7669 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
7670 sc->sc_flags |= WM_F_SGMII;
7671 mediatype = WMP_F_SERDES;
7672 }
7673
7674 out:
7675 /* Restore I2C interface setting */
7676 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7677
7678 return mediatype;
7679 }
7680 /*
7681 * NVM related.
7682 * Microwire, SPI (w/wo EERD) and Flash.
7683 */
7684
7685 /* Both spi and uwire */
7686
7687 /*
7688 * wm_eeprom_sendbits:
7689 *
7690 * Send a series of bits to the EEPROM.
7691 */
7692 static void
7693 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7694 {
7695 uint32_t reg;
7696 int x;
7697
7698 reg = CSR_READ(sc, WMREG_EECD);
7699
7700 for (x = nbits; x > 0; x--) {
7701 if (bits & (1U << (x - 1)))
7702 reg |= EECD_DI;
7703 else
7704 reg &= ~EECD_DI;
7705 CSR_WRITE(sc, WMREG_EECD, reg);
7706 CSR_WRITE_FLUSH(sc);
7707 delay(2);
7708 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7709 CSR_WRITE_FLUSH(sc);
7710 delay(2);
7711 CSR_WRITE(sc, WMREG_EECD, reg);
7712 CSR_WRITE_FLUSH(sc);
7713 delay(2);
7714 }
7715 }
7716
7717 /*
7718 * wm_eeprom_recvbits:
7719 *
7720 * Receive a series of bits from the EEPROM.
7721 */
7722 static void
7723 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7724 {
7725 uint32_t reg, val;
7726 int x;
7727
7728 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7729
7730 val = 0;
7731 for (x = nbits; x > 0; x--) {
7732 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7733 CSR_WRITE_FLUSH(sc);
7734 delay(2);
7735 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7736 val |= (1U << (x - 1));
7737 CSR_WRITE(sc, WMREG_EECD, reg);
7738 CSR_WRITE_FLUSH(sc);
7739 delay(2);
7740 }
7741 *valp = val;
7742 }
7743
7744 /* Microwire */
7745
7746 /*
7747 * wm_nvm_read_uwire:
7748 *
7749 * Read a word from the EEPROM using the MicroWire protocol.
7750 */
7751 static int
7752 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7753 {
7754 uint32_t reg, val;
7755 int i;
7756
7757 for (i = 0; i < wordcnt; i++) {
7758 /* Clear SK and DI. */
7759 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7760 CSR_WRITE(sc, WMREG_EECD, reg);
7761
7762 /*
7763 * XXX: workaround for a bug in qemu-0.12.x and prior
7764 * and Xen.
7765 *
7766 		 * We use this workaround only for the 82540 because qemu's
7767 		 * e1000 emulation acts as an 82540.
7768 */
7769 if (sc->sc_type == WM_T_82540) {
7770 reg |= EECD_SK;
7771 CSR_WRITE(sc, WMREG_EECD, reg);
7772 reg &= ~EECD_SK;
7773 CSR_WRITE(sc, WMREG_EECD, reg);
7774 CSR_WRITE_FLUSH(sc);
7775 delay(2);
7776 }
7777 /* XXX: end of workaround */
7778
7779 /* Set CHIP SELECT. */
7780 reg |= EECD_CS;
7781 CSR_WRITE(sc, WMREG_EECD, reg);
7782 CSR_WRITE_FLUSH(sc);
7783 delay(2);
7784
7785 /* Shift in the READ command. */
7786 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7787
7788 /* Shift in address. */
7789 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
7790
7791 /* Shift out the data. */
7792 wm_eeprom_recvbits(sc, &val, 16);
7793 data[i] = val & 0xffff;
7794
7795 /* Clear CHIP SELECT. */
7796 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7797 CSR_WRITE(sc, WMREG_EECD, reg);
7798 CSR_WRITE_FLUSH(sc);
7799 delay(2);
7800 }
7801
7802 return 0;
7803 }
7804
7805 /* SPI */
7806
7807 /*
7808 * Set SPI and FLASH related information from the EECD register.
7809 * For 82541 and 82547, the word size is taken from EEPROM.
7810 */
7811 static int
7812 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
7813 {
7814 int size;
7815 uint32_t reg;
7816 uint16_t data;
7817
7818 reg = CSR_READ(sc, WMREG_EECD);
7819 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7820
7821 /* Read the size of NVM from EECD by default */
7822 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7823 switch (sc->sc_type) {
7824 case WM_T_82541:
7825 case WM_T_82541_2:
7826 case WM_T_82547:
7827 case WM_T_82547_2:
7828 /* Set dummy value to access EEPROM */
7829 sc->sc_nvm_wordsize = 64;
7830 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
7831 reg = data;
7832 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7833 if (size == 0)
7834 size = 6; /* 64 word size */
7835 else
7836 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
7837 break;
7838 case WM_T_80003:
7839 case WM_T_82571:
7840 case WM_T_82572:
7841 case WM_T_82573: /* SPI case */
7842 case WM_T_82574: /* SPI case */
7843 case WM_T_82583: /* SPI case */
7844 size += NVM_WORD_SIZE_BASE_SHIFT;
7845 if (size > 14)
7846 size = 14;
7847 break;
7848 case WM_T_82575:
7849 case WM_T_82576:
7850 case WM_T_82580:
7851 case WM_T_82580ER:
7852 case WM_T_I350:
7853 case WM_T_I354:
7854 case WM_T_I210:
7855 case WM_T_I211:
7856 size += NVM_WORD_SIZE_BASE_SHIFT;
7857 if (size > 15)
7858 size = 15;
7859 break;
7860 default:
7861 aprint_error_dev(sc->sc_dev,
7862 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
7863 return -1;
7864 break;
7865 }
7866
7867 sc->sc_nvm_wordsize = 1 << size;
7868
7869 return 0;
7870 }
7871
7872 /*
7873 * wm_nvm_ready_spi:
7874 *
7875 * Wait for a SPI EEPROM to be ready for commands.
7876 */
7877 static int
7878 wm_nvm_ready_spi(struct wm_softc *sc)
7879 {
7880 uint32_t val;
7881 int usec;
7882
7883 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7884 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7885 wm_eeprom_recvbits(sc, &val, 8);
7886 if ((val & SPI_SR_RDY) == 0)
7887 break;
7888 }
7889 if (usec >= SPI_MAX_RETRIES) {
7890 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7891 return 1;
7892 }
7893 return 0;
7894 }
7895
7896 /*
7897 * wm_nvm_read_spi:
7898 *
7899  *	Read a word from the EEPROM using the SPI protocol.
7900 */
7901 static int
7902 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7903 {
7904 uint32_t reg, val;
7905 int i;
7906 uint8_t opc;
7907
7908 /* Clear SK and CS. */
7909 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7910 CSR_WRITE(sc, WMREG_EECD, reg);
7911 CSR_WRITE_FLUSH(sc);
7912 delay(2);
7913
7914 if (wm_nvm_ready_spi(sc))
7915 return 1;
7916
7917 /* Toggle CS to flush commands. */
7918 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7919 CSR_WRITE_FLUSH(sc);
7920 delay(2);
7921 CSR_WRITE(sc, WMREG_EECD, reg);
7922 CSR_WRITE_FLUSH(sc);
7923 delay(2);
7924
7925 opc = SPI_OPC_READ;
7926 if (sc->sc_nvm_addrbits == 8 && word >= 128)
7927 opc |= SPI_OPC_A8;
7928
7929 wm_eeprom_sendbits(sc, opc, 8);
7930 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
7931
7932 for (i = 0; i < wordcnt; i++) {
7933 wm_eeprom_recvbits(sc, &val, 16);
7934 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7935 }
7936
7937 /* Raise CS and clear SK. */
7938 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7939 CSR_WRITE(sc, WMREG_EECD, reg);
7940 CSR_WRITE_FLUSH(sc);
7941 delay(2);
7942
7943 return 0;
7944 }
7945
7946 /* Reading via the EERD register */
7947
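/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD (or EEWR) register "rw" until the DONE bit is set.
 *	Returns 0 on success and -1 on timeout.
 */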
7948 static int
7949 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7950 {
7951 uint32_t attempts = 100000;
7952 uint32_t i, reg = 0;
7953 int32_t done = -1;
7954
7955 for (i = 0; i < attempts; i++) {
7956 reg = CSR_READ(sc, rw);
7957
7958 if (reg & EERD_DONE) {
7959 done = 0;
7960 break;
7961 }
7962 delay(5);
7963 }
7964
7965 return done;
7966 }
7967
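/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the EEPROM through the EERD register.
 */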
7968 static int
7969 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
7970 uint16_t *data)
7971 {
7972 int i, eerd = 0;
7973 int error = 0;
7974
7975 for (i = 0; i < wordcnt; i++) {
7976 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
7977
7978 CSR_WRITE(sc, WMREG_EERD, eerd);
7979 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
7980 if (error != 0)
7981 break;
7982
7983 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
7984 }
7985
7986 return error;
7987 }
7988
7989 /* Flash */
7990
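/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Detect which flash bank (0 or 1) holds the valid NVM image,
 *	using the EECD SEC1VAL bit where available and the bank
 *	signature bytes otherwise.
 */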
7991 static int
7992 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7993 {
7994 uint32_t eecd;
7995 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7996 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7997 uint8_t sig_byte = 0;
7998
7999 switch (sc->sc_type) {
8000 case WM_T_ICH8:
8001 case WM_T_ICH9:
8002 eecd = CSR_READ(sc, WMREG_EECD);
8003 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8004 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8005 return 0;
8006 }
8007 /* FALLTHROUGH */
8008 default:
8009 /* Default to 0 */
8010 *bank = 0;
8011
8012 /* Check bank 0 */
8013 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8014 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8015 *bank = 0;
8016 return 0;
8017 }
8018
8019 /* Check bank 1 */
8020 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8021 &sig_byte);
8022 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8023 *bank = 1;
8024 return 0;
8025 }
8026 }
8027
8028 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8029 device_xname(sc->sc_dev)));
8030 return -1;
8031 }
8032
8033 /******************************************************************************
8034 * This function does initial flash setup so that a new read/write/erase cycle
8035 * can be started.
8036 *
8037 * sc - The pointer to the hw structure
8038 ****************************************************************************/
8039 static int32_t
8040 wm_ich8_cycle_init(struct wm_softc *sc)
8041 {
8042 uint16_t hsfsts;
8043 int32_t error = 1;
8044 int32_t i = 0;
8045
8046 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8047
8048 	/* Check the Flash Descriptor Valid bit in the HW status register */
8049 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8050 return error;
8051 }
8052
8053 /* Clear FCERR in Hw status by writing 1 */
8054 /* Clear DAEL in Hw status by writing a 1 */
8055 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8056
8057 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8058
8059 	/*
8060 	 * Either we should have a hardware SPI cycle-in-progress bit to
8061 	 * check against in order to start a new cycle, or the FDONE bit
8062 	 * should be set to 1 by hardware reset so that it can be used to
8063 	 * tell whether a cycle is in progress or has completed.  We should
8064 	 * also have a software semaphore mechanism guarding FDONE and the
8065 	 * cycle-in-progress bit, so that accesses to those bits by two
8066 	 * threads are serialized and two threads don't start a cycle at
8067 	 * the same time.
8068 	 */
8069
8070 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8071 /*
8072 * There is no cycle running at present, so we can start a
8073 * cycle
8074 */
8075
8076 /* Begin by setting Flash Cycle Done. */
8077 hsfsts |= HSFSTS_DONE;
8078 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8079 error = 0;
8080 } else {
8081 /*
8082 		 * Otherwise, poll for some time so the current cycle has a
8083 * chance to end before giving up.
8084 */
8085 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8086 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8087 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8088 error = 0;
8089 break;
8090 }
8091 delay(1);
8092 }
8093 if (error == 0) {
8094 			/*
8095 			 * The previous cycle completed before the timeout,
8096 			 * so now set the Flash Cycle Done.
8097 			 */
8098 hsfsts |= HSFSTS_DONE;
8099 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8100 }
8101 }
8102 return error;
8103 }
8104
8105 /******************************************************************************
8106 * This function starts a flash cycle and waits for its completion
8107 *
8108 * sc - The pointer to the hw structure
8109 ****************************************************************************/
8110 static int32_t
8111 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8112 {
8113 uint16_t hsflctl;
8114 uint16_t hsfsts;
8115 int32_t error = 1;
8116 uint32_t i = 0;
8117
8118 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8119 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8120 hsflctl |= HSFCTL_GO;
8121 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8122
8123 /* Wait till FDONE bit is set to 1 */
8124 do {
8125 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8126 if (hsfsts & HSFSTS_DONE)
8127 break;
8128 delay(1);
8129 i++;
8130 } while (i < timeout);
8131 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8132 error = 0;
8133
8134 return error;
8135 }
8136
8137 /******************************************************************************
8138 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8139 *
8140 * sc - The pointer to the hw structure
8141 * index - The index of the byte or word to read.
8142 * size - Size of data to read, 1=byte 2=word
8143 * data - Pointer to the word to store the value read.
8144 *****************************************************************************/
8145 static int32_t
8146 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8147 uint32_t size, uint16_t *data)
8148 {
8149 uint16_t hsfsts;
8150 uint16_t hsflctl;
8151 uint32_t flash_linear_address;
8152 uint32_t flash_data = 0;
8153 int32_t error = 1;
8154 int32_t count = 0;
8155
8156 	if (size < 1 || size > 2 || data == NULL ||
8157 index > ICH_FLASH_LINEAR_ADDR_MASK)
8158 return error;
8159
8160 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8161 sc->sc_ich8_flash_base;
8162
8163 do {
8164 delay(1);
8165 /* Steps */
8166 error = wm_ich8_cycle_init(sc);
8167 if (error)
8168 break;
8169
8170 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8171 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8172 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8173 & HSFCTL_BCOUNT_MASK;
8174 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8175 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8176
8177 /*
8178 * Write the last 24 bits of index into Flash Linear address
8179 * field in Flash Address
8180 */
8181 /* TODO: TBD maybe check the index against the size of flash */
8182
8183 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8184
8185 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8186
8187 		/*
8188 		 * Check if FCERR is set.  If it is, clear it and retry the
8189 		 * whole sequence a few more times; otherwise read the data
8190 		 * (shift it in) from the Flash Data0 register, least
8191 		 * significant byte first.
8192 		 */
8193 if (error == 0) {
8194 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8195 if (size == 1)
8196 *data = (uint8_t)(flash_data & 0x000000FF);
8197 else if (size == 2)
8198 *data = (uint16_t)(flash_data & 0x0000FFFF);
8199 break;
8200 } else {
8201 /*
8202 * If we've gotten here, then things are probably
8203 * completely hosed, but if the error condition is
8204 * detected, it won't hurt to give it another try...
8205 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8206 */
8207 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8208 if (hsfsts & HSFSTS_ERR) {
8209 /* Repeat for some time before giving up. */
8210 continue;
8211 } else if ((hsfsts & HSFSTS_DONE) == 0)
8212 break;
8213 }
8214 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8215
8216 return error;
8217 }
8218
8219 /******************************************************************************
8220 * Reads a single byte from the NVM using the ICH8 flash access registers.
8221 *
8222 * sc - pointer to wm_hw structure
8223 * index - The index of the byte to read.
8224 * data - Pointer to a byte to store the value read.
8225 *****************************************************************************/
8226 static int32_t
8227 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8228 {
8229 int32_t status;
8230 uint16_t word = 0;
8231
8232 status = wm_read_ich8_data(sc, index, 1, &word);
8233 if (status == 0)
8234 *data = (uint8_t)word;
8235 else
8236 *data = 0;
8237
8238 return status;
8239 }
8240
8241 /******************************************************************************
8242 * Reads a word from the NVM using the ICH8 flash access registers.
8243 *
8244 * sc - pointer to wm_hw structure
8245 * index - The starting byte index of the word to read.
8246 * data - Pointer to a word to store the value read.
8247 *****************************************************************************/
8248 static int32_t
8249 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8250 {
8251 int32_t status;
8252
8253 status = wm_read_ich8_data(sc, index, 2, data);
8254 return status;
8255 }
8256
8257 /******************************************************************************
8258 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8259 * register.
8260 *
8261 * sc - Struct containing variables accessed by shared code
8262 * offset - offset of word in the EEPROM to read
8263 * data - word read from the EEPROM
8264 * words - number of words to read
8265 *****************************************************************************/
8266 static int
8267 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8268 {
8269 int32_t error = 0;
8270 uint32_t flash_bank = 0;
8271 uint32_t act_offset = 0;
8272 uint32_t bank_offset = 0;
8273 uint16_t word = 0;
8274 uint16_t i = 0;
8275
8276 /*
8277 * We need to know which is the valid flash bank. In the event
8278 * that we didn't allocate eeprom_shadow_ram, we may not be
8279 * managing flash_bank. So it cannot be trusted and needs
8280 * to be updated with each read.
8281 */
8282 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8283 if (error) {
8284 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8285 device_xname(sc->sc_dev)));
8286 flash_bank = 0;
8287 }
8288
8289 	/*
8290 	 * Compute the byte offset of the valid bank; the bank size is
8291 	 * in words, hence the * 2.
8292 	 */
8293 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8294
8295 error = wm_get_swfwhw_semaphore(sc);
8296 if (error) {
8297 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8298 __func__);
8299 return error;
8300 }
8301
8302 for (i = 0; i < words; i++) {
8303 /* The NVM part needs a byte offset, hence * 2 */
8304 act_offset = bank_offset + ((offset + i) * 2);
8305 error = wm_read_ich8_word(sc, act_offset, &word);
8306 if (error) {
8307 aprint_error_dev(sc->sc_dev,
8308 "%s: failed to read NVM\n", __func__);
8309 break;
8310 }
8311 data[i] = word;
8312 }
8313
8314 wm_put_swfwhw_semaphore(sc);
8315 return error;
8316 }
8317
8318 /* Locking, NVM type detection, checksum validation and reading */
8319
8320 /*
8321 * wm_nvm_acquire:
8322 *
8323 * Perform the EEPROM handshake required on some chips.
8324 */
8325 static int
8326 wm_nvm_acquire(struct wm_softc *sc)
8327 {
8328 uint32_t reg;
8329 int x;
8330 int ret = 0;
8331
8332 	/* Flash-type NVM needs no handshake; always succeeds */
8333 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8334 return 0;
8335
8336 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8337 ret = wm_get_swfwhw_semaphore(sc);
8338 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8339 /* This will also do wm_get_swsm_semaphore() if needed */
8340 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8341 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8342 ret = wm_get_swsm_semaphore(sc);
8343 }
8344
8345 if (ret) {
8346 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8347 __func__);
8348 return 1;
8349 }
8350
8351 if (sc->sc_flags & WM_F_LOCK_EECD) {
8352 reg = CSR_READ(sc, WMREG_EECD);
8353
8354 /* Request EEPROM access. */
8355 reg |= EECD_EE_REQ;
8356 CSR_WRITE(sc, WMREG_EECD, reg);
8357
8358 /* ..and wait for it to be granted. */
8359 for (x = 0; x < 1000; x++) {
8360 reg = CSR_READ(sc, WMREG_EECD);
8361 if (reg & EECD_EE_GNT)
8362 break;
8363 delay(5);
8364 }
8365 if ((reg & EECD_EE_GNT) == 0) {
8366 aprint_error_dev(sc->sc_dev,
8367 "could not acquire EEPROM GNT\n");
8368 reg &= ~EECD_EE_REQ;
8369 CSR_WRITE(sc, WMREG_EECD, reg);
8370 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8371 wm_put_swfwhw_semaphore(sc);
8372 if (sc->sc_flags & WM_F_LOCK_SWFW)
8373 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8374 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8375 wm_put_swsm_semaphore(sc);
8376 return 1;
8377 }
8378 }
8379
8380 return 0;
8381 }
8382
8383 /*
8384 * wm_nvm_release:
8385 *
8386 * Release the EEPROM mutex.
8387 */
8388 static void
8389 wm_nvm_release(struct wm_softc *sc)
8390 {
8391 uint32_t reg;
8392
8393 	/* Flash-type NVM needs no handshake; nothing to release */
8394 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8395 return;
8396
8397 if (sc->sc_flags & WM_F_LOCK_EECD) {
8398 reg = CSR_READ(sc, WMREG_EECD);
8399 reg &= ~EECD_EE_REQ;
8400 CSR_WRITE(sc, WMREG_EECD, reg);
8401 }
8402
8403 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8404 wm_put_swfwhw_semaphore(sc);
8405 if (sc->sc_flags & WM_F_LOCK_SWFW)
8406 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8407 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8408 wm_put_swsm_semaphore(sc);
8409 }
8410
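/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 1 if the NVM is an EEPROM, 0 if it is a Flash device
 *	(82573/82574/82583 only).
 */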
8411 static int
8412 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8413 {
8414 uint32_t eecd = 0;
8415
8416 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8417 || sc->sc_type == WM_T_82583) {
8418 eecd = CSR_READ(sc, WMREG_EECD);
8419
8420 /* Isolate bits 15 & 16 */
8421 eecd = ((eecd >> 15) & 0x03);
8422
8423 /* If both bits are set, device is Flash type */
8424 if (eecd == 0x03)
8425 return 0;
8426 }
8427 return 1;
8428 }
8429
8430 /*
8431 * wm_nvm_validate_checksum
8432 *
8433  * The checksum is defined as the sum of the first 64 16-bit words,
 * which must equal NVM_CHECKSUM.
8434 */
8435 static int
8436 wm_nvm_validate_checksum(struct wm_softc *sc)
8437 {
8438 uint16_t checksum;
8439 uint16_t eeprom_data;
8440 #ifdef WM_DEBUG
8441 uint16_t csum_wordaddr, valid_checksum;
8442 #endif
8443 int i;
8444
8445 checksum = 0;
8446
8447 /* Don't check for I211 */
8448 if (sc->sc_type == WM_T_I211)
8449 return 0;
8450
8451 #ifdef WM_DEBUG
8452 if (sc->sc_type == WM_T_PCH_LPT) {
8453 csum_wordaddr = NVM_OFF_COMPAT;
8454 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8455 } else {
8456 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8457 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8458 }
8459
8460 /* Dump EEPROM image for debug */
8461 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8462 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8463 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8464 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8465 if ((eeprom_data & valid_checksum) == 0) {
8466 DPRINTF(WM_DEBUG_NVM,
8467 ("%s: NVM need to be updated (%04x != %04x)\n",
8468 device_xname(sc->sc_dev), eeprom_data,
8469 valid_checksum));
8470 }
8471 }
8472
8473 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8474 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8475 for (i = 0; i < NVM_SIZE; i++) {
8476 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8477 printf("XX ");
8478 else
8479 printf("%04x ", eeprom_data);
8480 if (i % 8 == 7)
8481 printf("\n");
8482 }
8483 }
8484
8485 #endif /* WM_DEBUG */
8486
8487 for (i = 0; i < NVM_SIZE; i++) {
8488 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8489 return 1;
8490 checksum += eeprom_data;
8491 }
8492
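	/* Note: a checksum mismatch is only reported; the attach is not failed. */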
8493 if (checksum != (uint16_t) NVM_CHECKSUM) {
8494 #ifdef WM_DEBUG
8495 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8496 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8497 #endif
8498 }
8499
8500 return 0;
8501 }
8502
8503 /*
8504 * wm_nvm_read:
8505 *
8506 * Read data from the serial EEPROM.
8507 */
8508 static int
8509 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8510 {
8511 int rv;
8512
8513 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8514 return 1;
8515
8516 if (wm_nvm_acquire(sc))
8517 return 1;
8518
8519 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8520 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8521 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8522 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8523 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8524 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8525 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8526 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8527 else
8528 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8529
8530 wm_nvm_release(sc);
8531 return rv;
8532 }
8533
8534 /*
8535 * Hardware semaphores.
8536  * Very complex...
8537 */
8538
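/*
 * wm_get_swsm_semaphore:
 *
 *	Get the SW (SMBI) and SW/FW (SWESMBI) semaphore bits in the
 *	SWSM register.
 */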
8539 static int
8540 wm_get_swsm_semaphore(struct wm_softc *sc)
8541 {
8542 int32_t timeout;
8543 uint32_t swsm;
8544
8545 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8546 /* Get the SW semaphore. */
8547 timeout = sc->sc_nvm_wordsize + 1;
8548 while (timeout) {
8549 swsm = CSR_READ(sc, WMREG_SWSM);
8550
8551 if ((swsm & SWSM_SMBI) == 0)
8552 break;
8553
8554 delay(50);
8555 timeout--;
8556 }
8557
8558 if (timeout == 0) {
8559 aprint_error_dev(sc->sc_dev,
8560 "could not acquire SWSM SMBI\n");
8561 return 1;
8562 }
8563 }
8564
8565 /* Get the FW semaphore. */
8566 timeout = sc->sc_nvm_wordsize + 1;
8567 while (timeout) {
8568 swsm = CSR_READ(sc, WMREG_SWSM);
8569 swsm |= SWSM_SWESMBI;
8570 CSR_WRITE(sc, WMREG_SWSM, swsm);
8571 /* If we managed to set the bit we got the semaphore. */
8572 swsm = CSR_READ(sc, WMREG_SWSM);
8573 if (swsm & SWSM_SWESMBI)
8574 break;
8575
8576 delay(50);
8577 timeout--;
8578 }
8579
8580 if (timeout == 0) {
8581 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8582 /* Release semaphores */
8583 wm_put_swsm_semaphore(sc);
8584 return 1;
8585 }
8586 return 0;
8587 }
8588
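/*
 * wm_put_swsm_semaphore:
 *
 *	Release the SMBI and SWESMBI semaphore bits.
 */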
8589 static void
8590 wm_put_swsm_semaphore(struct wm_softc *sc)
8591 {
8592 uint32_t swsm;
8593
8594 swsm = CSR_READ(sc, WMREG_SWSM);
8595 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8596 CSR_WRITE(sc, WMREG_SWSM, swsm);
8597 }
8598
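/*
 * wm_get_swfw_semaphore:
 *
 *	Get the software/firmware semaphore for the resources in "mask"
 *	via the SW_FW_SYNC register.
 */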
8599 static int
8600 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8601 {
8602 uint32_t swfw_sync;
8603 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8604 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8605 	int timeout;
8606
8607 for (timeout = 0; timeout < 200; timeout++) {
8608 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8609 if (wm_get_swsm_semaphore(sc)) {
8610 aprint_error_dev(sc->sc_dev,
8611 "%s: failed to get semaphore\n",
8612 __func__);
8613 return 1;
8614 }
8615 }
8616 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8617 if ((swfw_sync & (swmask | fwmask)) == 0) {
8618 swfw_sync |= swmask;
8619 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8620 if (sc->sc_flags & WM_F_LOCK_SWSM)
8621 wm_put_swsm_semaphore(sc);
8622 return 0;
8623 }
8624 if (sc->sc_flags & WM_F_LOCK_SWSM)
8625 wm_put_swsm_semaphore(sc);
8626 delay(5000);
8627 }
8628 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8629 device_xname(sc->sc_dev), mask, swfw_sync);
8630 return 1;
8631 }
8632
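/*
 * wm_put_swfw_semaphore:
 *
 *	Release the software/firmware semaphore for the resources in
 *	"mask".
 */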
8633 static void
8634 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8635 {
8636 uint32_t swfw_sync;
8637
8638 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8639 while (wm_get_swsm_semaphore(sc) != 0)
8640 continue;
8641 }
8642 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8643 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8644 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8645 if (sc->sc_flags & WM_F_LOCK_SWSM)
8646 wm_put_swsm_semaphore(sc);
8647 }
8648
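/*
 * wm_get_swfwhw_semaphore:
 *
 *	Get the software flag used on ICH/PCH chips (EXTCNFCTR SWFLAG).
 */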
8649 static int
8650 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8651 {
8652 uint32_t ext_ctrl;
8653 	int timeout;
8654
8655 for (timeout = 0; timeout < 200; timeout++) {
8656 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8657 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8658 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8659
8660 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8661 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8662 return 0;
8663 delay(5000);
8664 }
8665 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8666 device_xname(sc->sc_dev), ext_ctrl);
8667 return 1;
8668 }
8669
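/*
 * wm_put_swfwhw_semaphore:
 *
 *	Release the software flag.
 */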
8670 static void
8671 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8672 {
8673 uint32_t ext_ctrl;
8674 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8675 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8676 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8677 }
8678
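/*
 * wm_get_hw_semaphore_82573:
 *
 *	Get the MDIO software ownership bit (82573 family).
 */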
8679 static int
8680 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8681 {
8682 int i = 0;
8683 uint32_t reg;
8684
8685 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8686 do {
8687 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8688 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8689 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8690 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8691 break;
8692 delay(2*1000);
8693 i++;
8694 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8695
8696 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8697 wm_put_hw_semaphore_82573(sc);
8698 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8699 device_xname(sc->sc_dev));
8700 return -1;
8701 }
8702
8703 return 0;
8704 }
8705
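/*
 * wm_put_hw_semaphore_82573:
 *
 *	Release the MDIO software ownership bit.
 */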
8706 static void
8707 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8708 {
8709 uint32_t reg;
8710
8711 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8712 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8713 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8714 }
8715
8716 /*
8717 * Management mode and power management related subroutines.
8718 * BMC, AMT, suspend/resume and EEE.
8719 */
8720
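/*
 * wm_check_mng_mode:
 *
 *	Return non-zero if the hardware's management mode (BMC/AMT) is
 *	enabled.  The check itself is chip specific.
 */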
8721 static int
8722 wm_check_mng_mode(struct wm_softc *sc)
8723 {
8724 int rv;
8725
8726 switch (sc->sc_type) {
8727 case WM_T_ICH8:
8728 case WM_T_ICH9:
8729 case WM_T_ICH10:
8730 case WM_T_PCH:
8731 case WM_T_PCH2:
8732 case WM_T_PCH_LPT:
8733 rv = wm_check_mng_mode_ich8lan(sc);
8734 break;
8735 case WM_T_82574:
8736 case WM_T_82583:
8737 rv = wm_check_mng_mode_82574(sc);
8738 break;
8739 case WM_T_82571:
8740 case WM_T_82572:
8741 case WM_T_82573:
8742 case WM_T_80003:
8743 rv = wm_check_mng_mode_generic(sc);
8744 break;
8745 default:
8746 		/* nothing to do */
8747 rv = 0;
8748 break;
8749 }
8750
8751 return rv;
8752 }
8753
8754 static int
8755 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8756 {
8757 uint32_t fwsm;
8758
8759 fwsm = CSR_READ(sc, WMREG_FWSM);
8760
8761 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8762 return 1;
8763
8764 return 0;
8765 }
8766
8767 static int
8768 wm_check_mng_mode_82574(struct wm_softc *sc)
8769 {
8770 uint16_t data;
8771
8772 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8773
8774 if ((data & NVM_CFG2_MNGM_MASK) != 0)
8775 return 1;
8776
8777 return 0;
8778 }
8779
8780 static int
8781 wm_check_mng_mode_generic(struct wm_softc *sc)
8782 {
8783 uint32_t fwsm;
8784
8785 fwsm = CSR_READ(sc, WMREG_FWSM);
8786
8787 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8788 return 1;
8789
8790 return 0;
8791 }
8792
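/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management packets should be passed through to the
 *	host, 0 otherwise.
 */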
8793 static int
8794 wm_enable_mng_pass_thru(struct wm_softc *sc)
8795 {
8796 uint32_t manc, fwsm, factps;
8797
8798 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8799 return 0;
8800
8801 manc = CSR_READ(sc, WMREG_MANC);
8802
8803 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8804 device_xname(sc->sc_dev), manc));
8805 if ((manc & MANC_RECV_TCO_EN) == 0)
8806 return 0;
8807
8808 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8809 fwsm = CSR_READ(sc, WMREG_FWSM);
8810 factps = CSR_READ(sc, WMREG_FACTPS);
8811 if (((factps & FACTPS_MNGCG) == 0)
8812 && ((fwsm & FWSM_MODE_MASK)
8813 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8814 return 1;
8815 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8816 uint16_t data;
8817
8818 factps = CSR_READ(sc, WMREG_FACTPS);
8819 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8820 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8821 device_xname(sc->sc_dev), factps, data));
8822 if (((factps & FACTPS_MNGCG) == 0)
8823 && ((data & NVM_CFG2_MNGM_MASK)
8824 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
8825 return 1;
8826 } else if (((manc & MANC_SMBUS_EN) != 0)
8827 && ((manc & MANC_ASF_EN) == 0))
8828 return 1;
8829
8830 return 0;
8831 }
8832
8833 static int
8834 wm_check_reset_block(struct wm_softc *sc)
8835 {
8836 uint32_t reg;
8837
8838 switch (sc->sc_type) {
8839 case WM_T_ICH8:
8840 case WM_T_ICH9:
8841 case WM_T_ICH10:
8842 case WM_T_PCH:
8843 case WM_T_PCH2:
8844 case WM_T_PCH_LPT:
8845 reg = CSR_READ(sc, WMREG_FWSM);
8846 if ((reg & FWSM_RSPCIPHY) != 0)
8847 return 0;
8848 else
8849 return -1;
8850 break;
8851 case WM_T_82571:
8852 case WM_T_82572:
8853 case WM_T_82573:
8854 case WM_T_82574:
8855 case WM_T_82583:
8856 case WM_T_80003:
8857 reg = CSR_READ(sc, WMREG_MANC);
8858 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8859 return -1;
8860 else
8861 return 0;
8862 break;
8863 default:
8864 /* no problem */
8865 break;
8866 }
8867
8868 return 0;
8869 }
8870
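/*
 * wm_get_hw_control:
 *
 *	Set the DRV_LOAD bit to tell the firmware that the driver has
 *	taken control of the hardware.
 */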
8871 static void
8872 wm_get_hw_control(struct wm_softc *sc)
8873 {
8874 uint32_t reg;
8875
8876 switch (sc->sc_type) {
8877 case WM_T_82573:
8878 reg = CSR_READ(sc, WMREG_SWSM);
8879 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8880 break;
8881 case WM_T_82571:
8882 case WM_T_82572:
8883 case WM_T_82574:
8884 case WM_T_82583:
8885 case WM_T_80003:
8886 case WM_T_ICH8:
8887 case WM_T_ICH9:
8888 case WM_T_ICH10:
8889 case WM_T_PCH:
8890 case WM_T_PCH2:
8891 case WM_T_PCH_LPT:
8892 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8893 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8894 break;
8895 default:
8896 break;
8897 }
8898 }
8899
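/*
 * wm_release_hw_control:
 *
 *	Clear the DRV_LOAD bit to return control of the hardware to the
 *	firmware.
 */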
8900 static void
8901 wm_release_hw_control(struct wm_softc *sc)
8902 {
8903 uint32_t reg;
8904
8905 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8906 return;
8907
8908 if (sc->sc_type == WM_T_82573) {
8909 reg = CSR_READ(sc, WMREG_SWSM);
8910 reg &= ~SWSM_DRV_LOAD;
8911 		CSR_WRITE(sc, WMREG_SWSM, reg);
8912 } else {
8913 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8914 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8915 }
8916 }
8917
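/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 *	Gate (on != 0) or ungate the automatic PHY configuration by
 *	hardware.
 */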
8918 static void
8919 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8920 {
8921 uint32_t reg;
8922
8923 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8924
8925 if (on != 0)
8926 reg |= EXTCNFCTR_GATE_PHY_CFG;
8927 else
8928 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8929
8930 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8931 }
8932
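/*
 * wm_smbustopci:
 *
 *	Switch the PHY from SMBus to PCIe mode by toggling the LANPHYPC
 *	value; only done when no valid firmware is present.
 */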
8933 static void
8934 wm_smbustopci(struct wm_softc *sc)
8935 {
8936 uint32_t fwsm;
8937
8938 fwsm = CSR_READ(sc, WMREG_FWSM);
8939 if (((fwsm & FWSM_FW_VALID) == 0)
8940 && ((wm_check_reset_block(sc) == 0))) {
8941 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8942 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8943 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8944 CSR_WRITE_FLUSH(sc);
8945 delay(10);
8946 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8947 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8948 CSR_WRITE_FLUSH(sc);
8949 delay(50*1000);
8950
8951 /*
8952 * Gate automatic PHY configuration by hardware on non-managed
8953 * 82579
8954 */
8955 if (sc->sc_type == WM_T_PCH2)
8956 wm_gate_hw_phy_config_ich8lan(sc, 1);
8957 }
8958 }
8959
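/*
 * wm_init_manageability:
 *
 *	Disable hardware ARP interception and let management packets
 *	(ports 623/624) be received by the host.
 */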
8960 static void
8961 wm_init_manageability(struct wm_softc *sc)
8962 {
8963
8964 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8965 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8966 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8967
8968 /* Disable hardware interception of ARP */
8969 manc &= ~MANC_ARP_EN;
8970
8971 /* Enable receiving management packets to the host */
8972 if (sc->sc_type >= WM_T_82571) {
8973 manc |= MANC_EN_MNG2HOST;
8974 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8975 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8976
8977 }
8978
8979 CSR_WRITE(sc, WMREG_MANC, manc);
8980 }
8981 }
8982
8983 static void
8984 wm_release_manageability(struct wm_softc *sc)
8985 {
8986
8987 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8988 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8989
8990 manc |= MANC_ARP_EN;
8991 if (sc->sc_type >= WM_T_82571)
8992 manc &= ~MANC_EN_MNG2HOST;
8993
8994 CSR_WRITE(sc, WMREG_MANC, manc);
8995 }
8996 }
8997
8998 static void
8999 wm_get_wakeup(struct wm_softc *sc)
9000 {
9001
9002 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9003 switch (sc->sc_type) {
9004 case WM_T_82573:
9005 case WM_T_82583:
9006 sc->sc_flags |= WM_F_HAS_AMT;
9007 /* FALLTHROUGH */
9008 case WM_T_80003:
9009 case WM_T_82541:
9010 case WM_T_82547:
9011 case WM_T_82571:
9012 case WM_T_82572:
9013 case WM_T_82574:
9014 case WM_T_82575:
9015 case WM_T_82576:
9016 case WM_T_82580:
9017 case WM_T_82580ER:
9018 case WM_T_I350:
9019 case WM_T_I354:
9020 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9021 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9022 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9023 break;
9024 case WM_T_ICH8:
9025 case WM_T_ICH9:
9026 case WM_T_ICH10:
9027 case WM_T_PCH:
9028 case WM_T_PCH2:
9029 case WM_T_PCH_LPT:
9030 sc->sc_flags |= WM_F_HAS_AMT;
9031 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9032 break;
9033 default:
9034 break;
9035 }
9036
9037 /* 1: HAS_MANAGE */
9038 if (wm_enable_mng_pass_thru(sc) != 0)
9039 sc->sc_flags |= WM_F_HAS_MANAGE;
9040
9041 #ifdef WM_DEBUG
9042 printf("\n");
9043 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9044 printf("HAS_AMT,");
9045 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9046 printf("ARC_SUBSYS_VALID,");
9047 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9048 printf("ASF_FIRMWARE_PRES,");
9049 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9050 printf("HAS_MANAGE,");
9051 printf("\n");
9052 #endif
9053 	/*
9054 	 * Note that the WOL flags are set later, after the EEPROM/NVM
9055 	 * reset handling has been done.
9056 	 */
9057 }
9058
9059 #ifdef WM_WOL
9060 /* WOL in the newer chipset interfaces (pchlan) */
9061 static void
9062 wm_enable_phy_wakeup(struct wm_softc *sc)
9063 {
9064 #if 0
9065 uint16_t preg;
9066
9067 /* Copy MAC RARs to PHY RARs */
9068
9069 /* Copy MAC MTA to PHY MTA */
9070
9071 /* Configure PHY Rx Control register */
9072
9073 /* Enable PHY wakeup in MAC register */
9074
9075 /* Configure and enable PHY wakeup in PHY registers */
9076
9077 /* Activate PHY wakeup */
9078
9079 /* XXX */
9080 #endif
9081 }
9082
9083 /* Power down workaround on D3 */
9084 static void
9085 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9086 {
9087 uint32_t reg;
9088 int i;
9089
9090 for (i = 0; i < 2; i++) {
9091 /* Disable link */
9092 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9093 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9094 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9095
9096 /*
9097 * Call gig speed drop workaround on Gig disable before
9098 * accessing any PHY registers
9099 */
9100 if (sc->sc_type == WM_T_ICH8)
9101 wm_gig_downshift_workaround_ich8lan(sc);
9102
9103 /* Write VR power-down enable */
9104 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9105 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9106 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9107 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9108
9109 /* Read it back and test */
9110 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9111 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9112 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9113 break;
9114
9115 /* Issue PHY reset and repeat at most one more time */
9116 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9117 }
9118 }
9119
9120 static void
9121 wm_enable_wakeup(struct wm_softc *sc)
9122 {
9123 uint32_t reg, pmreg;
9124 pcireg_t pmode;
9125
9126 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9127 &pmreg, NULL) == 0)
9128 return;
9129
9130 /* Advertise the wakeup capability */
9131 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9132 | CTRL_SWDPIN(3));
9133 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9134
9135 /* ICH workaround */
9136 switch (sc->sc_type) {
9137 case WM_T_ICH8:
9138 case WM_T_ICH9:
9139 case WM_T_ICH10:
9140 case WM_T_PCH:
9141 case WM_T_PCH2:
9142 case WM_T_PCH_LPT:
9143 /* Disable gig during WOL */
9144 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9145 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9146 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9147 if (sc->sc_type == WM_T_PCH)
9148 wm_gmii_reset(sc);
9149
9150 /* Power down workaround */
9151 if (sc->sc_phytype == WMPHY_82577) {
9152 struct mii_softc *child;
9153
9154 /* Assume that the PHY is copper */
9155 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9156 if (child->mii_mpd_rev <= 2)
9157 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9158 (768 << 5) | 25, 0x0444); /* magic num */
9159 }
9160 break;
9161 default:
9162 break;
9163 }
9164
9165 /* Keep the laser running on fiber adapters */
9166 	if (((sc->sc_mediatype & WMP_F_FIBER) != 0)
9167 	    || ((sc->sc_mediatype & WMP_F_SERDES) != 0)) {
9168 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9169 reg |= CTRL_EXT_SWDPIN(3);
9170 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9171 }
9172
9173 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9174 #if 0 /* for the multicast packet */
9175 reg |= WUFC_MC;
9176 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9177 #endif
9178
9179 if (sc->sc_type == WM_T_PCH) {
9180 wm_enable_phy_wakeup(sc);
9181 } else {
9182 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9183 CSR_WRITE(sc, WMREG_WUFC, reg);
9184 }
9185
9186 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9187 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9188 || (sc->sc_type == WM_T_PCH2))
9189 && (sc->sc_phytype == WMPHY_IGP_3))
9190 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9191
9192 /* Request PME */
9193 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9194 #if 0
9195 /* Disable WOL */
9196 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9197 #else
9198 /* For WOL */
9199 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9200 #endif
9201 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9202 }
9203 #endif /* WM_WOL */
9204
9205 /* EEE */
9206
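/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet (EEE) according to
 *	the WM_F_EEE flag.
 */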
9207 static void
9208 wm_set_eee_i350(struct wm_softc *sc)
9209 {
9210 uint32_t ipcnfg, eeer;
9211
9212 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9213 eeer = CSR_READ(sc, WMREG_EEER);
9214
9215 if ((sc->sc_flags & WM_F_EEE) != 0) {
9216 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9217 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9218 | EEER_LPI_FC);
9219 } else {
9220 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9221 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9222 | EEER_LPI_FC);
9223 }
9224
9225 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9226 CSR_WRITE(sc, WMREG_EEER, eeer);
9227 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9228 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9229 }
9230
9231 /*
9232 * Workarounds (mainly PHY related).
9233 * Basically, PHY's workarounds are in the PHY drivers.
9234 */
9235
9236 /* Work-around for 82566 Kumeran PCS lock loss */
9237 static void
9238 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9239 {
9240 int miistatus, active, i;
9241 int reg;
9242
9243 miistatus = sc->sc_mii.mii_media_status;
9244
9245 /* If the link is not up, do nothing */
9246 	if ((miistatus & IFM_ACTIVE) == 0)
9247 return;
9248
9249 active = sc->sc_mii.mii_media_active;
9250
9251 /* Nothing to do if the link is other than 1Gbps */
9252 if (IFM_SUBTYPE(active) != IFM_1000_T)
9253 return;
9254
9255 for (i = 0; i < 10; i++) {
9256 /* read twice */
9257 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9258 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9259 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9260 goto out; /* GOOD! */
9261
9262 /* Reset the PHY */
9263 wm_gmii_reset(sc);
9264 delay(5*1000);
9265 }
9266
9267 /* Disable GigE link negotiation */
9268 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9269 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9270 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9271
9272 /*
9273 * Call gig speed drop workaround on Gig disable before accessing
9274 * any PHY registers.
9275 */
9276 wm_gig_downshift_workaround_ich8lan(sc);
9277
9278 out:
9279 return;
9280 }
9281
9282 /* WOL from S5 stops working */
9283 static void
9284 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9285 {
9286 uint16_t kmrn_reg;
9287
9288 /* Only for igp3 */
9289 if (sc->sc_phytype == WMPHY_IGP_3) {
9290 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9291 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9292 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9293 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9294 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9295 }
9296 }
9297
9298 /*
9299  * Workarounds for the PCH PHYs
9300 * XXX should be moved to new PHY driver?
9301 */
9302 static void
9303 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9304 {
9305 if (sc->sc_phytype == WMPHY_82577)
9306 wm_set_mdio_slow_mode_hv(sc);
9307
9308 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9309
9310 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9311
9312 /* 82578 */
9313 if (sc->sc_phytype == WMPHY_82578) {
9314 /* PCH rev. < 3 */
9315 if (sc->sc_rev < 3) {
9316 /* XXX 6 bit shift? Why? Is it page2? */
9317 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9318 0x66c0);
9319 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9320 0xffff);
9321 }
9322
9323 /* XXX phy rev. < 2 */
9324 }
9325
9326 /* Select page 0 */
9327
9328 /* XXX acquire semaphore */
9329 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9330 /* XXX release semaphore */
9331
9332 /*
9333 * Configure the K1 Si workaround during phy reset assuming there is
9334 * link so that it disables K1 if link is in 1Gbps.
9335 */
9336 wm_k1_gig_workaround_hv(sc, 1);
9337 }
9338
9339 static void
9340 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9341 {
9342
9343 wm_set_mdio_slow_mode_hv(sc);
9344 }
9345
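/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1 Si workaround: disable K1 while the link is up and apply the
 *	link stall fix; otherwise restore the NVM default.
 */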
9346 static void
9347 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9348 {
9349 int k1_enable = sc->sc_nvm_k1_enabled;
9350
9351 /* XXX acquire semaphore */
9352
9353 if (link) {
9354 k1_enable = 0;
9355
9356 /* Link stall fix for link up */
9357 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9358 } else {
9359 /* Link stall fix for link down */
9360 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9361 }
9362
9363 wm_configure_k1_ich8lan(sc, k1_enable);
9364
9365 /* XXX release semaphore */
9366 }
9367
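/*
 * wm_set_mdio_slow_mode_hv:
 *
 *	Set the Kumeran MDIO interface to slow mode.
 */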
9368 static void
9369 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9370 {
9371 uint32_t reg;
9372
9373 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9374 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9375 reg | HV_KMRN_MDIO_SLOW);
9376 }
9377
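/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power saving state in the Kumeran
 *	interface, temporarily forcing the MAC speed settings while the
 *	change takes effect.
 */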
9378 static void
9379 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9380 {
9381 uint32_t ctrl, ctrl_ext, tmp;
9382 uint16_t kmrn_reg;
9383
9384 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9385
9386 if (k1_enable)
9387 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9388 else
9389 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9390
9391 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9392
9393 delay(20);
9394
9395 ctrl = CSR_READ(sc, WMREG_CTRL);
9396 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9397
9398 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9399 tmp |= CTRL_FRCSPD;
9400
9401 CSR_WRITE(sc, WMREG_CTRL, tmp);
9402 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9403 CSR_WRITE_FLUSH(sc);
9404 delay(20);
9405
9406 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9407 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9408 CSR_WRITE_FLUSH(sc);
9409 delay(20);
9410 }
9411
9412 /* Special case: the 82575 needs to do manual init ... */
9413 static void
9414 wm_reset_init_script_82575(struct wm_softc *sc)
9415 {
9416 /*
9417 	 * Remark: this is untested code - we have no board without EEPROM;
9418 	 * same setup as mentioned in the FreeBSD driver for the i82575.
9419 */
9420
9421 /* SerDes configuration via SERDESCTRL */
9422 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9423 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9424 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9425 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9426
9427 /* CCM configuration via CCMCTL register */
9428 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9429 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9430
9431 /* PCIe lanes configuration */
9432 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9433 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9434 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9435 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9436
9437 /* PCIe PLL Configuration */
9438 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9439 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9440 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9441 }
9442