/*	$NetBSD: if_wm.c,v 1.292 2014/08/28 16:22:59 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Read SFP ROM and set media type correctly on 82575 and newer devices
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.292 2014/08/28 16:22:59 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
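
/*
 * For example, with WM_NTXDESC(sc) == 4096 the mask is 4095, so
 * WM_NEXTTX(sc, 4095) == ((4095 + 1) & 4095) == 0: the power-of-two
 * ring sizes let the index macros wrap with a mask instead of a modulo.
 */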

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
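/*
 * (Worked example, assuming a 9018-byte jumbo frame including the
 * Ethernet header and CRC: it spans howmany(9018, 2048) == 5 of the
 * 2k buffers, which is where the "5 Rx buffers" figure above and the
 * 256 / 5 ~= 50 jumbo-packet estimate come from.)
 */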

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
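
/*
 * Usage sketch (illustrative, not driver code):
 *
 *	WM_TX_LOCK(sc);
 *	KASSERT(WM_TX_LOCKED(sc));
 *	... modify sc_txnext, sc_txfree ...
 *	WM_TX_UNLOCK(sc);
 *
 * All of these macros degrade to no-ops (and the *_LOCKED() checks to
 * true) when the corresponding lock pointer is NULL, presumably the
 * case when WM_MPSAFE operation is not enabled.
 */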

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
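
/*
 * The chain macros above use the classic tail-pointer idiom:
 * sc_rxtailp always points at the m_next field that should receive
 * the next mbuf, so WM_RXCHAIN_LINK() appends in O(1) without walking
 * the chain, and WM_RXCHAIN_RESET() re-aims the tail pointer at
 * sc_rxhead itself.
 */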

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
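
/*
 * E.g. a 64-bit descriptor address of 0x0000000123456000 splits into
 * a LO half of 0x23456000 and a HI half of 0x1, matching the chip's
 * paired 32-bit base-address registers; on 32-bit bus_addr_t
 * platforms the HI half is always 0.
 */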

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
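
/*
 * For instance, WM_CDTXSYNC(sc, 4094, 4, ops) on a 4096-entry ring
 * issues two bus_dmamap_sync() calls: one for descriptors 4094-4095
 * at the tail of the ring, then one for descriptors 0-1 at the head.
 */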

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
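
/*
 * With sc_align_tweak == 2 the 14-byte Ethernet header ends at buffer
 * offset 2 + 14 == 16, so the IP header that follows is 4-byte
 * aligned; with a tweak of 0 it would end at offset 14, leaving the
 * IP header misaligned on strict-alignment platforms.
 */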

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static uint32_t	wm_get_sfp_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Both spi and uwire */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		0x00
#define	WMP_F_FIBER		0x01
#define	WMP_F_COPPER		0x02
#define	WMP_F_SERDES		0x03 /* Internal SERDES */
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
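
/*
 * For example, on a platform with a 64-bit bus_addr_t, passing
 * v == 0x123456789a stores wa_low = htole32(0x3456789a) and
 * wa_high = htole32(0x12); on 32-bit platforms wa_high is simply 0.
 */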

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because those newer chips
			 * don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it has not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
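				/*
				 * (The BYTECNT/MAXB fields are a power-of-two
				 * encoding: a value of n means a 512 << n
				 * byte maximum read, e.g. n == 2 is 2048
				 * bytes, as the aprint below also shows.)
				 */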
1501 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1502 sc->sc_pcixe_capoff + PCIX_CMD);
1503 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1504 sc->sc_pcixe_capoff + PCIX_STATUS);
1505
1506 bytecnt =
1507 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1508 PCIX_CMD_BYTECNT_SHIFT;
1509 maxb =
1510 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1511 PCIX_STATUS_MAXB_SHIFT;
1512 if (bytecnt > maxb) {
1513 aprint_verbose_dev(sc->sc_dev,
1514 "resetting PCI-X MMRBC: %d -> %d\n",
1515 512 << bytecnt, 512 << maxb);
1516 pcix_cmd = (pcix_cmd &
1517 ~PCIX_CMD_BYTECNT_MASK) |
1518 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1519 pci_conf_write(pa->pa_pc, pa->pa_tag,
1520 sc->sc_pcixe_capoff + PCIX_CMD,
1521 pcix_cmd);
1522 }
1523 }
1524 }
1525 /*
1526 * The quad port adapter is special; it has a PCIX-PCIX
1527 * bridge on the board, and can run the secondary bus at
1528 * a higher speed.
1529 */
1530 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1531 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1532 : 66;
1533 } else if (sc->sc_flags & WM_F_PCIX) {
1534 switch (reg & STATUS_PCIXSPD_MASK) {
1535 case STATUS_PCIXSPD_50_66:
1536 sc->sc_bus_speed = 66;
1537 break;
1538 case STATUS_PCIXSPD_66_100:
1539 sc->sc_bus_speed = 100;
1540 break;
1541 case STATUS_PCIXSPD_100_133:
1542 sc->sc_bus_speed = 133;
1543 break;
1544 default:
1545 aprint_error_dev(sc->sc_dev,
1546 "unknown PCIXSPD %d; assuming 66MHz\n",
1547 reg & STATUS_PCIXSPD_MASK);
1548 sc->sc_bus_speed = 66;
1549 break;
1550 }
1551 } else
1552 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1553 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1554 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1555 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1556 }
1557
1558 /*
1559 * Allocate the control data structures, and create and load the
1560 * DMA map for it.
1561 *
1562 * NOTE: All Tx descriptors must be in the same 4G segment of
1563 * memory. So must Rx descriptors. We simplify by allocating
1564 * both sets within the same 4G segment.
1565 */
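/*
 * The 4G constraint is enforced below through the bus_dmamem_alloc()
 * boundary argument: passing 0x100000000 tells the bus_dma layer that
 * no allocated segment may cross a 4GB boundary.
 */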
1566 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1567 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1568 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1569 sizeof(struct wm_control_data_82542) :
1570 sizeof(struct wm_control_data_82544);
1571 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1572 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1573 &sc->sc_cd_rseg, 0)) != 0) {
1574 aprint_error_dev(sc->sc_dev,
1575 "unable to allocate control data, error = %d\n",
1576 error);
1577 goto fail_0;
1578 }
1579
1580 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1581 sc->sc_cd_rseg, sc->sc_cd_size,
1582 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1583 aprint_error_dev(sc->sc_dev,
1584 "unable to map control data, error = %d\n", error);
1585 goto fail_1;
1586 }
1587
1588 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1589 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1590 aprint_error_dev(sc->sc_dev,
1591 "unable to create control data DMA map, error = %d\n",
1592 error);
1593 goto fail_2;
1594 }
1595
1596 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1597 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1598 aprint_error_dev(sc->sc_dev,
1599 "unable to load control data DMA map, error = %d\n",
1600 error);
1601 goto fail_3;
1602 }
1603
1604 /* Create the transmit buffer DMA maps. */
1605 WM_TXQUEUELEN(sc) =
1606 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1607 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1608 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1609 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1610 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1611 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1612 aprint_error_dev(sc->sc_dev,
1613 "unable to create Tx DMA map %d, error = %d\n",
1614 i, error);
1615 goto fail_4;
1616 }
1617 }
1618
1619 /* Create the receive buffer DMA maps. */
1620 for (i = 0; i < WM_NRXDESC; i++) {
1621 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1622 MCLBYTES, 0, 0,
1623 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1624 aprint_error_dev(sc->sc_dev,
1625 "unable to create Rx DMA map %d error = %d\n",
1626 i, error);
1627 goto fail_5;
1628 }
1629 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1630 }
1631
1632 /* clear interesting stat counters */
1633 CSR_READ(sc, WMREG_COLC);
1634 CSR_READ(sc, WMREG_RXERRC);
1635
1636 	/* Switch PHY control from SMBus to PCIe */
1637 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1638 || (sc->sc_type == WM_T_PCH_LPT))
1639 wm_smbustopci(sc);
1640
1641 /* Reset the chip to a known state. */
1642 wm_reset(sc);
1643
1644 /* Get some information about the EEPROM. */
1645 switch (sc->sc_type) {
1646 case WM_T_82542_2_0:
1647 case WM_T_82542_2_1:
1648 case WM_T_82543:
1649 case WM_T_82544:
1650 /* Microwire */
1651 sc->sc_ee_addrbits = 6;
1652 break;
1653 case WM_T_82540:
1654 case WM_T_82545:
1655 case WM_T_82545_3:
1656 case WM_T_82546:
1657 case WM_T_82546_3:
1658 /* Microwire */
1659 reg = CSR_READ(sc, WMREG_EECD);
1660 if (reg & EECD_EE_SIZE)
1661 sc->sc_ee_addrbits = 8;
1662 else
1663 sc->sc_ee_addrbits = 6;
1664 sc->sc_flags |= WM_F_LOCK_EECD;
1665 break;
1666 case WM_T_82541:
1667 case WM_T_82541_2:
1668 case WM_T_82547:
1669 case WM_T_82547_2:
1670 reg = CSR_READ(sc, WMREG_EECD);
1671 if (reg & EECD_EE_TYPE) {
1672 /* SPI */
1673 wm_set_spiaddrbits(sc);
1674 } else
1675 /* Microwire */
1676 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1677 sc->sc_flags |= WM_F_LOCK_EECD;
1678 break;
1679 case WM_T_82571:
1680 case WM_T_82572:
1681 /* SPI */
1682 wm_set_spiaddrbits(sc);
1683 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1684 break;
1685 case WM_T_82573:
1686 sc->sc_flags |= WM_F_LOCK_SWSM;
1687 /* FALLTHROUGH */
1688 case WM_T_82574:
1689 case WM_T_82583:
1690 if (wm_nvm_is_onboard_eeprom(sc) == 0)
1691 sc->sc_flags |= WM_F_EEPROM_FLASH;
1692 else {
1693 /* SPI */
1694 wm_set_spiaddrbits(sc);
1695 }
1696 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1697 break;
1698 case WM_T_82575:
1699 case WM_T_82576:
1700 case WM_T_82580:
1701 case WM_T_82580ER:
1702 case WM_T_I350:
1703 case WM_T_I354:
1704 case WM_T_80003:
1705 /* SPI */
1706 wm_set_spiaddrbits(sc);
1707 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1708 | WM_F_LOCK_SWSM;
1709 break;
1710 case WM_T_ICH8:
1711 case WM_T_ICH9:
1712 case WM_T_ICH10:
1713 case WM_T_PCH:
1714 case WM_T_PCH2:
1715 case WM_T_PCH_LPT:
1716 /* FLASH */
1717 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1718 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1719 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1720 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1721 aprint_error_dev(sc->sc_dev,
1722 "can't map FLASH registers\n");
1723 goto fail_5;
1724 }
1725 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1726 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1727 ICH_FLASH_SECTOR_SIZE;
1728 sc->sc_ich8_flash_bank_size =
1729 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1730 sc->sc_ich8_flash_bank_size -=
1731 (reg & ICH_GFPREG_BASE_MASK);
1732 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1733 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
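		/*
		 * Worked example with a hypothetical GFPREG value, assuming
		 * ICH_FLASH_SECTOR_SIZE is the usual 4KB: if the base field
		 * is 1 and the limit field (bits 16..) is 2, this computes a
		 * flash base of one sector and (2 + 1 - 1) * 4096 = 8192
		 * bytes for the two NVM banks, which the final division by
		 * 2 * sizeof(uint16_t) turns into 2048 16-bit words per bank.
		 */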
1734 break;
1735 case WM_T_I210:
1736 case WM_T_I211:
1737 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1738 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1739 break;
1740 default:
1741 break;
1742 }
1743
1744 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1745 switch (sc->sc_type) {
1746 case WM_T_82571:
1747 case WM_T_82572:
1748 reg = CSR_READ(sc, WMREG_SWSM2);
1749 if ((reg & SWSM2_LOCK) != 0) {
1750 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1751 force_clear_smbi = true;
1752 } else
1753 force_clear_smbi = false;
1754 break;
1755 case WM_T_82573:
1756 case WM_T_82574:
1757 case WM_T_82583:
1758 force_clear_smbi = true;
1759 break;
1760 default:
1761 force_clear_smbi = false;
1762 break;
1763 }
1764 if (force_clear_smbi) {
1765 reg = CSR_READ(sc, WMREG_SWSM);
1766 if ((reg & SWSM_SMBI) != 0)
1767 aprint_error_dev(sc->sc_dev,
1768 "Please update the Bootagent\n");
1769 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1770 }
1771
1772 /*
1773 	 * Defer printing the EEPROM type until after verifying the checksum.
1774 * This allows the EEPROM type to be printed correctly in the case
1775 * that no EEPROM is attached.
1776 */
1777 /*
1778 * Validate the EEPROM checksum. If the checksum fails, flag
1779 * this for later, so we can fail future reads from the EEPROM.
1780 */
1781 if (wm_nvm_validate_checksum(sc)) {
1782 /*
1783 		 * Read again because some PCI-e parts fail the
1784 		 * first check due to the link being in a sleep state.
1785 */
1786 if (wm_nvm_validate_checksum(sc))
1787 sc->sc_flags |= WM_F_EEPROM_INVALID;
1788 }
1789
1790 /* Set device properties (macflags) */
1791 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1792
1793 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1794 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1795 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1796 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1797 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1798 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1799 } else {
1800 if (sc->sc_flags & WM_F_EEPROM_SPI)
1801 eetype = "SPI";
1802 else
1803 eetype = "MicroWire";
1804 aprint_verbose_dev(sc->sc_dev,
1805 "%u word (%d address bits) %s EEPROM\n",
1806 1U << sc->sc_ee_addrbits,
1807 sc->sc_ee_addrbits, eetype);
1808 }
1809
1810 switch (sc->sc_type) {
1811 case WM_T_82571:
1812 case WM_T_82572:
1813 case WM_T_82573:
1814 case WM_T_82574:
1815 case WM_T_82583:
1816 case WM_T_80003:
1817 case WM_T_ICH8:
1818 case WM_T_ICH9:
1819 case WM_T_ICH10:
1820 case WM_T_PCH:
1821 case WM_T_PCH2:
1822 case WM_T_PCH_LPT:
1823 if (wm_check_mng_mode(sc) != 0)
1824 wm_get_hw_control(sc);
1825 break;
1826 default:
1827 break;
1828 }
1829 wm_get_wakeup(sc);
1830 /*
1831 * Read the Ethernet address from the EEPROM, if not first found
1832 * in device properties.
1833 */
1834 ea = prop_dictionary_get(dict, "mac-address");
1835 if (ea != NULL) {
1836 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1837 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1838 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1839 } else {
1840 if (wm_read_mac_addr(sc, enaddr) != 0) {
1841 aprint_error_dev(sc->sc_dev,
1842 "unable to read Ethernet address\n");
1843 goto fail_5;
1844 }
1845 }
1846
1847 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1848 ether_sprintf(enaddr));
1849
1850 /*
1851 * Read the config info from the EEPROM, and set up various
1852 * bits in the control registers based on their contents.
1853 */
1854 pn = prop_dictionary_get(dict, "i82543-cfg1");
1855 if (pn != NULL) {
1856 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1857 cfg1 = (uint16_t) prop_number_integer_value(pn);
1858 } else {
1859 if (wm_nvm_read(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1860 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1861 goto fail_5;
1862 }
1863 }
1864
1865 pn = prop_dictionary_get(dict, "i82543-cfg2");
1866 if (pn != NULL) {
1867 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1868 cfg2 = (uint16_t) prop_number_integer_value(pn);
1869 } else {
1870 if (wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1871 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1872 goto fail_5;
1873 }
1874 }
1875
1876 /* check for WM_F_WOL */
1877 switch (sc->sc_type) {
1878 case WM_T_82542_2_0:
1879 case WM_T_82542_2_1:
1880 case WM_T_82543:
1881 /* dummy? */
1882 eeprom_data = 0;
1883 apme_mask = EEPROM_CFG3_APME;
1884 break;
1885 case WM_T_82544:
1886 apme_mask = EEPROM_CFG2_82544_APM_EN;
1887 eeprom_data = cfg2;
1888 break;
1889 case WM_T_82546:
1890 case WM_T_82546_3:
1891 case WM_T_82571:
1892 case WM_T_82572:
1893 case WM_T_82573:
1894 case WM_T_82574:
1895 case WM_T_82583:
1896 case WM_T_80003:
1897 default:
1898 apme_mask = EEPROM_CFG3_APME;
1899 wm_nvm_read(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1900 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1901 break;
1902 case WM_T_82575:
1903 case WM_T_82576:
1904 case WM_T_82580:
1905 case WM_T_82580ER:
1906 case WM_T_I350:
1907 case WM_T_I354: /* XXX ok? */
1908 case WM_T_ICH8:
1909 case WM_T_ICH9:
1910 case WM_T_ICH10:
1911 case WM_T_PCH:
1912 case WM_T_PCH2:
1913 case WM_T_PCH_LPT:
1914 /* XXX The funcid should be checked on some devices */
1915 apme_mask = WUC_APME;
1916 eeprom_data = CSR_READ(sc, WMREG_WUC);
1917 break;
1918 }
1919
1920 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1921 if ((eeprom_data & apme_mask) != 0)
1922 sc->sc_flags |= WM_F_WOL;
1923 #ifdef WM_DEBUG
1924 if ((sc->sc_flags & WM_F_WOL) != 0)
1925 printf("WOL\n");
1926 #endif
1927
1928 /*
1929 * XXX need special handling for some multiple port cards
1930 	 * to disable a particular port.
1931 */
1932
1933 if (sc->sc_type >= WM_T_82544) {
1934 pn = prop_dictionary_get(dict, "i82543-swdpin");
1935 if (pn != NULL) {
1936 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1937 swdpin = (uint16_t) prop_number_integer_value(pn);
1938 } else {
1939 if (wm_nvm_read(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1940 aprint_error_dev(sc->sc_dev,
1941 "unable to read SWDPIN\n");
1942 goto fail_5;
1943 }
1944 }
1945 }
1946
1947 if (cfg1 & EEPROM_CFG1_ILOS)
1948 sc->sc_ctrl |= CTRL_ILOS;
1949 if (sc->sc_type >= WM_T_82544) {
1950 sc->sc_ctrl |=
1951 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1952 CTRL_SWDPIO_SHIFT;
1953 sc->sc_ctrl |=
1954 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1955 CTRL_SWDPINS_SHIFT;
1956 } else {
1957 sc->sc_ctrl |=
1958 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1959 CTRL_SWDPIO_SHIFT;
1960 }
1961
1962 #if 0
1963 if (sc->sc_type >= WM_T_82544) {
1964 if (cfg1 & EEPROM_CFG1_IPS0)
1965 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1966 if (cfg1 & EEPROM_CFG1_IPS1)
1967 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1968 sc->sc_ctrl_ext |=
1969 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1970 CTRL_EXT_SWDPIO_SHIFT;
1971 sc->sc_ctrl_ext |=
1972 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1973 CTRL_EXT_SWDPINS_SHIFT;
1974 } else {
1975 sc->sc_ctrl_ext |=
1976 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1977 CTRL_EXT_SWDPIO_SHIFT;
1978 }
1979 #endif
1980
1981 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1982 #if 0
1983 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1984 #endif
1985
1986 /*
1987 * Set up some register offsets that are different between
1988 * the i82542 and the i82543 and later chips.
1989 */
1990 if (sc->sc_type < WM_T_82543) {
1991 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1992 sc->sc_tdt_reg = WMREG_OLD_TDT;
1993 } else {
1994 sc->sc_rdt_reg = WMREG_RDT;
1995 sc->sc_tdt_reg = WMREG_TDT;
1996 }
1997
1998 if (sc->sc_type == WM_T_PCH) {
1999 uint16_t val;
2000
2001 /* Save the NVM K1 bit setting */
2002 wm_nvm_read(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
2003
2004 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
2005 sc->sc_nvm_k1_enabled = 1;
2006 else
2007 sc->sc_nvm_k1_enabled = 0;
2008 }
2009
2010 /*
2011 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2012 * media structures accordingly.
2013 */
2014 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2015 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2016 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2017 || sc->sc_type == WM_T_82573
2018 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2019 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2020 wm_gmii_mediainit(sc, wmp->wmp_product);
2021 } else if (sc->sc_type < WM_T_82543 ||
2022 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2023 if (sc->sc_mediatype & WMP_F_COPPER) {
2024 aprint_error_dev(sc->sc_dev,
2025 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2026 sc->sc_mediatype = WMP_F_FIBER;
2027 }
2028 wm_tbi_mediainit(sc);
2029 } else {
2030 switch (sc->sc_type) {
2031 case WM_T_82575:
2032 case WM_T_82576:
2033 case WM_T_82580:
2034 case WM_T_82580ER:
2035 case WM_T_I350:
2036 case WM_T_I354:
2037 case WM_T_I210:
2038 case WM_T_I211:
2039 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2040 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2041 switch (link_mode) {
2042 case CTRL_EXT_LINK_MODE_1000KX:
2043 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2044 sc->sc_mediatype = WMP_F_SERDES;
2045 break;
2046 case CTRL_EXT_LINK_MODE_SGMII:
2047 if (wm_sgmii_uses_mdio(sc)) {
2048 aprint_verbose_dev(sc->sc_dev,
2049 "SGMII(MDIO)\n");
2050 sc->sc_flags |= WM_F_SGMII;
2051 sc->sc_mediatype = WMP_F_COPPER;
2052 break;
2053 }
2054 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2055 /*FALLTHROUGH*/
2056 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2057 sc->sc_mediatype = wm_get_sfp_media_type(sc);
2058 if (sc->sc_mediatype == WMP_F_UNKNOWN) {
2059 if (link_mode
2060 == CTRL_EXT_LINK_MODE_SGMII) {
2061 sc->sc_mediatype
2062 = WMP_F_COPPER;
2063 sc->sc_flags |= WM_F_SGMII;
2064 } else {
2065 sc->sc_mediatype
2066 = WMP_F_SERDES;
2067 aprint_verbose_dev(sc->sc_dev,
2068 "SERDES\n");
2069 }
2070 break;
2071 }
2072 if (sc->sc_mediatype == WMP_F_SERDES)
2073 aprint_verbose_dev(sc->sc_dev,
2074 "SERDES\n");
2075
2076 /* Change current link mode setting */
2077 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2078 switch (sc->sc_mediatype) {
2079 case WMP_F_COPPER:
2080 reg |= CTRL_EXT_LINK_MODE_SGMII;
2081 break;
2082 case WMP_F_SERDES:
2083 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2084 break;
2085 default:
2086 break;
2087 }
2088 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2089 break;
2090 case CTRL_EXT_LINK_MODE_GMII:
2091 default:
2092 sc->sc_mediatype = WMP_F_COPPER;
2093 break;
2094 }
2095
2097 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2098 				reg |= CTRL_EXT_I2C_ENA;
2099 			else
2100 				reg &= ~CTRL_EXT_I2C_ENA;
2101 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2102
2103 if (sc->sc_mediatype == WMP_F_COPPER)
2104 wm_gmii_mediainit(sc, wmp->wmp_product);
2105 else
2106 wm_tbi_mediainit(sc);
2107 break;
2108 default:
2109 if (sc->sc_mediatype & WMP_F_FIBER)
2110 aprint_error_dev(sc->sc_dev,
2111 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2112 sc->sc_mediatype = WMP_F_COPPER;
2113 wm_gmii_mediainit(sc, wmp->wmp_product);
2114 }
2115 }
2116
2117 ifp = &sc->sc_ethercom.ec_if;
2118 xname = device_xname(sc->sc_dev);
2119 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2120 ifp->if_softc = sc;
2121 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2122 ifp->if_ioctl = wm_ioctl;
2123 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2124 ifp->if_start = wm_nq_start;
2125 else
2126 ifp->if_start = wm_start;
2127 ifp->if_watchdog = wm_watchdog;
2128 ifp->if_init = wm_init;
2129 ifp->if_stop = wm_stop;
2130 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2131 IFQ_SET_READY(&ifp->if_snd);
2132
2133 /* Check for jumbo frame */
2134 switch (sc->sc_type) {
2135 case WM_T_82573:
2136 /* XXX limited to 9234 if ASPM is disabled */
2137 wm_nvm_read(sc, EEPROM_INIT_3GIO_3, 1, &io3);
2138 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
2139 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2140 break;
2141 case WM_T_82571:
2142 case WM_T_82572:
2143 case WM_T_82574:
2144 case WM_T_82575:
2145 case WM_T_82576:
2146 case WM_T_82580:
2147 case WM_T_82580ER:
2148 case WM_T_I350:
2149 	case WM_T_I354: /* XXX ok? */
2150 case WM_T_I210:
2151 case WM_T_I211:
2152 case WM_T_80003:
2153 case WM_T_ICH9:
2154 case WM_T_ICH10:
2155 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2156 case WM_T_PCH_LPT:
2157 /* XXX limited to 9234 */
2158 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2159 break;
2160 case WM_T_PCH:
2161 /* XXX limited to 4096 */
2162 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2163 break;
2164 case WM_T_82542_2_0:
2165 case WM_T_82542_2_1:
2166 case WM_T_82583:
2167 case WM_T_ICH8:
2168 /* No support for jumbo frame */
2169 break;
2170 default:
2171 /* ETHER_MAX_LEN_JUMBO */
2172 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2173 break;
2174 }
2175
2176 /* If we're a i82543 or greater, we can support VLANs. */
2177 if (sc->sc_type >= WM_T_82543)
2178 sc->sc_ethercom.ec_capabilities |=
2179 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2180
2181 /*
2182 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2183 * on i82543 and later.
2184 */
2185 if (sc->sc_type >= WM_T_82543) {
2186 ifp->if_capabilities |=
2187 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2188 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2189 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2190 IFCAP_CSUM_TCPv6_Tx |
2191 IFCAP_CSUM_UDPv6_Tx;
2192 }
2193
2194 /*
2195 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2196 *
2197 * 82541GI (8086:1076) ... no
2198 * 82572EI (8086:10b9) ... yes
2199 */
2200 if (sc->sc_type >= WM_T_82571) {
2201 ifp->if_capabilities |=
2202 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2203 }
2204
2205 /*
2206 * If we're a i82544 or greater (except i82547), we can do
2207 * TCP segmentation offload.
2208 */
2209 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2210 ifp->if_capabilities |= IFCAP_TSOv4;
2211 }
2212
2213 if (sc->sc_type >= WM_T_82571) {
2214 ifp->if_capabilities |= IFCAP_TSOv6;
2215 }
2216
2217 #ifdef WM_MPSAFE
2218 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2219 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2220 #else
2221 sc->sc_tx_lock = NULL;
2222 sc->sc_rx_lock = NULL;
2223 #endif
2224
2225 /* Attach the interface. */
2226 if_attach(ifp);
2227 ether_ifattach(ifp, enaddr);
2228 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2229 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2230 RND_FLAG_DEFAULT);
2231
2232 #ifdef WM_EVENT_COUNTERS
2233 /* Attach event counters. */
2234 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2235 NULL, xname, "txsstall");
2236 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2237 NULL, xname, "txdstall");
2238 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2239 NULL, xname, "txfifo_stall");
2240 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2241 NULL, xname, "txdw");
2242 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2243 NULL, xname, "txqe");
2244 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2245 NULL, xname, "rxintr");
2246 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2247 NULL, xname, "linkintr");
2248
2249 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2250 NULL, xname, "rxipsum");
2251 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2252 NULL, xname, "rxtusum");
2253 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2254 NULL, xname, "txipsum");
2255 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2256 NULL, xname, "txtusum");
2257 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2258 NULL, xname, "txtusum6");
2259
2260 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2261 NULL, xname, "txtso");
2262 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2263 NULL, xname, "txtso6");
2264 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2265 NULL, xname, "txtsopain");
2266
2267 for (i = 0; i < WM_NTXSEGS; i++) {
2268 snprintf(wm_txseg_evcnt_names[i],
2269 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2270 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2271 NULL, xname, wm_txseg_evcnt_names[i]);
2272 }
2273
2274 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2275 NULL, xname, "txdrop");
2276
2277 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2278 NULL, xname, "tu");
2279
2280 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2281 NULL, xname, "tx_xoff");
2282 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2283 NULL, xname, "tx_xon");
2284 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2285 NULL, xname, "rx_xoff");
2286 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2287 NULL, xname, "rx_xon");
2288 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2289 NULL, xname, "rx_macctl");
2290 #endif /* WM_EVENT_COUNTERS */
2291
2292 if (pmf_device_register(self, wm_suspend, wm_resume))
2293 pmf_class_network_register(self, ifp);
2294 else
2295 aprint_error_dev(self, "couldn't establish power handler\n");
2296
2297 sc->sc_flags |= WM_F_ATTACHED;
2298 return;
2299
2300 /*
2301 * Free any resources we've allocated during the failed attach
2302 * attempt. Do this in reverse order and fall through.
2303 */
2304 fail_5:
2305 for (i = 0; i < WM_NRXDESC; i++) {
2306 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2307 bus_dmamap_destroy(sc->sc_dmat,
2308 sc->sc_rxsoft[i].rxs_dmamap);
2309 }
2310 fail_4:
2311 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2312 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2313 bus_dmamap_destroy(sc->sc_dmat,
2314 sc->sc_txsoft[i].txs_dmamap);
2315 }
2316 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2317 fail_3:
2318 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2319 fail_2:
2320 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2321 sc->sc_cd_size);
2322 fail_1:
2323 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2324 fail_0:
2325 return;
2326 }
2327
2328 /* The detach function (ca_detach) */
2329 static int
2330 wm_detach(device_t self, int flags __unused)
2331 {
2332 struct wm_softc *sc = device_private(self);
2333 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2334 int i;
2335 #ifndef WM_MPSAFE
2336 int s;
2337 #endif
2338
2339 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2340 return 0;
2341
2342 #ifndef WM_MPSAFE
2343 s = splnet();
2344 #endif
2345 /* Stop the interface. Callouts are stopped in it. */
2346 wm_stop(ifp, 1);
2347
2348 #ifndef WM_MPSAFE
2349 splx(s);
2350 #endif
2351
2352 pmf_device_deregister(self);
2353
2354 /* Tell the firmware about the release */
2355 WM_BOTH_LOCK(sc);
2356 wm_release_manageability(sc);
2357 wm_release_hw_control(sc);
2358 WM_BOTH_UNLOCK(sc);
2359
2360 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2361
2362 /* Delete all remaining media. */
2363 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2364
2365 ether_ifdetach(ifp);
2366 if_detach(ifp);
2367
2369 /* Unload RX dmamaps and free mbufs */
2370 WM_RX_LOCK(sc);
2371 wm_rxdrain(sc);
2372 WM_RX_UNLOCK(sc);
2373 /* Must unlock here */
2374
2375 	/* Free DMA maps; this mirrors the cleanup at the end of wm_attach() */
2376 for (i = 0; i < WM_NRXDESC; i++) {
2377 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2378 bus_dmamap_destroy(sc->sc_dmat,
2379 sc->sc_rxsoft[i].rxs_dmamap);
2380 }
2381 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2382 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2383 bus_dmamap_destroy(sc->sc_dmat,
2384 sc->sc_txsoft[i].txs_dmamap);
2385 }
2386 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2387 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2388 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2389 sc->sc_cd_size);
2390 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2391
2392 /* Disestablish the interrupt handler */
2393 if (sc->sc_ih != NULL) {
2394 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2395 sc->sc_ih = NULL;
2396 }
2397
2398 /* Unmap the registers */
2399 if (sc->sc_ss) {
2400 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2401 sc->sc_ss = 0;
2402 }
2403
2404 if (sc->sc_ios) {
2405 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2406 sc->sc_ios = 0;
2407 }
2408
2409 if (sc->sc_tx_lock)
2410 mutex_obj_free(sc->sc_tx_lock);
2411 if (sc->sc_rx_lock)
2412 mutex_obj_free(sc->sc_rx_lock);
2413
2414 return 0;
2415 }
2416
2417 static bool
2418 wm_suspend(device_t self, const pmf_qual_t *qual)
2419 {
2420 struct wm_softc *sc = device_private(self);
2421
2422 wm_release_manageability(sc);
2423 wm_release_hw_control(sc);
2424 #ifdef WM_WOL
2425 wm_enable_wakeup(sc);
2426 #endif
2427
2428 return true;
2429 }
2430
2431 static bool
2432 wm_resume(device_t self, const pmf_qual_t *qual)
2433 {
2434 struct wm_softc *sc = device_private(self);
2435
2436 wm_init_manageability(sc);
2437
2438 return true;
2439 }
2440
2441 /*
2442 * wm_watchdog: [ifnet interface function]
2443 *
2444 * Watchdog timer handler.
2445 */
2446 static void
2447 wm_watchdog(struct ifnet *ifp)
2448 {
2449 struct wm_softc *sc = ifp->if_softc;
2450
2451 /*
2452 * Since we're using delayed interrupts, sweep up
2453 * before we report an error.
2454 */
2455 WM_TX_LOCK(sc);
2456 wm_txintr(sc);
2457 WM_TX_UNLOCK(sc);
2458
2459 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2460 #ifdef WM_DEBUG
2461 int i, j;
2462 struct wm_txsoft *txs;
2463 #endif
2464 log(LOG_ERR,
2465 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2466 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2467 sc->sc_txnext);
2468 ifp->if_oerrors++;
2469 #ifdef WM_DEBUG
2470 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2471 i = WM_NEXTTXS(sc, i)) {
2472 txs = &sc->sc_txsoft[i];
2473 printf("txs %d tx %d -> %d\n",
2474 i, txs->txs_firstdesc, txs->txs_lastdesc);
2475 for (j = txs->txs_firstdesc; ;
2476 j = WM_NEXTTX(sc, j)) {
2477 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2478 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2479 printf("\t %#08x%08x\n",
2480 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2481 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2482 if (j == txs->txs_lastdesc)
2483 break;
2484 }
2485 }
2486 #endif
2487 /* Reset the interface. */
2488 (void) wm_init(ifp);
2489 }
2490
2491 /* Try to get more packets going. */
2492 ifp->if_start(ifp);
2493 }
2494
2495 /*
2496 * wm_tick:
2497 *
2498 * One second timer, used to check link status, sweep up
2499 * completed transmit jobs, etc.
2500 */
2501 static void
2502 wm_tick(void *arg)
2503 {
2504 struct wm_softc *sc = arg;
2505 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2506 #ifndef WM_MPSAFE
2507 int s;
2508
2509 s = splnet();
2510 #endif
2511
2512 WM_TX_LOCK(sc);
2513
2514 if (sc->sc_stopping)
2515 goto out;
2516
2517 if (sc->sc_type >= WM_T_82542_2_1) {
2518 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2519 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2520 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2521 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2522 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2523 }
2524
2525 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2526 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2527 	    + CSR_READ(sc, WMREG_CRCERRS)
2528 + CSR_READ(sc, WMREG_ALGNERRC)
2529 + CSR_READ(sc, WMREG_SYMERRC)
2530 + CSR_READ(sc, WMREG_RXERRC)
2531 + CSR_READ(sc, WMREG_SEC)
2532 + CSR_READ(sc, WMREG_CEXTERR)
2533 + CSR_READ(sc, WMREG_RLEC);
2534 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2535
2536 if (sc->sc_flags & WM_F_HAS_MII)
2537 mii_tick(&sc->sc_mii);
2538 else
2539 wm_tbi_check_link(sc);
2540
2541 out:
2542 WM_TX_UNLOCK(sc);
2543 #ifndef WM_MPSAFE
2544 splx(s);
2545 #endif
2546
2547 if (!sc->sc_stopping)
2548 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2549 }
2550
2551 static int
2552 wm_ifflags_cb(struct ethercom *ec)
2553 {
2554 struct ifnet *ifp = &ec->ec_if;
2555 struct wm_softc *sc = ifp->if_softc;
2556 int change = ifp->if_flags ^ sc->sc_if_flags;
2557 int rc = 0;
2558
2559 WM_BOTH_LOCK(sc);
2560
2561 if (change != 0)
2562 sc->sc_if_flags = ifp->if_flags;
2563
2564 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2565 rc = ENETRESET;
2566 goto out;
2567 }
2568
2569 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2570 wm_set_filter(sc);
2571
2572 wm_set_vlan(sc);
2573
2574 out:
2575 WM_BOTH_UNLOCK(sc);
2576
2577 return rc;
2578 }
2579
2580 /*
2581 * wm_ioctl: [ifnet interface function]
2582 *
2583 * Handle control requests from the operator.
2584 */
2585 static int
2586 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2587 {
2588 struct wm_softc *sc = ifp->if_softc;
2589 struct ifreq *ifr = (struct ifreq *) data;
2590 struct ifaddr *ifa = (struct ifaddr *)data;
2591 struct sockaddr_dl *sdl;
2592 int s, error;
2593
2594 #ifndef WM_MPSAFE
2595 s = splnet();
2596 #endif
2597 WM_BOTH_LOCK(sc);
2598
2599 switch (cmd) {
2600 case SIOCSIFMEDIA:
2601 case SIOCGIFMEDIA:
2602 /* Flow control requires full-duplex mode. */
2603 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2604 (ifr->ifr_media & IFM_FDX) == 0)
2605 ifr->ifr_media &= ~IFM_ETH_FMASK;
2606 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2607 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2608 /* We can do both TXPAUSE and RXPAUSE. */
2609 ifr->ifr_media |=
2610 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2611 }
2612 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2613 }
2614 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2615 break;
2616 case SIOCINITIFADDR:
2617 if (ifa->ifa_addr->sa_family == AF_LINK) {
2618 sdl = satosdl(ifp->if_dl->ifa_addr);
2619 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2620 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2621 /* unicast address is first multicast entry */
2622 wm_set_filter(sc);
2623 error = 0;
2624 break;
2625 }
2626 /*FALLTHROUGH*/
2627 default:
2628 WM_BOTH_UNLOCK(sc);
2629 #ifdef WM_MPSAFE
2630 s = splnet();
2631 #endif
2632 /* It may call wm_start, so unlock here */
2633 error = ether_ioctl(ifp, cmd, data);
2634 #ifdef WM_MPSAFE
2635 splx(s);
2636 #endif
2637 WM_BOTH_LOCK(sc);
2638
2639 if (error != ENETRESET)
2640 break;
2641
2642 error = 0;
2643
2644 if (cmd == SIOCSIFCAP) {
2645 WM_BOTH_UNLOCK(sc);
2646 error = (*ifp->if_init)(ifp);
2647 WM_BOTH_LOCK(sc);
2648 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2649 ;
2650 else if (ifp->if_flags & IFF_RUNNING) {
2651 /*
2652 * Multicast list has changed; set the hardware filter
2653 * accordingly.
2654 */
2655 wm_set_filter(sc);
2656 }
2657 break;
2658 }
2659
2660 WM_BOTH_UNLOCK(sc);
2661
2662 /* Try to get more packets going. */
2663 ifp->if_start(ifp);
2664
2665 #ifndef WM_MPSAFE
2666 splx(s);
2667 #endif
2668 return error;
2669 }
2670
2671 /* MAC address related */
2672
2673 static int
2674 wm_check_alt_mac_addr(struct wm_softc *sc)
2675 {
2676 uint16_t myea[ETHER_ADDR_LEN / 2];
2677 uint16_t offset = EEPROM_OFF_MACADDR;
2678
2679 /* Try to read alternative MAC address pointer */
2680 if (wm_nvm_read(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2681 return -1;
2682
2683 /* Check pointer */
2684 if (offset == 0xffff)
2685 return -1;
2686
2687 /*
2688 	 * Check whether the alternative MAC address is valid or not.
2689 	 * Some cards have a non-0xffff pointer but don't actually use
2690 	 * an alternative MAC address.
2691 	 *
2692 	 * Check whether the multicast (group) bit is set or not.
2693 */
2694 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2695 if (((myea[0] & 0xff) & 0x01) == 0)
2696 return 0; /* found! */
2697
2698 /* not found */
2699 return -1;
2700 }
2701
2702 static int
2703 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2704 {
2705 uint16_t myea[ETHER_ADDR_LEN / 2];
2706 uint16_t offset = EEPROM_OFF_MACADDR;
2707 int do_invert = 0;
2708
2709 switch (sc->sc_type) {
2710 case WM_T_82580:
2711 case WM_T_82580ER:
2712 case WM_T_I350:
2713 case WM_T_I354:
2714 switch (sc->sc_funcid) {
2715 case 0:
2716 /* default value (== EEPROM_OFF_MACADDR) */
2717 break;
2718 case 1:
2719 offset = EEPROM_OFF_LAN1;
2720 break;
2721 case 2:
2722 offset = EEPROM_OFF_LAN2;
2723 break;
2724 case 3:
2725 offset = EEPROM_OFF_LAN3;
2726 break;
2727 default:
2728 goto bad;
2729 /* NOTREACHED */
2730 break;
2731 }
2732 break;
2733 case WM_T_82571:
2734 case WM_T_82575:
2735 case WM_T_82576:
2736 case WM_T_80003:
2737 case WM_T_I210:
2738 case WM_T_I211:
2739 if (wm_check_alt_mac_addr(sc) != 0) {
2740 /* reset the offset to LAN0 */
2741 offset = EEPROM_OFF_MACADDR;
2742 if ((sc->sc_funcid & 0x01) == 1)
2743 do_invert = 1;
2744 goto do_read;
2745 }
2746 switch (sc->sc_funcid) {
2747 case 0:
2748 /*
2749 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
2750 * itself.
2751 */
2752 break;
2753 case 1:
2754 offset += EEPROM_OFF_MACADDR_LAN1;
2755 break;
2756 case 2:
2757 offset += EEPROM_OFF_MACADDR_LAN2;
2758 break;
2759 case 3:
2760 offset += EEPROM_OFF_MACADDR_LAN3;
2761 break;
2762 default:
2763 goto bad;
2764 /* NOTREACHED */
2765 break;
2766 }
2767 break;
2768 default:
2769 if ((sc->sc_funcid & 0x01) == 1)
2770 do_invert = 1;
2771 break;
2772 }
2773
2774 do_read:
2775 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2776 myea) != 0) {
2777 goto bad;
2778 }
2779
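	/*
	 * The EEPROM stores the address as three little-endian 16-bit
	 * words; e.g. myea[0] = 0x1100 unpacks to enaddr[0] = 0x00 and
	 * enaddr[1] = 0x11.
	 */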
2780 enaddr[0] = myea[0] & 0xff;
2781 enaddr[1] = myea[0] >> 8;
2782 enaddr[2] = myea[1] & 0xff;
2783 enaddr[3] = myea[1] >> 8;
2784 enaddr[4] = myea[2] & 0xff;
2785 enaddr[5] = myea[2] >> 8;
2786
2787 /*
2788 * Toggle the LSB of the MAC address on the second port
2789 * of some dual port cards.
2790 */
2791 if (do_invert != 0)
2792 enaddr[5] ^= 1;
2793
2794 return 0;
2795
2796 bad:
2797 return -1;
2798 }
2799
2800 /*
2801 * wm_set_ral:
2802 *
2803  *	Set an entry in the receive address list.
2804 */
2805 static void
2806 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2807 {
2808 uint32_t ral_lo, ral_hi;
2809
2810 if (enaddr != NULL) {
2811 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2812 (enaddr[3] << 24);
2813 ral_hi = enaddr[4] | (enaddr[5] << 8);
2814 ral_hi |= RAL_AV;
2815 } else {
2816 ral_lo = 0;
2817 ral_hi = 0;
2818 }
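	/*
	 * For example, 00:11:22:33:44:55 packs as ral_lo = 0x33221100 and
	 * ral_hi = 0x5544 | RAL_AV, matching the little-endian layout of
	 * the receive address registers.
	 */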
2819
2820 if (sc->sc_type >= WM_T_82544) {
2821 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2822 ral_lo);
2823 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2824 ral_hi);
2825 } else {
2826 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2827 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2828 }
2829 }
2830
2831 /*
2832 * wm_mchash:
2833 *
2834  *	Compute the hash of the multicast address for the 4096-bit
2835  *	multicast filter (1024-bit on ICH/PCH parts).
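 *
 *	For example, with sc_mchash_type 0 the index comes from the low
 *	bits of the last two octets: 01:00:5e:00:00:fb has enaddr[4] = 0x00
 *	and enaddr[5] = 0xfb, giving hash = (0x00 >> 4) | (0xfb << 4) = 0xfb0.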
2836 */
2837 static uint32_t
2838 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2839 {
2840 static const int lo_shift[4] = { 4, 3, 2, 0 };
2841 static const int hi_shift[4] = { 4, 5, 6, 8 };
2842 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2843 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2844 uint32_t hash;
2845
2846 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2847 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2848 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2849 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2850 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2851 return (hash & 0x3ff);
2852 }
2853 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2854 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2855
2856 return (hash & 0xfff);
2857 }
2858
2859 /*
2860 * wm_set_filter:
2861 *
2862 * Set up the receive filter.
2863 */
2864 static void
2865 wm_set_filter(struct wm_softc *sc)
2866 {
2867 struct ethercom *ec = &sc->sc_ethercom;
2868 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2869 struct ether_multi *enm;
2870 struct ether_multistep step;
2871 bus_addr_t mta_reg;
2872 uint32_t hash, reg, bit;
2873 int i, size;
2874
2875 if (sc->sc_type >= WM_T_82544)
2876 mta_reg = WMREG_CORDOVA_MTA;
2877 else
2878 mta_reg = WMREG_MTA;
2879
2880 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2881
2882 if (ifp->if_flags & IFF_BROADCAST)
2883 sc->sc_rctl |= RCTL_BAM;
2884 if (ifp->if_flags & IFF_PROMISC) {
2885 sc->sc_rctl |= RCTL_UPE;
2886 goto allmulti;
2887 }
2888
2889 /*
2890 * Set the station address in the first RAL slot, and
2891 * clear the remaining slots.
2892 */
2893 if (sc->sc_type == WM_T_ICH8)
2894 		size = WM_RAL_TABSIZE_ICH8 - 1;
2895 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2896 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2897 || (sc->sc_type == WM_T_PCH_LPT))
2898 size = WM_RAL_TABSIZE_ICH8;
2899 else if (sc->sc_type == WM_T_82575)
2900 size = WM_RAL_TABSIZE_82575;
2901 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2902 size = WM_RAL_TABSIZE_82576;
2903 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2904 size = WM_RAL_TABSIZE_I350;
2905 else
2906 size = WM_RAL_TABSIZE;
2907 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2908 for (i = 1; i < size; i++)
2909 wm_set_ral(sc, NULL, i);
2910
2911 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2912 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2913 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2914 size = WM_ICH8_MC_TABSIZE;
2915 else
2916 size = WM_MC_TABSIZE;
2917 /* Clear out the multicast table. */
2918 for (i = 0; i < size; i++)
2919 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2920
2921 ETHER_FIRST_MULTI(step, ec, enm);
2922 while (enm != NULL) {
2923 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2924 /*
2925 * We must listen to a range of multicast addresses.
2926 * For now, just accept all multicasts, rather than
2927 * trying to set only those filter bits needed to match
2928 * the range. (At this time, the only use of address
2929 * ranges is for IP multicast routing, for which the
2930 * range is big enough to require all bits set.)
2931 */
2932 goto allmulti;
2933 }
2934
2935 hash = wm_mchash(sc, enm->enm_addrlo);
2936
2937 reg = (hash >> 5);
2938 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2939 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2940 || (sc->sc_type == WM_T_PCH2)
2941 || (sc->sc_type == WM_T_PCH_LPT))
2942 reg &= 0x1f;
2943 else
2944 reg &= 0x7f;
2945 bit = hash & 0x1f;
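		/*
		 * Continuing the wm_mchash() example, hash 0xfb0 selects
		 * MTA register 0x7d (0xfb0 >> 5) and bit 16 (0xfb0 & 0x1f)
		 * on non-ICH parts.
		 */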
2946
2947 hash = CSR_READ(sc, mta_reg + (reg << 2));
2948 hash |= 1U << bit;
2949
2950 /* XXX Hardware bug?? */
2951 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2952 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2953 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2954 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2955 } else
2956 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2957
2958 ETHER_NEXT_MULTI(step, enm);
2959 }
2960
2961 ifp->if_flags &= ~IFF_ALLMULTI;
2962 goto setit;
2963
2964 allmulti:
2965 ifp->if_flags |= IFF_ALLMULTI;
2966 sc->sc_rctl |= RCTL_MPE;
2967
2968 setit:
2969 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2970 }
2971
2972 /* Reset and init related */
2973
2974 static void
2975 wm_set_vlan(struct wm_softc *sc)
2976 {
2977 /* Deal with VLAN enables. */
2978 if (VLAN_ATTACHED(&sc->sc_ethercom))
2979 sc->sc_ctrl |= CTRL_VME;
2980 else
2981 sc->sc_ctrl &= ~CTRL_VME;
2982
2983 /* Write the control registers. */
2984 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2985 }
2986
2987 static void
2988 wm_set_pcie_completion_timeout(struct wm_softc *sc)
2989 {
2990 uint32_t gcr;
2991 pcireg_t ctrl2;
2992
2993 gcr = CSR_READ(sc, WMREG_GCR);
2994
2995 /* Only take action if timeout value is defaulted to 0 */
2996 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
2997 goto out;
2998
2999 if ((gcr & GCR_CAP_VER2) == 0) {
3000 gcr |= GCR_CMPL_TMOUT_10MS;
3001 goto out;
3002 }
3003
3004 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3005 sc->sc_pcixe_capoff + PCIE_DCSR2);
3006 ctrl2 |= WM_PCIE_DCSR2_16MS;
3007 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3008 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3009
3010 out:
3011 /* Disable completion timeout resend */
3012 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3013
3014 CSR_WRITE(sc, WMREG_GCR, gcr);
3015 }
3016
3017 void
3018 wm_get_auto_rd_done(struct wm_softc *sc)
3019 {
3020 int i;
3021
3022 /* wait for eeprom to reload */
3023 switch (sc->sc_type) {
3024 case WM_T_82571:
3025 case WM_T_82572:
3026 case WM_T_82573:
3027 case WM_T_82574:
3028 case WM_T_82583:
3029 case WM_T_82575:
3030 case WM_T_82576:
3031 case WM_T_82580:
3032 case WM_T_82580ER:
3033 case WM_T_I350:
3034 case WM_T_I354:
3035 case WM_T_I210:
3036 case WM_T_I211:
3037 case WM_T_80003:
3038 case WM_T_ICH8:
3039 case WM_T_ICH9:
3040 for (i = 0; i < 10; i++) {
3041 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3042 break;
3043 delay(1000);
3044 }
3045 if (i == 10) {
3046 log(LOG_ERR, "%s: auto read from eeprom failed to "
3047 "complete\n", device_xname(sc->sc_dev));
3048 }
3049 break;
3050 default:
3051 break;
3052 }
3053 }
3054
3055 void
3056 wm_lan_init_done(struct wm_softc *sc)
3057 {
3058 uint32_t reg = 0;
3059 int i;
3060
3061 /* wait for eeprom to reload */
3062 switch (sc->sc_type) {
3063 case WM_T_ICH10:
3064 case WM_T_PCH:
3065 case WM_T_PCH2:
3066 case WM_T_PCH_LPT:
3067 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3068 reg = CSR_READ(sc, WMREG_STATUS);
3069 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3070 break;
3071 delay(100);
3072 }
3073 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3074 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3075 "complete\n", device_xname(sc->sc_dev), __func__);
3076 }
3077 break;
3078 default:
3079 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3080 __func__);
3081 break;
3082 }
3083
3084 reg &= ~STATUS_LAN_INIT_DONE;
3085 CSR_WRITE(sc, WMREG_STATUS, reg);
3086 }
3087
3088 void
3089 wm_get_cfg_done(struct wm_softc *sc)
3090 {
3091 int mask;
3092 uint32_t reg;
3093 int i;
3094
3095 /* wait for eeprom to reload */
3096 switch (sc->sc_type) {
3097 case WM_T_82542_2_0:
3098 case WM_T_82542_2_1:
3099 /* null */
3100 break;
3101 case WM_T_82543:
3102 case WM_T_82544:
3103 case WM_T_82540:
3104 case WM_T_82545:
3105 case WM_T_82545_3:
3106 case WM_T_82546:
3107 case WM_T_82546_3:
3108 case WM_T_82541:
3109 case WM_T_82541_2:
3110 case WM_T_82547:
3111 case WM_T_82547_2:
3112 case WM_T_82573:
3113 case WM_T_82574:
3114 case WM_T_82583:
3115 /* generic */
3116 delay(10*1000);
3117 break;
3118 case WM_T_80003:
3119 case WM_T_82571:
3120 case WM_T_82572:
3121 case WM_T_82575:
3122 case WM_T_82576:
3123 case WM_T_82580:
3124 case WM_T_82580ER:
3125 case WM_T_I350:
3126 case WM_T_I354:
3127 case WM_T_I210:
3128 case WM_T_I211:
3129 if (sc->sc_type == WM_T_82571) {
3130 /* Only 82571 shares port 0 */
3131 mask = EEMNGCTL_CFGDONE_0;
3132 } else
3133 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3134 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3135 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3136 break;
3137 delay(1000);
3138 }
3139 if (i >= WM_PHY_CFG_TIMEOUT) {
3140 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3141 device_xname(sc->sc_dev), __func__));
3142 }
3143 break;
3144 case WM_T_ICH8:
3145 case WM_T_ICH9:
3146 case WM_T_ICH10:
3147 case WM_T_PCH:
3148 case WM_T_PCH2:
3149 case WM_T_PCH_LPT:
3150 delay(10*1000);
3151 if (sc->sc_type >= WM_T_ICH10)
3152 wm_lan_init_done(sc);
3153 else
3154 wm_get_auto_rd_done(sc);
3155
3156 reg = CSR_READ(sc, WMREG_STATUS);
3157 if ((reg & STATUS_PHYRA) != 0)
3158 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3159 break;
3160 default:
3161 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3162 __func__);
3163 break;
3164 }
3165 }
3166
3167 /*
3168 * wm_reset:
3169 *
3170 * Reset the i82542 chip.
3171 */
3172 static void
3173 wm_reset(struct wm_softc *sc)
3174 {
3175 int phy_reset = 0;
3176 int error = 0;
3177 uint32_t reg, mask;
3178
3179 /*
3180 * Allocate on-chip memory according to the MTU size.
3181 * The Packet Buffer Allocation register must be written
3182 * before the chip is reset.
3183 */
3184 switch (sc->sc_type) {
3185 case WM_T_82547:
3186 case WM_T_82547_2:
3187 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3188 PBA_22K : PBA_30K;
3189 sc->sc_txfifo_head = 0;
3190 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3191 sc->sc_txfifo_size =
3192 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3193 sc->sc_txfifo_stall = 0;
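		/*
		 * Illustrative arithmetic, assuming the PBA_* constants
		 * count kilobytes and PBA_BYTE_SHIFT converts to bytes:
		 * with an Rx allocation of PBA_30K, the Tx FIFO gets the
		 * remaining (40 - 30)KB of the 40KB packet buffer.
		 */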
3194 break;
3195 case WM_T_82571:
3196 case WM_T_82572:
3197 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3198 case WM_T_I350:
3199 case WM_T_I354:
3200 case WM_T_80003:
3201 sc->sc_pba = PBA_32K;
3202 break;
3203 case WM_T_82580:
3204 case WM_T_82580ER:
3205 sc->sc_pba = PBA_35K;
3206 break;
3207 case WM_T_I210:
3208 case WM_T_I211:
3209 sc->sc_pba = PBA_34K;
3210 break;
3211 case WM_T_82576:
3212 sc->sc_pba = PBA_64K;
3213 break;
3214 case WM_T_82573:
3215 sc->sc_pba = PBA_12K;
3216 break;
3217 case WM_T_82574:
3218 case WM_T_82583:
3219 sc->sc_pba = PBA_20K;
3220 break;
3221 case WM_T_ICH8:
3222 sc->sc_pba = PBA_8K;
3223 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3224 break;
3225 case WM_T_ICH9:
3226 case WM_T_ICH10:
3227 sc->sc_pba = PBA_10K;
3228 break;
3229 case WM_T_PCH:
3230 case WM_T_PCH2:
3231 case WM_T_PCH_LPT:
3232 sc->sc_pba = PBA_26K;
3233 break;
3234 default:
3235 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3236 PBA_40K : PBA_48K;
3237 break;
3238 }
3239 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3240
3241 /* Prevent the PCI-E bus from sticking */
3242 if (sc->sc_flags & WM_F_PCIE) {
3243 int timeout = 800;
3244
3245 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3246 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3247
3248 while (timeout--) {
3249 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3250 == 0)
3251 break;
3252 delay(100);
3253 }
3254 }
3255
3256 /* Set the completion timeout for interface */
3257 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3258 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
3259 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3260 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3261 wm_set_pcie_completion_timeout(sc);
3262
3263 /* Clear interrupt */
3264 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3265
3266 /* Stop the transmit and receive processes. */
3267 CSR_WRITE(sc, WMREG_RCTL, 0);
3268 sc->sc_rctl &= ~RCTL_EN;
3269 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3270 CSR_WRITE_FLUSH(sc);
3271
3272 /* XXX set_tbi_sbp_82543() */
3273
3274 delay(10*1000);
3275
3276 /* Must acquire the MDIO ownership before MAC reset */
3277 switch (sc->sc_type) {
3278 case WM_T_82573:
3279 case WM_T_82574:
3280 case WM_T_82583:
3281 error = wm_get_hw_semaphore_82573(sc);
3282 break;
3283 default:
3284 break;
3285 }
3286
3287 /*
3288 * 82541 Errata 29? & 82547 Errata 28?
3289 * See also the description about PHY_RST bit in CTRL register
3290 * in 8254x_GBe_SDM.pdf.
3291 */
3292 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3293 CSR_WRITE(sc, WMREG_CTRL,
3294 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3295 CSR_WRITE_FLUSH(sc);
3296 delay(5000);
3297 }
3298
3299 switch (sc->sc_type) {
3300 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3301 case WM_T_82541:
3302 case WM_T_82541_2:
3303 case WM_T_82547:
3304 case WM_T_82547_2:
3305 /*
3306 * On some chipsets, a reset through a memory-mapped write
3307 * cycle can cause the chip to reset before completing the
3308 		 * write cycle. This causes a major headache that can be
3309 * avoided by issuing the reset via indirect register writes
3310 * through I/O space.
3311 *
3312 * So, if we successfully mapped the I/O BAR at attach time,
3313 * use that. Otherwise, try our luck with a memory-mapped
3314 * reset.
3315 */
3316 if (sc->sc_flags & WM_F_IOH_VALID)
3317 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3318 else
3319 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3320 break;
3321 case WM_T_82545_3:
3322 case WM_T_82546_3:
3323 /* Use the shadow control register on these chips. */
3324 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3325 break;
3326 case WM_T_80003:
3327 mask = swfwphysem[sc->sc_funcid];
3328 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3329 wm_get_swfw_semaphore(sc, mask);
3330 CSR_WRITE(sc, WMREG_CTRL, reg);
3331 wm_put_swfw_semaphore(sc, mask);
3332 break;
3333 case WM_T_ICH8:
3334 case WM_T_ICH9:
3335 case WM_T_ICH10:
3336 case WM_T_PCH:
3337 case WM_T_PCH2:
3338 case WM_T_PCH_LPT:
3339 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3340 if (wm_check_reset_block(sc) == 0) {
3341 /*
3342 * Gate automatic PHY configuration by hardware on
3343 * non-managed 82579
3344 */
3345 if ((sc->sc_type == WM_T_PCH2)
3346 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3347 != 0))
3348 wm_gate_hw_phy_config_ich8lan(sc, 1);
3349
3351 reg |= CTRL_PHY_RESET;
3352 phy_reset = 1;
3353 }
3354 wm_get_swfwhw_semaphore(sc);
3355 CSR_WRITE(sc, WMREG_CTRL, reg);
3356 		/* Don't insert a completion barrier during the reset */
3357 delay(20*1000);
3358 wm_put_swfwhw_semaphore(sc);
3359 break;
3360 case WM_T_82542_2_0:
3361 case WM_T_82542_2_1:
3362 case WM_T_82543:
3363 case WM_T_82540:
3364 case WM_T_82545:
3365 case WM_T_82546:
3366 case WM_T_82571:
3367 case WM_T_82572:
3368 case WM_T_82573:
3369 case WM_T_82574:
3370 case WM_T_82575:
3371 case WM_T_82576:
3372 case WM_T_82580:
3373 case WM_T_82580ER:
3374 case WM_T_82583:
3375 case WM_T_I350:
3376 case WM_T_I354:
3377 case WM_T_I210:
3378 case WM_T_I211:
3379 default:
3380 /* Everything else can safely use the documented method. */
3381 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3382 break;
3383 }
3384
3385 /* Must release the MDIO ownership after MAC reset */
3386 switch (sc->sc_type) {
3387 case WM_T_82573:
3388 case WM_T_82574:
3389 case WM_T_82583:
3390 if (error == 0)
3391 wm_put_hw_semaphore_82573(sc);
3392 break;
3393 default:
3394 break;
3395 }
3396
3397 if (phy_reset != 0)
3398 wm_get_cfg_done(sc);
3399
3400 /* reload EEPROM */
3401 switch (sc->sc_type) {
3402 case WM_T_82542_2_0:
3403 case WM_T_82542_2_1:
3404 case WM_T_82543:
3405 case WM_T_82544:
3406 delay(10);
3407 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3408 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3409 CSR_WRITE_FLUSH(sc);
3410 delay(2000);
3411 break;
3412 case WM_T_82540:
3413 case WM_T_82545:
3414 case WM_T_82545_3:
3415 case WM_T_82546:
3416 case WM_T_82546_3:
3417 delay(5*1000);
3418 /* XXX Disable HW ARPs on ASF enabled adapters */
3419 break;
3420 case WM_T_82541:
3421 case WM_T_82541_2:
3422 case WM_T_82547:
3423 case WM_T_82547_2:
3424 delay(20000);
3425 /* XXX Disable HW ARPs on ASF enabled adapters */
3426 break;
3427 case WM_T_82571:
3428 case WM_T_82572:
3429 case WM_T_82573:
3430 case WM_T_82574:
3431 case WM_T_82583:
3432 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3433 delay(10);
3434 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3435 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3436 CSR_WRITE_FLUSH(sc);
3437 }
3438 /* check EECD_EE_AUTORD */
3439 wm_get_auto_rd_done(sc);
3440 /*
3441 		 * PHY configuration from NVM starts just after EECD_EE_AUTORD
3442 		 * is set.
3443 */
3444 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3445 || (sc->sc_type == WM_T_82583))
3446 delay(25*1000);
3447 break;
3448 case WM_T_82575:
3449 case WM_T_82576:
3450 case WM_T_82580:
3451 case WM_T_82580ER:
3452 case WM_T_I350:
3453 case WM_T_I354:
3454 case WM_T_I210:
3455 case WM_T_I211:
3456 case WM_T_80003:
3457 /* check EECD_EE_AUTORD */
3458 wm_get_auto_rd_done(sc);
3459 break;
3460 case WM_T_ICH8:
3461 case WM_T_ICH9:
3462 case WM_T_ICH10:
3463 case WM_T_PCH:
3464 case WM_T_PCH2:
3465 case WM_T_PCH_LPT:
3466 break;
3467 default:
3468 panic("%s: unknown type\n", __func__);
3469 }
3470
3471 /* Check whether EEPROM is present or not */
3472 switch (sc->sc_type) {
3473 case WM_T_82575:
3474 case WM_T_82576:
3475 #if 0 /* XXX */
3476 case WM_T_82580:
3477 case WM_T_82580ER:
3478 #endif
3479 case WM_T_I350:
3480 case WM_T_I354:
3481 case WM_T_ICH8:
3482 case WM_T_ICH9:
3483 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3484 /* Not found */
3485 sc->sc_flags |= WM_F_EEPROM_INVALID;
3486 if ((sc->sc_type == WM_T_82575)
3487 || (sc->sc_type == WM_T_82576)
3488 || (sc->sc_type == WM_T_82580)
3489 || (sc->sc_type == WM_T_82580ER)
3490 || (sc->sc_type == WM_T_I350)
3491 || (sc->sc_type == WM_T_I354))
3492 wm_reset_init_script_82575(sc);
3493 }
3494 break;
3495 default:
3496 break;
3497 }
3498
3499 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
3500 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3501 /* clear global device reset status bit */
3502 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3503 }
3504
3505 /* Clear any pending interrupt events. */
3506 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3507 reg = CSR_READ(sc, WMREG_ICR);
3508
3509 /* reload sc_ctrl */
3510 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3511
3512 if (sc->sc_type == WM_T_I350)
3513 wm_set_eee_i350(sc);
3514
3515 /* dummy read from WUC */
3516 if (sc->sc_type == WM_T_PCH)
3517 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3518 /*
3519 * For PCH, this write will make sure that any noise will be detected
3520 * as a CRC error and be dropped rather than show up as a bad packet
3521 	 * to the DMA engine.
3522 */
3523 if (sc->sc_type == WM_T_PCH)
3524 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3525
3526 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3527 CSR_WRITE(sc, WMREG_WUC, 0);
3528
3529 /* XXX need special handling for 82580 */
3530 }
3531
3532 /*
3533 * wm_add_rxbuf:
3534 *
3535  *	Add a receive buffer to the indicated descriptor.
3536 */
3537 static int
3538 wm_add_rxbuf(struct wm_softc *sc, int idx)
3539 {
3540 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3541 struct mbuf *m;
3542 int error;
3543
3544 KASSERT(WM_RX_LOCKED(sc));
3545
3546 MGETHDR(m, M_DONTWAIT, MT_DATA);
3547 if (m == NULL)
3548 return ENOBUFS;
3549
3550 MCLGET(m, M_DONTWAIT);
3551 if ((m->m_flags & M_EXT) == 0) {
3552 m_freem(m);
3553 return ENOBUFS;
3554 }
3555
3556 if (rxs->rxs_mbuf != NULL)
3557 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3558
3559 rxs->rxs_mbuf = m;
3560
3561 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3562 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3563 BUS_DMA_READ|BUS_DMA_NOWAIT);
3564 if (error) {
3565 /* XXX XXX XXX */
3566 aprint_error_dev(sc->sc_dev,
3567 "unable to load rx DMA map %d, error = %d\n",
3568 idx, error);
3569 panic("wm_add_rxbuf");
3570 }
3571
3572 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3573 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3574
3575 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3576 if ((sc->sc_rctl & RCTL_EN) != 0)
3577 WM_INIT_RXDESC(sc, idx);
3578 } else
3579 WM_INIT_RXDESC(sc, idx);
3580
3581 return 0;
3582 }
3583
3584 /*
3585 * wm_rxdrain:
3586 *
3587 * Drain the receive queue.
3588 */
3589 static void
3590 wm_rxdrain(struct wm_softc *sc)
3591 {
3592 struct wm_rxsoft *rxs;
3593 int i;
3594
3595 KASSERT(WM_RX_LOCKED(sc));
3596
3597 for (i = 0; i < WM_NRXDESC; i++) {
3598 rxs = &sc->sc_rxsoft[i];
3599 if (rxs->rxs_mbuf != NULL) {
3600 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3601 m_freem(rxs->rxs_mbuf);
3602 rxs->rxs_mbuf = NULL;
3603 }
3604 }
3605 }
3606
3607 /*
3608 * wm_init: [ifnet interface function]
3609 *
3610 * Initialize the interface.
3611 */
3612 static int
3613 wm_init(struct ifnet *ifp)
3614 {
3615 struct wm_softc *sc = ifp->if_softc;
3616 int ret;
3617
3618 WM_BOTH_LOCK(sc);
3619 ret = wm_init_locked(ifp);
3620 WM_BOTH_UNLOCK(sc);
3621
3622 return ret;
3623 }
3624
3625 static int
3626 wm_init_locked(struct ifnet *ifp)
3627 {
3628 struct wm_softc *sc = ifp->if_softc;
3629 struct wm_rxsoft *rxs;
3630 int i, j, trynum, error = 0;
3631 uint32_t reg;
3632
3633 KASSERT(WM_BOTH_LOCKED(sc));
3634 /*
3635 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3636 	 * There is a small but measurable benefit to avoiding the adjustment
3637 * of the descriptor so that the headers are aligned, for normal mtu,
3638 * on such platforms. One possibility is that the DMA itself is
3639 * slightly more efficient if the front of the entire packet (instead
3640 * of the front of the headers) is aligned.
3641 *
3642 * Note we must always set align_tweak to 0 if we are using
3643 * jumbo frames.
3644 */
3645 #ifdef __NO_STRICT_ALIGNMENT
3646 sc->sc_align_tweak = 0;
3647 #else
3648 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3649 sc->sc_align_tweak = 0;
3650 else
3651 sc->sc_align_tweak = 2;
3652 #endif /* __NO_STRICT_ALIGNMENT */
3653
3654 /* Cancel any pending I/O. */
3655 wm_stop_locked(ifp, 0);
3656
3657 /* update statistics before reset */
3658 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3659 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3660
3661 /* Reset the chip to a known state. */
3662 wm_reset(sc);
3663
3664 switch (sc->sc_type) {
3665 case WM_T_82571:
3666 case WM_T_82572:
3667 case WM_T_82573:
3668 case WM_T_82574:
3669 case WM_T_82583:
3670 case WM_T_80003:
3671 case WM_T_ICH8:
3672 case WM_T_ICH9:
3673 case WM_T_ICH10:
3674 case WM_T_PCH:
3675 case WM_T_PCH2:
3676 case WM_T_PCH_LPT:
3677 if (wm_check_mng_mode(sc) != 0)
3678 wm_get_hw_control(sc);
3679 break;
3680 default:
3681 break;
3682 }
3683
3684 /* Reset the PHY. */
3685 if (sc->sc_flags & WM_F_HAS_MII)
3686 wm_gmii_reset(sc);
3687
3688 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3689 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3690 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3691 || (sc->sc_type == WM_T_PCH_LPT))
3692 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3693
3694 /* Initialize the transmit descriptor ring. */
3695 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3696 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3697 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3698 sc->sc_txfree = WM_NTXDESC(sc);
3699 sc->sc_txnext = 0;
3700
3701 if (sc->sc_type < WM_T_82543) {
3702 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3703 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3704 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3705 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3706 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3707 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3708 } else {
3709 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3710 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3711 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3712 CSR_WRITE(sc, WMREG_TDH, 0);
3713 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3714 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3715
3716 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3717 /*
3718 * Don't write TDT before TCTL.EN is set.
3719 * See the document.
3720 */
3721 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3722 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3723 | TXDCTL_WTHRESH(0));
3724 else {
3725 CSR_WRITE(sc, WMREG_TDT, 0);
3726 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3727 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3728 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3729 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3730 }
3731 }
3732 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3733 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3734
3735 /* Initialize the transmit job descriptors. */
3736 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3737 sc->sc_txsoft[i].txs_mbuf = NULL;
3738 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3739 sc->sc_txsnext = 0;
3740 sc->sc_txsdirty = 0;
3741
3742 /*
3743 * Initialize the receive descriptor and receive job
3744 * descriptor rings.
3745 */
3746 if (sc->sc_type < WM_T_82543) {
3747 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3748 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3749 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3750 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3751 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3752 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3753
3754 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3755 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3756 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3757 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3758 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3759 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3760 } else {
3761 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3762 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3763 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3764 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3765 CSR_WRITE(sc, WMREG_EITR(0), 450);
3766 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3767 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3768 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3769 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3770 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3771 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3772 | RXDCTL_WTHRESH(1));
3773 } else {
3774 CSR_WRITE(sc, WMREG_RDH, 0);
3775 CSR_WRITE(sc, WMREG_RDT, 0);
3776 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3777 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3778 }
3779 }
3780 for (i = 0; i < WM_NRXDESC; i++) {
3781 rxs = &sc->sc_rxsoft[i];
3782 if (rxs->rxs_mbuf == NULL) {
3783 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3784 log(LOG_ERR, "%s: unable to allocate or map "
3785 "rx buffer %d, error = %d\n",
3786 device_xname(sc->sc_dev), i, error);
3787 /*
3788 * XXX Should attempt to run with fewer receive
3789 * XXX buffers instead of just failing.
3790 */
3791 wm_rxdrain(sc);
3792 goto out;
3793 }
3794 } else {
3795 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3796 WM_INIT_RXDESC(sc, i);
3797 /*
3798 			 * For 82575 and newer devices, the RX descriptors
3799 			 * must be initialized after RCTL.EN is set in
3800 			 * wm_set_filter().
3801 */
3802 }
3803 }
3804 sc->sc_rxptr = 0;
3805 sc->sc_rxdiscard = 0;
3806 WM_RXCHAIN_RESET(sc);
3807
3808 /*
3809 * Clear out the VLAN table -- we don't use it (yet).
3810 */
3811 CSR_WRITE(sc, WMREG_VET, 0);
3812 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3813 trynum = 10; /* Due to hw errata */
3814 else
3815 trynum = 1;
3816 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3817 for (j = 0; j < trynum; j++)
3818 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3819
3820 /*
3821 * Set up flow-control parameters.
3822 *
3823 * XXX Values could probably stand some tuning.
3824 */
3825 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3826 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3827 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3828 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3829 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3830 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3831 }
3832
3833 sc->sc_fcrtl = FCRTL_DFLT;
3834 if (sc->sc_type < WM_T_82543) {
3835 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3836 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3837 } else {
3838 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3839 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3840 }
3841
3842 if (sc->sc_type == WM_T_80003)
3843 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3844 else
3845 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3846
3847 /* Writes the control register. */
3848 wm_set_vlan(sc);
3849
3850 if (sc->sc_flags & WM_F_HAS_MII) {
3851 int val;
3852
3853 switch (sc->sc_type) {
3854 case WM_T_80003:
3855 case WM_T_ICH8:
3856 case WM_T_ICH9:
3857 case WM_T_ICH10:
3858 case WM_T_PCH:
3859 case WM_T_PCH2:
3860 case WM_T_PCH_LPT:
3861 /*
3862 * Set the mac to wait the maximum time between each
3863 * iteration and increase the max iterations when
3864 * polling the phy; this fixes erroneous timeouts at
3865 * 10Mbps.
3866 */
3867 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3868 0xFFFF);
3869 val = wm_kmrn_readreg(sc,
3870 KUMCTRLSTA_OFFSET_INB_PARAM);
3871 val |= 0x3F;
3872 wm_kmrn_writereg(sc,
3873 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3874 break;
3875 default:
3876 break;
3877 }
3878
3879 if (sc->sc_type == WM_T_80003) {
3880 val = CSR_READ(sc, WMREG_CTRL_EXT);
3881 val &= ~CTRL_EXT_LINK_MODE_MASK;
3882 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3883
3884 /* Bypass RX and TX FIFO's */
3885 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3886 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3887 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3888 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3889 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3890 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3891 }
3892 }
3893 #if 0
3894 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3895 #endif
3896
3897 /* Set up checksum offload parameters. */
3898 reg = CSR_READ(sc, WMREG_RXCSUM);
3899 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3900 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3901 reg |= RXCSUM_IPOFL;
3902 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3903 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3904 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3905 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3906 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3907
3908 /* Set up the interrupt registers. */
3909 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3910 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3911 ICR_RXO | ICR_RXT0;
3912 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3913
3914 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3915 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3916 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3917 reg = CSR_READ(sc, WMREG_KABGTXD);
3918 reg |= KABGTXD_BGSQLBIAS;
3919 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3920 }
3921
3922 /* Set up the inter-packet gap. */
3923 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3924
3925 if (sc->sc_type >= WM_T_82543) {
3926 /*
3927 * Set up the interrupt throttling register (units of 256ns)
3928 * Note that a footnote in Intel's documentation says this
3929 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3930 	 * or 10Mbit mode.  Empirically, this also appears to be
3931 	 * true for the 1024ns units of the other interrupt-related
3932 	 * timer registers -- so, really, we ought
3933 * to divide this value by 4 when the link speed is low.
3934 *
3935 * XXX implement this division at link speed change!
3936 */
3937
3938 /*
3939 * For N interrupts/sec, set this value to:
3940 * 1000000000 / (N * 256). Note that we set the
3941 * absolute and packet timer values to this value
3942 * divided by 4 to get "simple timer" behavior.
3943 */
3944
3945 sc->sc_itr = 1500; /* 2604 ints/sec */
3946 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3947 }
3948
3949 /* Set the VLAN ethernetype. */
3950 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3951
3952 /*
3953 * Set up the transmit control register; we start out with
3954 	 * a collision distance suitable for FDX, but update it when
3955 * we resolve the media type.
3956 */
3957 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3958 | TCTL_CT(TX_COLLISION_THRESHOLD)
3959 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3960 if (sc->sc_type >= WM_T_82571)
3961 sc->sc_tctl |= TCTL_MULR;
3962 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3963
3964 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3965 /* Write TDT after TCTL.EN is set. See the document. */
3966 CSR_WRITE(sc, WMREG_TDT, 0);
3967 }
3968
3969 if (sc->sc_type == WM_T_80003) {
3970 reg = CSR_READ(sc, WMREG_TCTL_EXT);
3971 reg &= ~TCTL_EXT_GCEX_MASK;
3972 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3973 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3974 }
3975
3976 /* Set the media. */
3977 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3978 goto out;
3979
3980 /* Configure for OS presence */
3981 wm_init_manageability(sc);
3982
3983 /*
3984 * Set up the receive control register; we actually program
3985 * the register when we set the receive filter. Use multicast
3986 * address offset type 0.
3987 *
3988 * Only the i82544 has the ability to strip the incoming
3989 * CRC, so we don't enable that feature.
3990 */
3991 sc->sc_mchash_type = 0;
3992 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3993 | RCTL_MO(sc->sc_mchash_type);
3994
3995 /*
3996 * The I350 has a bug where it always strips the CRC whether
3997 * asked to or not. So ask for stripped CRC here and cope in rxeof
3998 */
3999 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4000 || (sc->sc_type == WM_T_I210))
4001 sc->sc_rctl |= RCTL_SECRC;
4002
4003 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4004 && (ifp->if_mtu > ETHERMTU)) {
4005 sc->sc_rctl |= RCTL_LPE;
4006 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4007 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4008 }
4009
4010 if (MCLBYTES == 2048) {
4011 sc->sc_rctl |= RCTL_2k;
4012 } else {
4013 if (sc->sc_type >= WM_T_82543) {
4014 switch (MCLBYTES) {
4015 case 4096:
4016 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4017 break;
4018 case 8192:
4019 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4020 break;
4021 case 16384:
4022 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4023 break;
4024 default:
4025 panic("wm_init: MCLBYTES %d unsupported",
4026 MCLBYTES);
4027 break;
4028 }
4029 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4030 }
4031
4032 /* Set the receive filter. */
4033 wm_set_filter(sc);
4034
4035 /* Enable ECC */
4036 switch (sc->sc_type) {
4037 case WM_T_82571:
4038 reg = CSR_READ(sc, WMREG_PBA_ECC);
4039 reg |= PBA_ECC_CORR_EN;
4040 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4041 break;
4042 case WM_T_PCH_LPT:
4043 reg = CSR_READ(sc, WMREG_PBECCSTS);
4044 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4045 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4046
4047 reg = CSR_READ(sc, WMREG_CTRL);
4048 reg |= CTRL_MEHE;
4049 CSR_WRITE(sc, WMREG_CTRL, reg);
4050 break;
4051 default:
4052 break;
4053 }
4054
4055 /* On 575 and later set RDT only if RX enabled */
4056 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4057 for (i = 0; i < WM_NRXDESC; i++)
4058 WM_INIT_RXDESC(sc, i);
4059
4060 sc->sc_stopping = false;
4061
4062 /* Start the one second link check clock. */
4063 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4064
4065 /* ...all done! */
4066 ifp->if_flags |= IFF_RUNNING;
4067 ifp->if_flags &= ~IFF_OACTIVE;
4068
4069 out:
4070 sc->sc_if_flags = ifp->if_flags;
4071 if (error)
4072 log(LOG_ERR, "%s: interface not running\n",
4073 device_xname(sc->sc_dev));
4074 return error;
4075 }
4076
4077 /*
4078 * wm_stop: [ifnet interface function]
4079 *
4080 * Stop transmission on the interface.
4081 */
4082 static void
4083 wm_stop(struct ifnet *ifp, int disable)
4084 {
4085 struct wm_softc *sc = ifp->if_softc;
4086
4087 WM_BOTH_LOCK(sc);
4088 wm_stop_locked(ifp, disable);
4089 WM_BOTH_UNLOCK(sc);
4090 }
4091
4092 static void
4093 wm_stop_locked(struct ifnet *ifp, int disable)
4094 {
4095 struct wm_softc *sc = ifp->if_softc;
4096 struct wm_txsoft *txs;
4097 int i;
4098
4099 KASSERT(WM_BOTH_LOCKED(sc));
4100
4101 sc->sc_stopping = true;
4102
4103 /* Stop the one second clock. */
4104 callout_stop(&sc->sc_tick_ch);
4105
4106 /* Stop the 82547 Tx FIFO stall check timer. */
4107 if (sc->sc_type == WM_T_82547)
4108 callout_stop(&sc->sc_txfifo_ch);
4109
4110 if (sc->sc_flags & WM_F_HAS_MII) {
4111 /* Down the MII. */
4112 mii_down(&sc->sc_mii);
4113 } else {
4114 #if 0
4115 /* Should we clear PHY's status properly? */
4116 wm_reset(sc);
4117 #endif
4118 }
4119
4120 /* Stop the transmit and receive processes. */
4121 CSR_WRITE(sc, WMREG_TCTL, 0);
4122 CSR_WRITE(sc, WMREG_RCTL, 0);
4123 sc->sc_rctl &= ~RCTL_EN;
4124
4125 /*
4126 * Clear the interrupt mask to ensure the device cannot assert its
4127 * interrupt line.
4128 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4129 * any currently pending or shared interrupt.
4130 */
4131 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4132 sc->sc_icr = 0;
4133
4134 /* Release any queued transmit buffers. */
4135 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4136 txs = &sc->sc_txsoft[i];
4137 if (txs->txs_mbuf != NULL) {
4138 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4139 m_freem(txs->txs_mbuf);
4140 txs->txs_mbuf = NULL;
4141 }
4142 }
4143
4144 /* Mark the interface as down and cancel the watchdog timer. */
4145 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4146 ifp->if_timer = 0;
4147
4148 if (disable)
4149 wm_rxdrain(sc);
4150
4151 #if 0 /* notyet */
4152 if (sc->sc_type >= WM_T_82544)
4153 CSR_WRITE(sc, WMREG_WUC, 0);
4154 #endif
4155 }
4156
4157 /*
4158 * wm_tx_offload:
4159 *
4160 * Set up TCP/IP checksumming parameters for the
4161 * specified packet.
4162 */
4163 static int
4164 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4165 uint8_t *fieldsp)
4166 {
4167 struct mbuf *m0 = txs->txs_mbuf;
4168 struct livengood_tcpip_ctxdesc *t;
4169 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4170 uint32_t ipcse;
4171 struct ether_header *eh;
4172 int offset, iphl;
4173 uint8_t fields;
4174
4175 /*
4176 * XXX It would be nice if the mbuf pkthdr had offset
4177 * fields for the protocol headers.
4178 */
4179
4180 eh = mtod(m0, struct ether_header *);
4181 switch (htons(eh->ether_type)) {
4182 case ETHERTYPE_IP:
4183 case ETHERTYPE_IPV6:
4184 offset = ETHER_HDR_LEN;
4185 break;
4186
4187 case ETHERTYPE_VLAN:
4188 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4189 break;
4190
4191 default:
4192 /*
4193 		 * We don't support this protocol or encapsulation.
4194 */
4195 *fieldsp = 0;
4196 *cmdp = 0;
4197 return 0;
4198 }
4199
4200 if ((m0->m_pkthdr.csum_flags &
4201 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4202 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4203 } else {
4204 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4205 }
4206 ipcse = offset + iphl - 1;
4207
4208 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4209 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4210 seg = 0;
4211 fields = 0;
4212
4213 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4214 int hlen = offset + iphl;
4215 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4216
4217 if (__predict_false(m0->m_len <
4218 (hlen + sizeof(struct tcphdr)))) {
4219 /*
4220 * TCP/IP headers are not in the first mbuf; we need
4221 * to do this the slow and painful way. Let's just
4222 * hope this doesn't happen very often.
4223 */
4224 struct tcphdr th;
4225
4226 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4227
4228 m_copydata(m0, hlen, sizeof(th), &th);
4229 if (v4) {
4230 struct ip ip;
4231
4232 m_copydata(m0, offset, sizeof(ip), &ip);
4233 ip.ip_len = 0;
4234 m_copyback(m0,
4235 offset + offsetof(struct ip, ip_len),
4236 sizeof(ip.ip_len), &ip.ip_len);
4237 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4238 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4239 } else {
4240 struct ip6_hdr ip6;
4241
4242 m_copydata(m0, offset, sizeof(ip6), &ip6);
4243 ip6.ip6_plen = 0;
4244 m_copyback(m0,
4245 offset + offsetof(struct ip6_hdr, ip6_plen),
4246 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4247 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4248 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4249 }
4250 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4251 sizeof(th.th_sum), &th.th_sum);
4252
4253 hlen += th.th_off << 2;
4254 } else {
4255 /*
4256 * TCP/IP headers are in the first mbuf; we can do
4257 * this the easy way.
4258 */
4259 struct tcphdr *th;
4260
4261 if (v4) {
4262 struct ip *ip =
4263 (void *)(mtod(m0, char *) + offset);
4264 th = (void *)(mtod(m0, char *) + hlen);
4265
4266 ip->ip_len = 0;
4267 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4268 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4269 } else {
4270 struct ip6_hdr *ip6 =
4271 (void *)(mtod(m0, char *) + offset);
4272 th = (void *)(mtod(m0, char *) + hlen);
4273
4274 ip6->ip6_plen = 0;
4275 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4276 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4277 }
4278 hlen += th->th_off << 2;
4279 }
4280
4281 if (v4) {
4282 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4283 cmdlen |= WTX_TCPIP_CMD_IP;
4284 } else {
4285 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4286 ipcse = 0;
4287 }
4288 cmd |= WTX_TCPIP_CMD_TSE;
4289 cmdlen |= WTX_TCPIP_CMD_TSE |
4290 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4291 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4292 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4293 }
4294
4295 /*
4296 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4297 * offload feature, if we load the context descriptor, we
4298 * MUST provide valid values for IPCSS and TUCSS fields.
4299 */
4300
4301 ipcs = WTX_TCPIP_IPCSS(offset) |
4302 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4303 WTX_TCPIP_IPCSE(ipcse);
4304 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4305 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4306 fields |= WTX_IXSM;
4307 }
4308
4309 offset += iphl;
4310
4311 if (m0->m_pkthdr.csum_flags &
4312 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4313 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4314 fields |= WTX_TXSM;
4315 tucs = WTX_TCPIP_TUCSS(offset) |
4316 WTX_TCPIP_TUCSO(offset +
4317 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4318 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4319 } else if ((m0->m_pkthdr.csum_flags &
4320 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4321 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4322 fields |= WTX_TXSM;
4323 tucs = WTX_TCPIP_TUCSS(offset) |
4324 WTX_TCPIP_TUCSO(offset +
4325 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4326 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4327 } else {
4328 /* Just initialize it to a valid TCP context. */
4329 tucs = WTX_TCPIP_TUCSS(offset) |
4330 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4331 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4332 }
4333
4334 /* Fill in the context descriptor. */
4335 t = (struct livengood_tcpip_ctxdesc *)
4336 &sc->sc_txdescs[sc->sc_txnext];
4337 t->tcpip_ipcs = htole32(ipcs);
4338 t->tcpip_tucs = htole32(tucs);
4339 t->tcpip_cmdlen = htole32(cmdlen);
4340 t->tcpip_seg = htole32(seg);
4341 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4342
4343 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4344 txs->txs_ndesc++;
4345
4346 *cmdp = cmd;
4347 *fieldsp = fields;
4348
4349 return 0;
4350 }
4351
4352 static void
4353 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4354 {
4355 struct mbuf *m;
4356 int i;
4357
4358 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4359 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4360 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4361 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4362 m->m_data, m->m_len, m->m_flags);
4363 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4364 i, i == 1 ? "" : "s");
4365 }
4366
4367 /*
4368 * wm_82547_txfifo_stall:
4369 *
4370 * Callout used to wait for the 82547 Tx FIFO to drain,
4371 * reset the FIFO pointers, and restart packet transmission.
4372 */
4373 static void
4374 wm_82547_txfifo_stall(void *arg)
4375 {
4376 struct wm_softc *sc = arg;
4377 #ifndef WM_MPSAFE
4378 int s;
4379
4380 s = splnet();
4381 #endif
4382 WM_TX_LOCK(sc);
4383
4384 if (sc->sc_stopping)
4385 goto out;
4386
4387 if (sc->sc_txfifo_stall) {
4388 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4389 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4390 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4391 /*
4392 * Packets have drained. Stop transmitter, reset
4393 * FIFO pointers, restart transmitter, and kick
4394 * the packet queue.
4395 */
4396 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4397 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4398 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4399 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4400 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4401 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4402 CSR_WRITE(sc, WMREG_TCTL, tctl);
4403 CSR_WRITE_FLUSH(sc);
4404
4405 sc->sc_txfifo_head = 0;
4406 sc->sc_txfifo_stall = 0;
4407 wm_start_locked(&sc->sc_ethercom.ec_if);
4408 } else {
4409 /*
4410 * Still waiting for packets to drain; try again in
4411 * another tick.
4412 */
4413 callout_schedule(&sc->sc_txfifo_ch, 1);
4414 }
4415 }
4416
4417 out:
4418 WM_TX_UNLOCK(sc);
4419 #ifndef WM_MPSAFE
4420 splx(s);
4421 #endif
4422 }
4423
4424 /*
4425 * wm_82547_txfifo_bugchk:
4426 *
4427 * Check for bug condition in the 82547 Tx FIFO. We need to
4428 * prevent enqueueing a packet that would wrap around the end
4429  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4430 *
4431 * We do this by checking the amount of space before the end
4432 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4433 * the Tx FIFO, wait for all remaining packets to drain, reset
4434 * the internal FIFO pointers to the beginning, and restart
4435 * transmission on the interface.
4436 */
4437 #define WM_FIFO_HDR 0x10
4438 #define WM_82547_PAD_LEN 0x3e0
4439 static int
4440 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4441 {
4442 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4443 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4444
4445 /* Just return if already stalled. */
4446 if (sc->sc_txfifo_stall)
4447 return 1;
4448
4449 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4450 /* Stall only occurs in half-duplex mode. */
4451 goto send_packet;
4452 }
4453
4454 if (len >= WM_82547_PAD_LEN + space) {
4455 sc->sc_txfifo_stall = 1;
4456 callout_schedule(&sc->sc_txfifo_ch, 1);
4457 return 1;
4458 }
4459
4460 send_packet:
4461 sc->sc_txfifo_head += len;
4462 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4463 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4464
4465 return 0;
4466 }
4467
4468 /*
4469 * wm_start: [ifnet interface function]
4470 *
4471 * Start packet transmission on the interface.
4472 */
4473 static void
4474 wm_start(struct ifnet *ifp)
4475 {
4476 struct wm_softc *sc = ifp->if_softc;
4477
4478 WM_TX_LOCK(sc);
4479 if (!sc->sc_stopping)
4480 wm_start_locked(ifp);
4481 WM_TX_UNLOCK(sc);
4482 }
4483
4484 static void
4485 wm_start_locked(struct ifnet *ifp)
4486 {
4487 struct wm_softc *sc = ifp->if_softc;
4488 struct mbuf *m0;
4489 struct m_tag *mtag;
4490 struct wm_txsoft *txs;
4491 bus_dmamap_t dmamap;
4492 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4493 bus_addr_t curaddr;
4494 bus_size_t seglen, curlen;
4495 uint32_t cksumcmd;
4496 uint8_t cksumfields;
4497
4498 KASSERT(WM_TX_LOCKED(sc));
4499
4500 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4501 return;
4502
4503 /* Remember the previous number of free descriptors. */
4504 ofree = sc->sc_txfree;
4505
4506 /*
4507 * Loop through the send queue, setting up transmit descriptors
4508 * until we drain the queue, or use up all available transmit
4509 * descriptors.
4510 */
4511 for (;;) {
4512 m0 = NULL;
4513
4514 /* Get a work queue entry. */
4515 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4516 wm_txintr(sc);
4517 if (sc->sc_txsfree == 0) {
4518 DPRINTF(WM_DEBUG_TX,
4519 ("%s: TX: no free job descriptors\n",
4520 device_xname(sc->sc_dev)));
4521 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4522 break;
4523 }
4524 }
4525
4526 /* Grab a packet off the queue. */
4527 IFQ_DEQUEUE(&ifp->if_snd, m0);
4528 if (m0 == NULL)
4529 break;
4530
4531 DPRINTF(WM_DEBUG_TX,
4532 ("%s: TX: have packet to transmit: %p\n",
4533 device_xname(sc->sc_dev), m0));
4534
4535 txs = &sc->sc_txsoft[sc->sc_txsnext];
4536 dmamap = txs->txs_dmamap;
4537
4538 use_tso = (m0->m_pkthdr.csum_flags &
4539 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4540
4541 /*
4542 * So says the Linux driver:
4543 * The controller does a simple calculation to make sure
4544 * there is enough room in the FIFO before initiating the
4545 * DMA for each buffer. The calc is:
4546 * 4 = ceil(buffer len / MSS)
4547 * To make sure we don't overrun the FIFO, adjust the max
4548 * buffer len if the MSS drops.
4549 */
4550 dmamap->dm_maxsegsz =
4551 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4552 ? m0->m_pkthdr.segsz << 2
4553 : WTX_MAX_LEN;
4554
4555 /*
4556 * Load the DMA map. If this fails, the packet either
4557 * didn't fit in the allotted number of segments, or we
4558 * were short on resources. For the too-many-segments
4559 * case, we simply report an error and drop the packet,
4560 * since we can't sanely copy a jumbo packet to a single
4561 * buffer.
4562 */
4563 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4564 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4565 if (error) {
4566 if (error == EFBIG) {
4567 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4568 log(LOG_ERR, "%s: Tx packet consumes too many "
4569 "DMA segments, dropping...\n",
4570 device_xname(sc->sc_dev));
4571 wm_dump_mbuf_chain(sc, m0);
4572 m_freem(m0);
4573 continue;
4574 }
4575 /* Short on resources, just stop for now. */
4576 DPRINTF(WM_DEBUG_TX,
4577 ("%s: TX: dmamap load failed: %d\n",
4578 device_xname(sc->sc_dev), error));
4579 break;
4580 }
4581
4582 segs_needed = dmamap->dm_nsegs;
4583 if (use_tso) {
4584 /* For sentinel descriptor; see below. */
4585 segs_needed++;
4586 }
4587
4588 /*
4589 * Ensure we have enough descriptors free to describe
4590 * the packet. Note, we always reserve one descriptor
4591 * at the end of the ring due to the semantics of the
4592 * TDT register, plus one more in the event we need
4593 * to load offload context.
4594 */
4595 if (segs_needed > sc->sc_txfree - 2) {
4596 /*
4597 * Not enough free descriptors to transmit this
4598 * packet. We haven't committed anything yet,
4599 			 * so just unload the DMA map, drop the packet,
4600 			 * and punt.  Notify the upper
4601 * layer that there are no more slots left.
4602 */
4603 DPRINTF(WM_DEBUG_TX,
4604 ("%s: TX: need %d (%d) descriptors, have %d\n",
4605 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4606 segs_needed, sc->sc_txfree - 1));
4607 ifp->if_flags |= IFF_OACTIVE;
4608 bus_dmamap_unload(sc->sc_dmat, dmamap);
4609 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4610 break;
4611 }
4612
4613 /*
4614 * Check for 82547 Tx FIFO bug. We need to do this
4615 * once we know we can transmit the packet, since we
4616 * do some internal FIFO space accounting here.
4617 */
4618 if (sc->sc_type == WM_T_82547 &&
4619 wm_82547_txfifo_bugchk(sc, m0)) {
4620 DPRINTF(WM_DEBUG_TX,
4621 ("%s: TX: 82547 Tx FIFO bug detected\n",
4622 device_xname(sc->sc_dev)));
4623 ifp->if_flags |= IFF_OACTIVE;
4624 bus_dmamap_unload(sc->sc_dmat, dmamap);
4625 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4626 break;
4627 }
4628
4629 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4630
4631 DPRINTF(WM_DEBUG_TX,
4632 ("%s: TX: packet has %d (%d) DMA segments\n",
4633 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4634
4635 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4636
4637 /*
4638 * Store a pointer to the packet so that we can free it
4639 * later.
4640 *
4641 		 * Initially, we take the number of descriptors the
4642 		 * packet uses to be the number of DMA segments.  This
4643 		 * may be incremented by 1 if we do checksum offload
4644 		 * (a descriptor is used to set the checksum context).
4645 */
4646 txs->txs_mbuf = m0;
4647 txs->txs_firstdesc = sc->sc_txnext;
4648 txs->txs_ndesc = segs_needed;
4649
4650 /* Set up offload parameters for this packet. */
4651 if (m0->m_pkthdr.csum_flags &
4652 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4653 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4654 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4655 if (wm_tx_offload(sc, txs, &cksumcmd,
4656 &cksumfields) != 0) {
4657 /* Error message already displayed. */
4658 bus_dmamap_unload(sc->sc_dmat, dmamap);
4659 continue;
4660 }
4661 } else {
4662 cksumcmd = 0;
4663 cksumfields = 0;
4664 }
4665
4666 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4667
4668 /* Sync the DMA map. */
4669 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4670 BUS_DMASYNC_PREWRITE);
4671
4672 /* Initialize the transmit descriptor. */
4673 for (nexttx = sc->sc_txnext, seg = 0;
4674 seg < dmamap->dm_nsegs; seg++) {
4675 for (seglen = dmamap->dm_segs[seg].ds_len,
4676 curaddr = dmamap->dm_segs[seg].ds_addr;
4677 seglen != 0;
4678 curaddr += curlen, seglen -= curlen,
4679 nexttx = WM_NEXTTX(sc, nexttx)) {
4680 curlen = seglen;
4681
4682 /*
4683 * So says the Linux driver:
4684 * Work around for premature descriptor
4685 * write-backs in TSO mode. Append a
4686 * 4-byte sentinel descriptor.
4687 */
4688 if (use_tso &&
4689 seg == dmamap->dm_nsegs - 1 &&
4690 curlen > 8)
4691 curlen -= 4;
4692
4693 wm_set_dma_addr(
4694 &sc->sc_txdescs[nexttx].wtx_addr,
4695 curaddr);
4696 sc->sc_txdescs[nexttx].wtx_cmdlen =
4697 htole32(cksumcmd | curlen);
4698 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4699 0;
4700 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4701 cksumfields;
4702 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4703 lasttx = nexttx;
4704
4705 DPRINTF(WM_DEBUG_TX,
4706 ("%s: TX: desc %d: low %#" PRIx64 ", "
4707 "len %#04zx\n",
4708 device_xname(sc->sc_dev), nexttx,
4709 (uint64_t)curaddr, curlen));
4710 }
4711 }
4712
4713 KASSERT(lasttx != -1);
4714
4715 /*
4716 * Set up the command byte on the last descriptor of
4717 * the packet. If we're in the interrupt delay window,
4718 * delay the interrupt.
4719 */
4720 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4721 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4722
4723 /*
4724 * If VLANs are enabled and the packet has a VLAN tag, set
4725 * up the descriptor to encapsulate the packet for us.
4726 *
4727 * This is only valid on the last descriptor of the packet.
4728 */
4729 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4730 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4731 htole32(WTX_CMD_VLE);
4732 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4733 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4734 }
4735
4736 txs->txs_lastdesc = lasttx;
4737
4738 DPRINTF(WM_DEBUG_TX,
4739 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4740 device_xname(sc->sc_dev),
4741 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4742
4743 /* Sync the descriptors we're using. */
4744 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4745 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4746
4747 /* Give the packet to the chip. */
4748 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4749
4750 DPRINTF(WM_DEBUG_TX,
4751 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4752
4753 DPRINTF(WM_DEBUG_TX,
4754 ("%s: TX: finished transmitting packet, job %d\n",
4755 device_xname(sc->sc_dev), sc->sc_txsnext));
4756
4757 /* Advance the tx pointer. */
4758 sc->sc_txfree -= txs->txs_ndesc;
4759 sc->sc_txnext = nexttx;
4760
4761 sc->sc_txsfree--;
4762 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4763
4764 /* Pass the packet to any BPF listeners. */
4765 bpf_mtap(ifp, m0);
4766 }
4767
4768 if (m0 != NULL) {
4769 ifp->if_flags |= IFF_OACTIVE;
4770 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4771 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4772 m_freem(m0);
4773 }
4774
4775 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4776 /* No more slots; notify upper layer. */
4777 ifp->if_flags |= IFF_OACTIVE;
4778 }
4779
4780 if (sc->sc_txfree != ofree) {
4781 /* Set a watchdog timer in case the chip flakes out. */
4782 ifp->if_timer = 5;
4783 }
4784 }
4785
4786 /*
4787 * wm_nq_tx_offload:
4788 *
4789 * Set up TCP/IP checksumming parameters for the
4790 * specified packet, for NEWQUEUE devices
4791 */
4792 static int
4793 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4794 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4795 {
4796 struct mbuf *m0 = txs->txs_mbuf;
4797 struct m_tag *mtag;
4798 uint32_t vl_len, mssidx, cmdc;
4799 struct ether_header *eh;
4800 int offset, iphl;
4801
4802 /*
4803 * XXX It would be nice if the mbuf pkthdr had offset
4804 * fields for the protocol headers.
4805 */
4806 *cmdlenp = 0;
4807 *fieldsp = 0;
4808
4809 eh = mtod(m0, struct ether_header *);
4810 switch (htons(eh->ether_type)) {
4811 case ETHERTYPE_IP:
4812 case ETHERTYPE_IPV6:
4813 offset = ETHER_HDR_LEN;
4814 break;
4815
4816 case ETHERTYPE_VLAN:
4817 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4818 break;
4819
4820 default:
4821 		/* We don't support this protocol or encapsulation. */
4822 *do_csum = false;
4823 return 0;
4824 }
4825 *do_csum = true;
4826 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4827 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4828
4829 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4830 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4831
4832 if ((m0->m_pkthdr.csum_flags &
4833 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4834 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4835 } else {
4836 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4837 }
4838 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4839 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
4840
4841 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4842 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4843 << NQTXC_VLLEN_VLAN_SHIFT);
4844 *cmdlenp |= NQTX_CMD_VLE;
4845 }
4846
4847 mssidx = 0;
4848
4849 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4850 int hlen = offset + iphl;
4851 int tcp_hlen;
4852 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4853
4854 if (__predict_false(m0->m_len <
4855 (hlen + sizeof(struct tcphdr)))) {
4856 /*
4857 * TCP/IP headers are not in the first mbuf; we need
4858 * to do this the slow and painful way. Let's just
4859 * hope this doesn't happen very often.
4860 */
4861 struct tcphdr th;
4862
4863 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4864
4865 m_copydata(m0, hlen, sizeof(th), &th);
4866 if (v4) {
4867 struct ip ip;
4868
4869 m_copydata(m0, offset, sizeof(ip), &ip);
4870 ip.ip_len = 0;
4871 m_copyback(m0,
4872 offset + offsetof(struct ip, ip_len),
4873 sizeof(ip.ip_len), &ip.ip_len);
4874 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4875 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4876 } else {
4877 struct ip6_hdr ip6;
4878
4879 m_copydata(m0, offset, sizeof(ip6), &ip6);
4880 ip6.ip6_plen = 0;
4881 m_copyback(m0,
4882 offset + offsetof(struct ip6_hdr, ip6_plen),
4883 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4884 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4885 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4886 }
4887 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4888 sizeof(th.th_sum), &th.th_sum);
4889
4890 tcp_hlen = th.th_off << 2;
4891 } else {
4892 /*
4893 * TCP/IP headers are in the first mbuf; we can do
4894 * this the easy way.
4895 */
4896 struct tcphdr *th;
4897
4898 if (v4) {
4899 struct ip *ip =
4900 (void *)(mtod(m0, char *) + offset);
4901 th = (void *)(mtod(m0, char *) + hlen);
4902
4903 ip->ip_len = 0;
4904 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4905 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4906 } else {
4907 struct ip6_hdr *ip6 =
4908 (void *)(mtod(m0, char *) + offset);
4909 th = (void *)(mtod(m0, char *) + hlen);
4910
4911 ip6->ip6_plen = 0;
4912 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4913 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4914 }
4915 tcp_hlen = th->th_off << 2;
4916 }
4917 hlen += tcp_hlen;
4918 *cmdlenp |= NQTX_CMD_TSE;
4919
4920 if (v4) {
4921 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4922 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
4923 } else {
4924 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4925 *fieldsp |= NQTXD_FIELDS_TUXSM;
4926 }
4927 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
4928 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4929 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
4930 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
4931 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
4932 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
4933 } else {
4934 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
4935 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4936 }
4937
4938 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
4939 *fieldsp |= NQTXD_FIELDS_IXSM;
4940 cmdc |= NQTXC_CMD_IP4;
4941 }
4942
4943 if (m0->m_pkthdr.csum_flags &
4944 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
4945 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4946 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
4947 cmdc |= NQTXC_CMD_TCP;
4948 } else {
4949 cmdc |= NQTXC_CMD_UDP;
4950 }
4951 cmdc |= NQTXC_CMD_IP4;
4952 *fieldsp |= NQTXD_FIELDS_TUXSM;
4953 }
4954 if (m0->m_pkthdr.csum_flags &
4955 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
4956 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4957 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
4958 cmdc |= NQTXC_CMD_TCP;
4959 } else {
4960 cmdc |= NQTXC_CMD_UDP;
4961 }
4962 cmdc |= NQTXC_CMD_IP6;
4963 *fieldsp |= NQTXD_FIELDS_TUXSM;
4964 }
4965
4966 /* Fill in the context descriptor. */
4967 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
4968 htole32(vl_len);
4969 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
4970 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
4971 htole32(cmdc);
4972 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
4973 htole32(mssidx);
4974 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4975 DPRINTF(WM_DEBUG_TX,
4976 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
4977 sc->sc_txnext, 0, vl_len));
4978 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
4979 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4980 txs->txs_ndesc++;
4981 return 0;
4982 }
4983
4984 /*
4985 * wm_nq_start: [ifnet interface function]
4986 *
4987 * Start packet transmission on the interface for NEWQUEUE devices
4988 */
4989 static void
4990 wm_nq_start(struct ifnet *ifp)
4991 {
4992 struct wm_softc *sc = ifp->if_softc;
4993
4994 WM_TX_LOCK(sc);
4995 if (!sc->sc_stopping)
4996 wm_nq_start_locked(ifp);
4997 WM_TX_UNLOCK(sc);
4998 }
4999
5000 static void
5001 wm_nq_start_locked(struct ifnet *ifp)
5002 {
5003 struct wm_softc *sc = ifp->if_softc;
5004 struct mbuf *m0;
5005 struct m_tag *mtag;
5006 struct wm_txsoft *txs;
5007 bus_dmamap_t dmamap;
5008 int error, nexttx, lasttx = -1, seg, segs_needed;
5009 bool do_csum, sent;
5010
5011 KASSERT(WM_TX_LOCKED(sc));
5012
5013 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5014 return;
5015
5016 sent = false;
5017
5018 /*
5019 * Loop through the send queue, setting up transmit descriptors
5020 * until we drain the queue, or use up all available transmit
5021 * descriptors.
5022 */
5023 for (;;) {
5024 m0 = NULL;
5025
5026 /* Get a work queue entry. */
5027 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5028 wm_txintr(sc);
5029 if (sc->sc_txsfree == 0) {
5030 DPRINTF(WM_DEBUG_TX,
5031 ("%s: TX: no free job descriptors\n",
5032 device_xname(sc->sc_dev)));
5033 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5034 break;
5035 }
5036 }
5037
5038 /* Grab a packet off the queue. */
5039 IFQ_DEQUEUE(&ifp->if_snd, m0);
5040 if (m0 == NULL)
5041 break;
5042
5043 DPRINTF(WM_DEBUG_TX,
5044 ("%s: TX: have packet to transmit: %p\n",
5045 device_xname(sc->sc_dev), m0));
5046
5047 txs = &sc->sc_txsoft[sc->sc_txsnext];
5048 dmamap = txs->txs_dmamap;
5049
5050 /*
5051 * Load the DMA map. If this fails, the packet either
5052 * didn't fit in the allotted number of segments, or we
5053 * were short on resources. For the too-many-segments
5054 * case, we simply report an error and drop the packet,
5055 * since we can't sanely copy a jumbo packet to a single
5056 * buffer.
5057 */
5058 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5059 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5060 if (error) {
5061 if (error == EFBIG) {
5062 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5063 log(LOG_ERR, "%s: Tx packet consumes too many "
5064 "DMA segments, dropping...\n",
5065 device_xname(sc->sc_dev));
5066 wm_dump_mbuf_chain(sc, m0);
5067 m_freem(m0);
5068 continue;
5069 }
5070 /* Short on resources, just stop for now. */
5071 DPRINTF(WM_DEBUG_TX,
5072 ("%s: TX: dmamap load failed: %d\n",
5073 device_xname(sc->sc_dev), error));
5074 break;
5075 }
5076
5077 segs_needed = dmamap->dm_nsegs;
5078
5079 /*
5080 * Ensure we have enough descriptors free to describe
5081 * the packet. Note, we always reserve one descriptor
5082 * at the end of the ring due to the semantics of the
5083 * TDT register, plus one more in the event we need
5084 * to load offload context.
5085 */
5086 if (segs_needed > sc->sc_txfree - 2) {
5087 /*
5088 * Not enough free descriptors to transmit this
5089 * packet. We haven't committed anything yet,
5090 			 * so just unload the DMA map, drop the packet,
5091 			 * and punt.  Notify the upper
5092 * layer that there are no more slots left.
5093 */
5094 DPRINTF(WM_DEBUG_TX,
5095 ("%s: TX: need %d (%d) descriptors, have %d\n",
5096 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5097 segs_needed, sc->sc_txfree - 1));
5098 ifp->if_flags |= IFF_OACTIVE;
5099 bus_dmamap_unload(sc->sc_dmat, dmamap);
5100 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5101 break;
5102 }
5103
5104 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5105
5106 DPRINTF(WM_DEBUG_TX,
5107 ("%s: TX: packet has %d (%d) DMA segments\n",
5108 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5109
5110 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5111
5112 /*
5113 * Store a pointer to the packet so that we can free it
5114 * later.
5115 *
5116 		 * Initially, we take the number of descriptors the
5117 		 * packet uses to be the number of DMA segments.  This
5118 		 * may be incremented by 1 if we do checksum offload
5119 		 * (a descriptor is used to set the checksum context).
5120 */
5121 txs->txs_mbuf = m0;
5122 txs->txs_firstdesc = sc->sc_txnext;
5123 txs->txs_ndesc = segs_needed;
5124
5125 /* Set up offload parameters for this packet. */
5126 uint32_t cmdlen, fields, dcmdlen;
5127 if (m0->m_pkthdr.csum_flags &
5128 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5129 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5130 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5131 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5132 &do_csum) != 0) {
5133 /* Error message already displayed. */
5134 bus_dmamap_unload(sc->sc_dmat, dmamap);
5135 continue;
5136 }
5137 } else {
5138 do_csum = false;
5139 cmdlen = 0;
5140 fields = 0;
5141 }
5142
5143 /* Sync the DMA map. */
5144 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5145 BUS_DMASYNC_PREWRITE);
5146
5147 /* Initialize the first transmit descriptor. */
5148 nexttx = sc->sc_txnext;
5149 if (!do_csum) {
5150 /* setup a legacy descriptor */
5151 wm_set_dma_addr(
5152 &sc->sc_txdescs[nexttx].wtx_addr,
5153 dmamap->dm_segs[0].ds_addr);
5154 sc->sc_txdescs[nexttx].wtx_cmdlen =
5155 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5156 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5157 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5158 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5159 NULL) {
5160 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5161 htole32(WTX_CMD_VLE);
5162 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5163 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5164 } else {
5165 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5166 }
5167 dcmdlen = 0;
5168 } else {
5169 /* setup an advanced data descriptor */
5170 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5171 htole64(dmamap->dm_segs[0].ds_addr);
5172 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5173 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5174 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
5175 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5176 htole32(fields);
5177 DPRINTF(WM_DEBUG_TX,
5178 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5179 device_xname(sc->sc_dev), nexttx,
5180 (uint64_t)dmamap->dm_segs[0].ds_addr));
5181 DPRINTF(WM_DEBUG_TX,
5182 ("\t 0x%08x%08x\n", fields,
5183 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5184 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5185 }
5186
5187 lasttx = nexttx;
5188 nexttx = WM_NEXTTX(sc, nexttx);
5189 /*
5190 		 * Fill in the next descriptors.  Legacy or advanced format
5191 		 * is the same here.
5192 */
5193 for (seg = 1; seg < dmamap->dm_nsegs;
5194 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5195 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5196 htole64(dmamap->dm_segs[seg].ds_addr);
5197 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5198 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5199 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5200 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5201 lasttx = nexttx;
5202
5203 DPRINTF(WM_DEBUG_TX,
5204 ("%s: TX: desc %d: %#" PRIx64 ", "
5205 "len %#04zx\n",
5206 device_xname(sc->sc_dev), nexttx,
5207 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5208 dmamap->dm_segs[seg].ds_len));
5209 }
5210
5211 KASSERT(lasttx != -1);
5212
5213 /*
5214 * Set up the command byte on the last descriptor of
5215 * the packet. If we're in the interrupt delay window,
5216 * delay the interrupt.
5217 */
5218 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5219 (NQTX_CMD_EOP | NQTX_CMD_RS));
5220 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5221 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5222
5223 txs->txs_lastdesc = lasttx;
5224
5225 DPRINTF(WM_DEBUG_TX,
5226 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5227 device_xname(sc->sc_dev),
5228 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5229
5230 /* Sync the descriptors we're using. */
5231 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5232 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5233
5234 /* Give the packet to the chip. */
5235 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5236 sent = true;
5237
5238 DPRINTF(WM_DEBUG_TX,
5239 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5240
5241 DPRINTF(WM_DEBUG_TX,
5242 ("%s: TX: finished transmitting packet, job %d\n",
5243 device_xname(sc->sc_dev), sc->sc_txsnext));
5244
5245 /* Advance the tx pointer. */
5246 sc->sc_txfree -= txs->txs_ndesc;
5247 sc->sc_txnext = nexttx;
5248
5249 sc->sc_txsfree--;
5250 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5251
5252 /* Pass the packet to any BPF listeners. */
5253 bpf_mtap(ifp, m0);
5254 }
5255
5256 if (m0 != NULL) {
5257 ifp->if_flags |= IFF_OACTIVE;
5258 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5259 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5260 m_freem(m0);
5261 }
5262
5263 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5264 /* No more slots; notify upper layer. */
5265 ifp->if_flags |= IFF_OACTIVE;
5266 }
5267
5268 if (sent) {
5269 /* Set a watchdog timer in case the chip flakes out. */
5270 ifp->if_timer = 5;
5271 }
5272 }
5273
5274 /* Interrupt */
5275
5276 /*
5277 * wm_txintr:
5278 *
5279 * Helper; handle transmit interrupts.
5280 */
5281 static void
5282 wm_txintr(struct wm_softc *sc)
5283 {
5284 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5285 struct wm_txsoft *txs;
5286 uint8_t status;
5287 int i;
5288
5289 if (sc->sc_stopping)
5290 return;
5291
5292 ifp->if_flags &= ~IFF_OACTIVE;
5293
5294 /*
5295 * Go through the Tx list and free mbufs for those
5296 * frames which have been transmitted.
5297 */
5298 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5299 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5300 txs = &sc->sc_txsoft[i];
5301
5302 DPRINTF(WM_DEBUG_TX,
5303 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5304
5305 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5306 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5307
5308 status =
5309 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5310 if ((status & WTX_ST_DD) == 0) {
5311 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5312 BUS_DMASYNC_PREREAD);
5313 break;
5314 }
5315
5316 DPRINTF(WM_DEBUG_TX,
5317 ("%s: TX: job %d done: descs %d..%d\n",
5318 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5319 txs->txs_lastdesc));
5320
5321 /*
5322 * XXX We should probably be using the statistics
5323 * XXX registers, but I don't know if they exist
5324 * XXX on chips before the i82544.
5325 */
5326
5327 #ifdef WM_EVENT_COUNTERS
5328 if (status & WTX_ST_TU)
5329 WM_EVCNT_INCR(&sc->sc_ev_tu);
5330 #endif /* WM_EVENT_COUNTERS */
5331
5332 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5333 ifp->if_oerrors++;
5334 if (status & WTX_ST_LC)
5335 log(LOG_WARNING, "%s: late collision\n",
5336 device_xname(sc->sc_dev));
5337 else if (status & WTX_ST_EC) {
5338 ifp->if_collisions += 16;
5339 log(LOG_WARNING, "%s: excessive collisions\n",
5340 device_xname(sc->sc_dev));
5341 }
5342 } else
5343 ifp->if_opackets++;
5344
5345 sc->sc_txfree += txs->txs_ndesc;
5346 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5347 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5348 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5349 m_freem(txs->txs_mbuf);
5350 txs->txs_mbuf = NULL;
5351 }
5352
5353 /* Update the dirty transmit buffer pointer. */
5354 sc->sc_txsdirty = i;
5355 DPRINTF(WM_DEBUG_TX,
5356 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5357
5358 /*
5359 * If there are no more pending transmissions, cancel the watchdog
5360 * timer.
5361 */
5362 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5363 ifp->if_timer = 0;
5364 }
5365
5366 /*
5367 * wm_rxintr:
5368 *
5369 * Helper; handle receive interrupts.
5370 */
5371 static void
5372 wm_rxintr(struct wm_softc *sc)
5373 {
5374 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5375 struct wm_rxsoft *rxs;
5376 struct mbuf *m;
5377 int i, len;
5378 uint8_t status, errors;
5379 uint16_t vlantag;
5380
5381 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5382 rxs = &sc->sc_rxsoft[i];
5383
5384 DPRINTF(WM_DEBUG_RX,
5385 ("%s: RX: checking descriptor %d\n",
5386 device_xname(sc->sc_dev), i));
5387
5388 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5389
5390 status = sc->sc_rxdescs[i].wrx_status;
5391 errors = sc->sc_rxdescs[i].wrx_errors;
5392 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5393 vlantag = sc->sc_rxdescs[i].wrx_special;
5394
5395 if ((status & WRX_ST_DD) == 0) {
5396 /* We have processed all of the receive descriptors. */
5397 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5398 break;
5399 }
5400
5401 if (__predict_false(sc->sc_rxdiscard)) {
5402 DPRINTF(WM_DEBUG_RX,
5403 ("%s: RX: discarding contents of descriptor %d\n",
5404 device_xname(sc->sc_dev), i));
5405 WM_INIT_RXDESC(sc, i);
5406 if (status & WRX_ST_EOP) {
5407 /* Reset our state. */
5408 DPRINTF(WM_DEBUG_RX,
5409 ("%s: RX: resetting rxdiscard -> 0\n",
5410 device_xname(sc->sc_dev)));
5411 sc->sc_rxdiscard = 0;
5412 }
5413 continue;
5414 }
5415
5416 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5417 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5418
5419 m = rxs->rxs_mbuf;
5420
5421 /*
5422 * Add a new receive buffer to the ring, unless of
5423 * course the length is zero. Treat the latter as a
5424 * failed mapping.
5425 */
5426 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5427 /*
5428 * Failed, throw away what we've done so
5429 * far, and discard the rest of the packet.
5430 */
5431 ifp->if_ierrors++;
5432 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5433 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5434 WM_INIT_RXDESC(sc, i);
5435 if ((status & WRX_ST_EOP) == 0)
5436 sc->sc_rxdiscard = 1;
5437 if (sc->sc_rxhead != NULL)
5438 m_freem(sc->sc_rxhead);
5439 WM_RXCHAIN_RESET(sc);
5440 DPRINTF(WM_DEBUG_RX,
5441 ("%s: RX: Rx buffer allocation failed, "
5442 "dropping packet%s\n", device_xname(sc->sc_dev),
5443 sc->sc_rxdiscard ? " (discard)" : ""));
5444 continue;
5445 }
5446
5447 m->m_len = len;
5448 sc->sc_rxlen += len;
5449 DPRINTF(WM_DEBUG_RX,
5450 ("%s: RX: buffer at %p len %d\n",
5451 device_xname(sc->sc_dev), m->m_data, len));
5452
5453 /* If this is not the end of the packet, keep looking. */
5454 if ((status & WRX_ST_EOP) == 0) {
5455 WM_RXCHAIN_LINK(sc, m);
5456 DPRINTF(WM_DEBUG_RX,
5457 ("%s: RX: not yet EOP, rxlen -> %d\n",
5458 device_xname(sc->sc_dev), sc->sc_rxlen));
5459 continue;
5460 }
5461
5462 /*
5463 		 * Okay, we have the entire packet now.  The chip is
5464 		 * configured to include the FCS except on I350/I354 and
5465 		 * I210/I211 (not all chips can be configured to strip
5466 		 * it), so we need to trim it.  We may also need to
5467 		 * adjust the length of the previous mbuf in the chain
5468 		 * if the current mbuf is too short.  Due to an erratum,
5469 		 * the I350 always strips the CRC, so RCTL_SECRC is set
5470 		 * for it in wm_init() and we don't trim it here.
5471 */
5472 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5473 && (sc->sc_type != WM_T_I210)
5474 && (sc->sc_type != WM_T_I211)) {
5475 if (m->m_len < ETHER_CRC_LEN) {
5476 sc->sc_rxtail->m_len
5477 -= (ETHER_CRC_LEN - m->m_len);
5478 m->m_len = 0;
5479 } else
5480 m->m_len -= ETHER_CRC_LEN;
5481 len = sc->sc_rxlen - ETHER_CRC_LEN;
5482 } else
5483 len = sc->sc_rxlen;
5484
5485 WM_RXCHAIN_LINK(sc, m);
5486
5487 *sc->sc_rxtailp = NULL;
5488 m = sc->sc_rxhead;
5489
5490 WM_RXCHAIN_RESET(sc);
5491
5492 DPRINTF(WM_DEBUG_RX,
5493 ("%s: RX: have entire packet, len -> %d\n",
5494 device_xname(sc->sc_dev), len));
5495
5496 /* If an error occurred, update stats and drop the packet. */
5497 if (errors &
5498 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5499 if (errors & WRX_ER_SE)
5500 log(LOG_WARNING, "%s: symbol error\n",
5501 device_xname(sc->sc_dev));
5502 else if (errors & WRX_ER_SEQ)
5503 log(LOG_WARNING, "%s: receive sequence error\n",
5504 device_xname(sc->sc_dev));
5505 else if (errors & WRX_ER_CE)
5506 log(LOG_WARNING, "%s: CRC error\n",
5507 device_xname(sc->sc_dev));
5508 m_freem(m);
5509 continue;
5510 }
5511
5512 /* No errors. Receive the packet. */
5513 m->m_pkthdr.rcvif = ifp;
5514 m->m_pkthdr.len = len;
5515
5516 /*
5517 * If VLANs are enabled, VLAN packets have been unwrapped
5518 * for us. Associate the tag with the packet.
5519 */
		/* XXX should check for I350 and I354 */
5521 if ((status & WRX_ST_VP) != 0) {
5522 VLAN_INPUT_TAG(ifp, m,
5523 le16toh(vlantag),
5524 continue);
5525 }
5526
5527 /* Set up checksum info for this packet. */
5528 if ((status & WRX_ST_IXSM) == 0) {
5529 if (status & WRX_ST_IPCS) {
5530 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5531 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5532 if (errors & WRX_ER_IPE)
5533 m->m_pkthdr.csum_flags |=
5534 M_CSUM_IPv4_BAD;
5535 }
5536 if (status & WRX_ST_TCPCS) {
5537 /*
5538 * Note: we don't know if this was TCP or UDP,
5539 * so we just set both bits, and expect the
5540 * upper layers to deal.
5541 */
5542 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5543 m->m_pkthdr.csum_flags |=
5544 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5545 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5546 if (errors & WRX_ER_TCPE)
5547 m->m_pkthdr.csum_flags |=
5548 M_CSUM_TCP_UDP_BAD;
5549 }
5550 }
5551
5552 ifp->if_ipackets++;
5553
5554 WM_RX_UNLOCK(sc);
5555
5556 /* Pass this up to any BPF listeners. */
5557 bpf_mtap(ifp, m);
5558
5559 /* Pass it on. */
5560 (*ifp->if_input)(ifp, m);
5561
5562 WM_RX_LOCK(sc);
5563
5564 if (sc->sc_stopping)
5565 break;
5566 }
5567
5568 /* Update the receive pointer. */
5569 sc->sc_rxptr = i;
5570
5571 DPRINTF(WM_DEBUG_RX,
5572 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5573 }
5574
5575 /*
5576 * wm_linkintr_gmii:
5577 *
5578 * Helper; handle link interrupts for GMII.
5579 */
5580 static void
5581 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5582 {
5583
5584 KASSERT(WM_TX_LOCKED(sc));
5585
5586 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5587 __func__));
5588
5589 if (icr & ICR_LSC) {
5590 DPRINTF(WM_DEBUG_LINK,
5591 ("%s: LINK: LSC -> mii_pollstat\n",
5592 device_xname(sc->sc_dev)));
5593 mii_pollstat(&sc->sc_mii);
5594 if (sc->sc_type == WM_T_82543) {
5595 int miistatus, active;
5596
5597 /*
			 * With the 82543, we need to force the MAC's
			 * speed and duplex to match the PHY's current
			 * configuration.
5601 */
5602 miistatus = sc->sc_mii.mii_media_status;
5603
5604 if (miistatus & IFM_ACTIVE) {
5605 active = sc->sc_mii.mii_media_active;
5606 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5607 switch (IFM_SUBTYPE(active)) {
5608 case IFM_10_T:
5609 sc->sc_ctrl |= CTRL_SPEED_10;
5610 break;
5611 case IFM_100_TX:
5612 sc->sc_ctrl |= CTRL_SPEED_100;
5613 break;
5614 case IFM_1000_T:
5615 sc->sc_ctrl |= CTRL_SPEED_1000;
5616 break;
5617 default:
				/*
				 * Fiber?
				 * Should not get here.
				 */
5622 printf("unknown media (%x)\n",
5623 active);
5624 break;
5625 }
5626 if (active & IFM_FDX)
5627 sc->sc_ctrl |= CTRL_FD;
5628 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5629 }
5630 } else if ((sc->sc_type == WM_T_ICH8)
5631 && (sc->sc_phytype == WMPHY_IGP_3)) {
5632 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5633 } else if (sc->sc_type == WM_T_PCH) {
5634 wm_k1_gig_workaround_hv(sc,
5635 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5636 }
5637
5638 if ((sc->sc_phytype == WMPHY_82578)
5639 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5640 == IFM_1000_T)) {
5641
5642 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5643 delay(200*1000); /* XXX too big */
5644
5645 /* Link stall fix for link up */
5646 wm_gmii_hv_writereg(sc->sc_dev, 1,
5647 HV_MUX_DATA_CTRL,
5648 HV_MUX_DATA_CTRL_GEN_TO_MAC
5649 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5650 wm_gmii_hv_writereg(sc->sc_dev, 1,
5651 HV_MUX_DATA_CTRL,
5652 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5653 }
5654 }
5655 } else if (icr & ICR_RXSEQ) {
5656 DPRINTF(WM_DEBUG_LINK,
5657 ("%s: LINK Receive sequence error\n",
5658 device_xname(sc->sc_dev)));
5659 }
5660 }
5661
5662 /*
5663 * wm_linkintr_tbi:
5664 *
5665 * Helper; handle link interrupts for TBI mode.
5666 */
5667 static void
5668 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5669 {
5670 uint32_t status;
5671
5672 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5673 __func__));
5674
5675 status = CSR_READ(sc, WMREG_STATUS);
5676 if (icr & ICR_LSC) {
5677 if (status & STATUS_LU) {
5678 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5679 device_xname(sc->sc_dev),
5680 (status & STATUS_FD) ? "FDX" : "HDX"));
5681 /*
			 * NOTE: The hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should update sc->sc_ctrl.
5684 */
5685
5686 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5687 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5688 sc->sc_fcrtl &= ~FCRTL_XONE;
5689 if (status & STATUS_FD)
5690 sc->sc_tctl |=
5691 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5692 else
5693 sc->sc_tctl |=
5694 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5695 if (sc->sc_ctrl & CTRL_TFCE)
5696 sc->sc_fcrtl |= FCRTL_XONE;
5697 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5698 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5699 WMREG_OLD_FCRTL : WMREG_FCRTL,
5700 sc->sc_fcrtl);
5701 sc->sc_tbi_linkup = 1;
5702 } else {
5703 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5704 device_xname(sc->sc_dev)));
5705 sc->sc_tbi_linkup = 0;
5706 }
5707 wm_tbi_set_linkled(sc);
5708 } else if (icr & ICR_RXSEQ) {
5709 DPRINTF(WM_DEBUG_LINK,
5710 ("%s: LINK: Receive sequence error\n",
5711 device_xname(sc->sc_dev)));
5712 }
5713 }
5714
5715 /*
5716 * wm_linkintr:
5717 *
5718 * Helper; handle link interrupts.
5719 */
5720 static void
5721 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5722 {
5723
5724 if (sc->sc_flags & WM_F_HAS_MII)
5725 wm_linkintr_gmii(sc, icr);
5726 else
5727 wm_linkintr_tbi(sc, icr);
5728 }
5729
5730 /*
5731 * wm_intr:
5732 *
5733 * Interrupt service routine.
5734 */
5735 static int
5736 wm_intr(void *arg)
5737 {
5738 struct wm_softc *sc = arg;
5739 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5740 uint32_t icr;
5741 int handled = 0;
5742
5743 while (1 /* CONSTCOND */) {
5744 icr = CSR_READ(sc, WMREG_ICR);
5745 if ((icr & sc->sc_icr) == 0)
5746 break;
5747 rnd_add_uint32(&sc->rnd_source, icr);
5748
5749 WM_RX_LOCK(sc);
5750
5751 if (sc->sc_stopping) {
5752 WM_RX_UNLOCK(sc);
5753 break;
5754 }
5755
5756 handled = 1;
5757
5758 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5759 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5760 DPRINTF(WM_DEBUG_RX,
5761 ("%s: RX: got Rx intr 0x%08x\n",
5762 device_xname(sc->sc_dev),
5763 icr & (ICR_RXDMT0|ICR_RXT0)));
5764 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5765 }
5766 #endif
5767 wm_rxintr(sc);
5768
5769 WM_RX_UNLOCK(sc);
5770 WM_TX_LOCK(sc);
5771
5772 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5773 if (icr & ICR_TXDW) {
5774 DPRINTF(WM_DEBUG_TX,
5775 ("%s: TX: got TXDW interrupt\n",
5776 device_xname(sc->sc_dev)));
5777 WM_EVCNT_INCR(&sc->sc_ev_txdw);
5778 }
5779 #endif
5780 wm_txintr(sc);
5781
5782 if (icr & (ICR_LSC|ICR_RXSEQ)) {
5783 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5784 wm_linkintr(sc, icr);
5785 }
5786
5787 WM_TX_UNLOCK(sc);
5788
5789 if (icr & ICR_RXO) {
5790 #if defined(WM_DEBUG)
5791 log(LOG_WARNING, "%s: Receive overrun\n",
5792 device_xname(sc->sc_dev));
5793 #endif /* defined(WM_DEBUG) */
5794 }
5795 }
5796
5797 if (handled) {
5798 /* Try to get more packets going. */
5799 ifp->if_start(ifp);
5800 }
5801
5802 return handled;
5803 }
5804
5805 /*
5806 * Media related.
5807 * GMII, SGMII, TBI (and SERDES)
5808 */
5809
5810 /* GMII related */
5811
5812 /*
5813 * wm_gmii_reset:
5814 *
5815 * Reset the PHY.
5816 */
5817 static void
5818 wm_gmii_reset(struct wm_softc *sc)
5819 {
5820 uint32_t reg;
5821 int rv;
5822
5823 /* get phy semaphore */
5824 switch (sc->sc_type) {
5825 case WM_T_82571:
5826 case WM_T_82572:
5827 case WM_T_82573:
5828 case WM_T_82574:
5829 case WM_T_82583:
5830 /* XXX should get sw semaphore, too */
5831 rv = wm_get_swsm_semaphore(sc);
5832 break;
5833 case WM_T_82575:
5834 case WM_T_82576:
5835 case WM_T_82580:
5836 case WM_T_82580ER:
5837 case WM_T_I350:
5838 case WM_T_I354:
5839 case WM_T_I210:
5840 case WM_T_I211:
5841 case WM_T_80003:
5842 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5843 break;
5844 case WM_T_ICH8:
5845 case WM_T_ICH9:
5846 case WM_T_ICH10:
5847 case WM_T_PCH:
5848 case WM_T_PCH2:
5849 case WM_T_PCH_LPT:
5850 rv = wm_get_swfwhw_semaphore(sc);
5851 break;
5852 default:
		/* nothing to do */
5854 rv = 0;
5855 break;
5856 }
5857 if (rv != 0) {
5858 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5859 __func__);
5860 return;
5861 }
5862
5863 switch (sc->sc_type) {
5864 case WM_T_82542_2_0:
5865 case WM_T_82542_2_1:
5866 /* null */
5867 break;
5868 case WM_T_82543:
5869 /*
		 * With the 82543, we need to force the MAC's speed and duplex
		 * to match the PHY's current configuration.
5872 * In addition, we need to perform a hardware reset on the PHY
5873 * to take it out of reset.
5874 */
5875 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5876 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5877
5878 /* The PHY reset pin is active-low. */
5879 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5880 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5881 CTRL_EXT_SWDPIN(4));
5882 reg |= CTRL_EXT_SWDPIO(4);
5883
5884 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5885 CSR_WRITE_FLUSH(sc);
5886 delay(10*1000);
5887
5888 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5889 CSR_WRITE_FLUSH(sc);
5890 delay(150);
5891 #if 0
5892 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5893 #endif
5894 delay(20*1000); /* XXX extra delay to get PHY ID? */
5895 break;
5896 case WM_T_82544: /* reset 10000us */
5897 case WM_T_82540:
5898 case WM_T_82545:
5899 case WM_T_82545_3:
5900 case WM_T_82546:
5901 case WM_T_82546_3:
5902 case WM_T_82541:
5903 case WM_T_82541_2:
5904 case WM_T_82547:
5905 case WM_T_82547_2:
5906 case WM_T_82571: /* reset 100us */
5907 case WM_T_82572:
5908 case WM_T_82573:
5909 case WM_T_82574:
5910 case WM_T_82575:
5911 case WM_T_82576:
5912 case WM_T_82580:
5913 case WM_T_82580ER:
5914 case WM_T_I350:
5915 case WM_T_I354:
5916 case WM_T_I210:
5917 case WM_T_I211:
5918 case WM_T_82583:
5919 case WM_T_80003:
5920 /* generic reset */
5921 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5922 CSR_WRITE_FLUSH(sc);
5923 delay(20000);
5924 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5925 CSR_WRITE_FLUSH(sc);
5926 delay(20000);
5927
5928 if ((sc->sc_type == WM_T_82541)
5929 || (sc->sc_type == WM_T_82541_2)
5930 || (sc->sc_type == WM_T_82547)
5931 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset(). */
5933 /* XXX add code to set LED after phy reset */
5934 }
5935 break;
5936 case WM_T_ICH8:
5937 case WM_T_ICH9:
5938 case WM_T_ICH10:
5939 case WM_T_PCH:
5940 case WM_T_PCH2:
5941 case WM_T_PCH_LPT:
5942 /* generic reset */
5943 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5944 CSR_WRITE_FLUSH(sc);
5945 delay(100);
5946 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5947 CSR_WRITE_FLUSH(sc);
5948 delay(150);
5949 break;
5950 default:
5951 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5952 __func__);
5953 break;
5954 }
5955
5956 /* release PHY semaphore */
5957 switch (sc->sc_type) {
5958 case WM_T_82571:
5959 case WM_T_82572:
5960 case WM_T_82573:
5961 case WM_T_82574:
5962 case WM_T_82583:
5963 /* XXX should put sw semaphore, too */
5964 wm_put_swsm_semaphore(sc);
5965 break;
5966 case WM_T_82575:
5967 case WM_T_82576:
5968 case WM_T_82580:
5969 case WM_T_82580ER:
5970 case WM_T_I350:
5971 case WM_T_I354:
5972 case WM_T_I210:
5973 case WM_T_I211:
5974 case WM_T_80003:
5975 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5976 break;
5977 case WM_T_ICH8:
5978 case WM_T_ICH9:
5979 case WM_T_ICH10:
5980 case WM_T_PCH:
5981 case WM_T_PCH2:
5982 case WM_T_PCH_LPT:
5983 wm_put_swfwhw_semaphore(sc);
5984 break;
5985 default:
		/* nothing to do */
5987 rv = 0;
5988 break;
5989 }
5990
5991 /* get_cfg_done */
5992 wm_get_cfg_done(sc);
5993
5994 /* extra setup */
5995 switch (sc->sc_type) {
5996 case WM_T_82542_2_0:
5997 case WM_T_82542_2_1:
5998 case WM_T_82543:
5999 case WM_T_82544:
6000 case WM_T_82540:
6001 case WM_T_82545:
6002 case WM_T_82545_3:
6003 case WM_T_82546:
6004 case WM_T_82546_3:
6005 case WM_T_82541_2:
6006 case WM_T_82547_2:
6007 case WM_T_82571:
6008 case WM_T_82572:
6009 case WM_T_82573:
6010 case WM_T_82574:
6011 case WM_T_82575:
6012 case WM_T_82576:
6013 case WM_T_82580:
6014 case WM_T_82580ER:
6015 case WM_T_I350:
6016 case WM_T_I354:
6017 case WM_T_I210:
6018 case WM_T_I211:
6019 case WM_T_82583:
6020 case WM_T_80003:
6021 /* null */
6022 break;
6023 case WM_T_82541:
6024 case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
6026 break;
6027 case WM_T_ICH8:
6028 case WM_T_ICH9:
6029 case WM_T_ICH10:
6030 case WM_T_PCH:
6031 case WM_T_PCH2:
6032 case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
6034 delay(10*1000);
6035
6036 if (sc->sc_type == WM_T_PCH)
6037 wm_hv_phy_workaround_ich8lan(sc);
6038
6039 if (sc->sc_type == WM_T_PCH2)
6040 wm_lv_phy_workaround_ich8lan(sc);
6041
6042 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6043 /*
			 * Dummy read to clear the PHY wakeup bit after
			 * LCD reset.
6046 */
6047 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6048 }
6049
6050 /*
		 * XXX Configure the LCD with the extended configuration region
6052 * in NVM
6053 */
6054
6055 /* Configure the LCD with the OEM bits in NVM */
6056 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6057 || (sc->sc_type == WM_T_PCH_LPT)) {
6058 /*
6059 * Disable LPLU.
6060 * XXX It seems that 82567 has LPLU, too.
6061 */
6062 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6064 reg |= HV_OEM_BITS_ANEGNOW;
6065 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6066 }
6067 break;
6068 default:
6069 panic("%s: unknown type\n", __func__);
6070 break;
6071 }
6072 }
6073
6074 /*
6075 * wm_get_phy_id_82575:
6076 *
6077 * Return PHY ID. Return -1 if it failed.
6078 */
6079 static int
6080 wm_get_phy_id_82575(struct wm_softc *sc)
6081 {
6082 uint32_t reg;
6083 int phyid = -1;
6084
6085 /* XXX */
6086 if ((sc->sc_flags & WM_F_SGMII) == 0)
6087 return -1;
6088
6089 if (wm_sgmii_uses_mdio(sc)) {
6090 switch (sc->sc_type) {
6091 case WM_T_82575:
6092 case WM_T_82576:
6093 reg = CSR_READ(sc, WMREG_MDIC);
6094 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6095 break;
6096 case WM_T_82580:
6097 case WM_T_I350:
6098 case WM_T_I354:
6099 case WM_T_I210:
6100 case WM_T_I211:
6101 reg = CSR_READ(sc, WMREG_MDICNFG);
6102 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6103 break;
6104 default:
6105 return -1;
6106 }
6107 }
6108
6109 return phyid;
6110 }
6111
6112
6113 /*
6114 * wm_gmii_mediainit:
6115 *
6116 * Initialize media for use on 1000BASE-T devices.
6117 */
6118 static void
6119 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6120 {
6121 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6122 struct mii_data *mii = &sc->sc_mii;
6123 uint32_t reg;
6124
6125 /* We have GMII. */
6126 sc->sc_flags |= WM_F_HAS_MII;
6127
6128 if (sc->sc_type == WM_T_80003)
6129 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6130 else
6131 sc->sc_tipg = TIPG_1000T_DFLT;
6132
6133 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6134 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6135 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6136 || (sc->sc_type == WM_T_I211)) {
6137 reg = CSR_READ(sc, WMREG_PHPM);
6138 reg &= ~PHPM_GO_LINK_D;
6139 CSR_WRITE(sc, WMREG_PHPM, reg);
6140 }
6141
6142 /*
6143 * Let the chip set speed/duplex on its own based on
6144 * signals from the PHY.
6145 * XXXbouyer - I'm not sure this is right for the 80003,
6146 * the em driver only sets CTRL_SLU here - but it seems to work.
6147 */
6148 sc->sc_ctrl |= CTRL_SLU;
6149 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6150
6151 /* Initialize our media structures and probe the GMII. */
6152 mii->mii_ifp = ifp;
6153
6154 /*
6155 * Determine the PHY access method.
6156 *
6157 * For SGMII, use SGMII specific method.
6158 *
6159 * For some devices, we can determine the PHY access method
6160 * from sc_type.
6161 *
	 * For ICH8 variants, it's difficult to determine the PHY access
	 * method by sc_type, so use the PCI product ID for some devices.
	 * For other ICH8 variants, try the IGP method first. If the PHY
	 * can't be detected, fall back to the BM method.
6166 */
6167 switch (prodid) {
6168 case PCI_PRODUCT_INTEL_PCH_M_LM:
6169 case PCI_PRODUCT_INTEL_PCH_M_LC:
6170 /* 82577 */
6171 sc->sc_phytype = WMPHY_82577;
6172 mii->mii_readreg = wm_gmii_hv_readreg;
6173 mii->mii_writereg = wm_gmii_hv_writereg;
6174 break;
6175 case PCI_PRODUCT_INTEL_PCH_D_DM:
6176 case PCI_PRODUCT_INTEL_PCH_D_DC:
6177 /* 82578 */
6178 sc->sc_phytype = WMPHY_82578;
6179 mii->mii_readreg = wm_gmii_hv_readreg;
6180 mii->mii_writereg = wm_gmii_hv_writereg;
6181 break;
6182 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6183 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6184 /* 82579 */
6185 sc->sc_phytype = WMPHY_82579;
6186 mii->mii_readreg = wm_gmii_hv_readreg;
6187 mii->mii_writereg = wm_gmii_hv_writereg;
6188 break;
6189 case PCI_PRODUCT_INTEL_I217_LM:
6190 case PCI_PRODUCT_INTEL_I217_V:
6191 case PCI_PRODUCT_INTEL_I218_LM:
6192 case PCI_PRODUCT_INTEL_I218_V:
6193 /* I21[78] */
6194 mii->mii_readreg = wm_gmii_hv_readreg;
6195 mii->mii_writereg = wm_gmii_hv_writereg;
6196 break;
6197 case PCI_PRODUCT_INTEL_82801I_BM:
6198 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6199 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6200 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6201 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6202 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6203 /* 82567 */
6204 sc->sc_phytype = WMPHY_BM;
6205 mii->mii_readreg = wm_gmii_bm_readreg;
6206 mii->mii_writereg = wm_gmii_bm_writereg;
6207 break;
6208 default:
6209 if (((sc->sc_flags & WM_F_SGMII) != 0)
		    && !wm_sgmii_uses_mdio(sc)) {
6211 mii->mii_readreg = wm_sgmii_readreg;
6212 mii->mii_writereg = wm_sgmii_writereg;
6213 } else if (sc->sc_type >= WM_T_80003) {
6214 mii->mii_readreg = wm_gmii_i80003_readreg;
6215 mii->mii_writereg = wm_gmii_i80003_writereg;
6216 } else if (sc->sc_type >= WM_T_I210) {
6217 mii->mii_readreg = wm_gmii_i82544_readreg;
6218 mii->mii_writereg = wm_gmii_i82544_writereg;
6219 } else if (sc->sc_type >= WM_T_82580) {
6220 sc->sc_phytype = WMPHY_82580;
6221 mii->mii_readreg = wm_gmii_82580_readreg;
6222 mii->mii_writereg = wm_gmii_82580_writereg;
6223 } else if (sc->sc_type >= WM_T_82544) {
6224 mii->mii_readreg = wm_gmii_i82544_readreg;
6225 mii->mii_writereg = wm_gmii_i82544_writereg;
6226 } else {
6227 mii->mii_readreg = wm_gmii_i82543_readreg;
6228 mii->mii_writereg = wm_gmii_i82543_writereg;
6229 }
6230 break;
6231 }
6232 mii->mii_statchg = wm_gmii_statchg;
6233
6234 wm_gmii_reset(sc);
6235
6236 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6237 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6238 wm_gmii_mediastatus);
6239
6240 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6241 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6242 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6243 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6244 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6245 /* Attach only one port */
6246 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6247 MII_OFFSET_ANY, MIIF_DOPAUSE);
6248 } else {
6249 int i, id;
6250 uint32_t ctrl_ext;
6251
6252 id = wm_get_phy_id_82575(sc);
6253 if (id != -1) {
6254 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6255 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6256 }
6257 if ((id == -1)
6258 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
6260 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6261 CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6263 CSR_WRITE_FLUSH(sc);
6264 delay(300*1000); /* XXX too long */
6265
				/* Try PHY addresses 1 through 7 */
6267 for (i = 1; i < 8; i++)
6268 mii_attach(sc->sc_dev, &sc->sc_mii,
6269 0xffffffff, i, MII_OFFSET_ANY,
6270 MIIF_DOPAUSE);
6271
6272 /* restore previous sfp cage power state */
6273 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6274 }
6275 }
6276 } else {
6277 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6278 MII_OFFSET_ANY, MIIF_DOPAUSE);
6279 }
6280
6281 /*
6282 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6283 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6284 */
6285 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6286 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6287 wm_set_mdio_slow_mode_hv(sc);
6288 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6289 MII_OFFSET_ANY, MIIF_DOPAUSE);
6290 }
6291
6292 /*
6293 * (For ICH8 variants)
6294 * If PHY detection failed, use BM's r/w function and retry.
6295 */
6296 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6297 /* if failed, retry with *_bm_* */
6298 mii->mii_readreg = wm_gmii_bm_readreg;
6299 mii->mii_writereg = wm_gmii_bm_writereg;
6300
6301 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6302 MII_OFFSET_ANY, MIIF_DOPAUSE);
6303 }
6304
6305 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
6307 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6308 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6309 sc->sc_phytype = WMPHY_NONE;
6310 } else {
6311 /*
6312 * PHY Found!
6313 * Check PHY type.
6314 */
6315 uint32_t model;
6316 struct mii_softc *child;
6317
6318 child = LIST_FIRST(&mii->mii_phys);
6319 if (device_is_a(child->mii_dev, "igphy")) {
6320 struct igphy_softc *isc = (struct igphy_softc *)child;
6321
6322 model = isc->sc_mii.mii_mpd_model;
6323 if (model == MII_MODEL_yyINTEL_I82566)
6324 sc->sc_phytype = WMPHY_IGP_3;
6325 }
6326
6327 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6328 }
6329 }
6330
6331 /*
6332 * wm_gmii_mediastatus: [ifmedia interface function]
6333 *
6334 * Get the current interface media status on a 1000BASE-T device.
6335 */
6336 static void
6337 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6338 {
6339 struct wm_softc *sc = ifp->if_softc;
6340
6341 ether_mediastatus(ifp, ifmr);
6342 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6343 | sc->sc_flowflags;
6344 }
6345
6346 /*
6347 * wm_gmii_mediachange: [ifmedia interface function]
6348 *
6349 * Set hardware to newly-selected media on a 1000BASE-T device.
6350 */
6351 static int
6352 wm_gmii_mediachange(struct ifnet *ifp)
6353 {
6354 struct wm_softc *sc = ifp->if_softc;
6355 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6356 int rc;
6357
6358 if ((ifp->if_flags & IFF_UP) == 0)
6359 return 0;
6360
6361 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6362 sc->sc_ctrl |= CTRL_SLU;
6363 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6364 || (sc->sc_type > WM_T_82543)) {
6365 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6366 } else {
6367 sc->sc_ctrl &= ~CTRL_ASDE;
6368 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6369 if (ife->ifm_media & IFM_FDX)
6370 sc->sc_ctrl |= CTRL_FD;
6371 switch (IFM_SUBTYPE(ife->ifm_media)) {
6372 case IFM_10_T:
6373 sc->sc_ctrl |= CTRL_SPEED_10;
6374 break;
6375 case IFM_100_TX:
6376 sc->sc_ctrl |= CTRL_SPEED_100;
6377 break;
6378 case IFM_1000_T:
6379 sc->sc_ctrl |= CTRL_SPEED_1000;
6380 break;
6381 default:
6382 panic("wm_gmii_mediachange: bad media 0x%x",
6383 ife->ifm_media);
6384 }
6385 }
6386 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6387 if (sc->sc_type <= WM_T_82543)
6388 wm_gmii_reset(sc);
6389
6390 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6391 return 0;
6392 return rc;
6393 }
6394
6395 #define MDI_IO CTRL_SWDPIN(2)
6396 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6397 #define MDI_CLK CTRL_SWDPIN(3)
6398
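/*
 * Bit-bang MII access for the 82543: each bit is presented on the
 * MDI_IO software-definable pin and latched by pulsing MDI_CLK
 * low-high-low, holding each phase for 10us.
 */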
6399 static void
6400 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6401 {
6402 uint32_t i, v;
6403
6404 v = CSR_READ(sc, WMREG_CTRL);
6405 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6406 v |= MDI_DIR | CTRL_SWDPIO(3);
6407
6408 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6409 if (data & i)
6410 v |= MDI_IO;
6411 else
6412 v &= ~MDI_IO;
6413 CSR_WRITE(sc, WMREG_CTRL, v);
6414 CSR_WRITE_FLUSH(sc);
6415 delay(10);
6416 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6417 CSR_WRITE_FLUSH(sc);
6418 delay(10);
6419 CSR_WRITE(sc, WMREG_CTRL, v);
6420 CSR_WRITE_FLUSH(sc);
6421 delay(10);
6422 }
6423 }
6424
6425 static uint32_t
6426 wm_i82543_mii_recvbits(struct wm_softc *sc)
6427 {
6428 uint32_t v, i, data = 0;
6429
6430 v = CSR_READ(sc, WMREG_CTRL);
6431 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6432 v |= CTRL_SWDPIO(3);
6433
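	/* Clock out one cycle for the turnaround before reading data. */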
6434 CSR_WRITE(sc, WMREG_CTRL, v);
6435 CSR_WRITE_FLUSH(sc);
6436 delay(10);
6437 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6438 CSR_WRITE_FLUSH(sc);
6439 delay(10);
6440 CSR_WRITE(sc, WMREG_CTRL, v);
6441 CSR_WRITE_FLUSH(sc);
6442 delay(10);
6443
6444 for (i = 0; i < 16; i++) {
6445 data <<= 1;
6446 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6447 CSR_WRITE_FLUSH(sc);
6448 delay(10);
6449 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6450 data |= 1;
6451 CSR_WRITE(sc, WMREG_CTRL, v);
6452 CSR_WRITE_FLUSH(sc);
6453 delay(10);
6454 }
6455
6456 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6457 CSR_WRITE_FLUSH(sc);
6458 delay(10);
6459 CSR_WRITE(sc, WMREG_CTRL, v);
6460 CSR_WRITE_FLUSH(sc);
6461 delay(10);
6462
6463 return data;
6464 }
6465
6466 #undef MDI_IO
6467 #undef MDI_DIR
6468 #undef MDI_CLK
6469
6470 /*
6471 * wm_gmii_i82543_readreg: [mii interface function]
6472 *
6473 * Read a PHY register on the GMII (i82543 version).
6474 */
6475 static int
6476 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6477 {
6478 struct wm_softc *sc = device_private(self);
6479 int rv;
6480
6481 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6482 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6483 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6484 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6485
6486 DPRINTF(WM_DEBUG_GMII,
6487 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6488 device_xname(sc->sc_dev), phy, reg, rv));
6489
6490 return rv;
6491 }
6492
6493 /*
6494 * wm_gmii_i82543_writereg: [mii interface function]
6495 *
6496 * Write a PHY register on the GMII (i82543 version).
6497 */
6498 static void
6499 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6500 {
6501 struct wm_softc *sc = device_private(self);
6502
6503 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6504 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6505 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6506 (MII_COMMAND_START << 30), 32);
6507 }
6508
6509 /*
6510 * wm_gmii_i82544_readreg: [mii interface function]
6511 *
6512 * Read a PHY register on the GMII.
6513 */
6514 static int
6515 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6516 {
6517 struct wm_softc *sc = device_private(self);
6518 uint32_t mdic = 0;
6519 int i, rv;
6520
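	/*
	 * Kick off the read through MDIC, then poll the READY bit,
	 * waiting 50us per attempt.
	 */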
6521 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6522 MDIC_REGADD(reg));
6523
6524 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6525 mdic = CSR_READ(sc, WMREG_MDIC);
6526 if (mdic & MDIC_READY)
6527 break;
6528 delay(50);
6529 }
6530
6531 if ((mdic & MDIC_READY) == 0) {
6532 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6533 device_xname(sc->sc_dev), phy, reg);
6534 rv = 0;
6535 } else if (mdic & MDIC_E) {
6536 #if 0 /* This is normal if no PHY is present. */
6537 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6538 device_xname(sc->sc_dev), phy, reg);
6539 #endif
6540 rv = 0;
6541 } else {
6542 rv = MDIC_DATA(mdic);
6543 if (rv == 0xffff)
6544 rv = 0;
6545 }
6546
6547 return rv;
6548 }
6549
6550 /*
6551 * wm_gmii_i82544_writereg: [mii interface function]
6552 *
6553 * Write a PHY register on the GMII.
6554 */
6555 static void
6556 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6557 {
6558 struct wm_softc *sc = device_private(self);
6559 uint32_t mdic = 0;
6560 int i;
6561
6562 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6563 MDIC_REGADD(reg) | MDIC_DATA(val));
6564
6565 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6566 mdic = CSR_READ(sc, WMREG_MDIC);
6567 if (mdic & MDIC_READY)
6568 break;
6569 delay(50);
6570 }
6571
6572 if ((mdic & MDIC_READY) == 0)
6573 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6574 device_xname(sc->sc_dev), phy, reg);
6575 else if (mdic & MDIC_E)
6576 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6577 device_xname(sc->sc_dev), phy, reg);
6578 }
6579
6580 /*
6581 * wm_gmii_i80003_readreg: [mii interface function]
6582 *
 * Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6586 */
6587 static int
6588 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6589 {
6590 struct wm_softc *sc = device_private(self);
6591 int sem;
6592 int rv;
6593
6594 if (phy != 1) /* only one PHY on kumeran bus */
6595 return 0;
6596
6597 sem = swfwphysem[sc->sc_funcid];
6598 if (wm_get_swfw_semaphore(sc, sem)) {
6599 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6600 __func__);
6601 return 0;
6602 }
6603
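	/*
	 * Registers below GG82563_MIN_ALT_REG are reached through the
	 * normal page select register; higher ones use the alternate
	 * page select register.
	 */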
6604 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6605 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6606 reg >> GG82563_PAGE_SHIFT);
6607 } else {
6608 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6609 reg >> GG82563_PAGE_SHIFT);
6610 }
	/*
	 * Wait another 200us to work around a bug with the ready bit
	 * in the MDIC register.
	 */
6612 delay(200);
6613 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6614 delay(200);
6615
6616 wm_put_swfw_semaphore(sc, sem);
6617 return rv;
6618 }
6619
6620 /*
6621 * wm_gmii_i80003_writereg: [mii interface function]
6622 *
 * Write a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6626 */
6627 static void
6628 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6629 {
6630 struct wm_softc *sc = device_private(self);
6631 int sem;
6632
6633 if (phy != 1) /* only one PHY on kumeran bus */
6634 return;
6635
6636 sem = swfwphysem[sc->sc_funcid];
6637 if (wm_get_swfw_semaphore(sc, sem)) {
6638 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6639 __func__);
6640 return;
6641 }
6642
6643 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6644 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6645 reg >> GG82563_PAGE_SHIFT);
6646 } else {
6647 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6648 reg >> GG82563_PAGE_SHIFT);
6649 }
	/*
	 * Wait another 200us to work around a bug with the ready bit
	 * in the MDIC register.
	 */
6651 delay(200);
6652 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6653 delay(200);
6654
6655 wm_put_swfw_semaphore(sc, sem);
6656 }
6657
6658 /*
6659 * wm_gmii_bm_readreg: [mii interface function]
6660 *
 * Read a PHY register on the BM PHY (e.g. 82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6664 */
6665 static int
6666 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6667 {
6668 struct wm_softc *sc = device_private(self);
6669 int sem;
6670 int rv;
6671
6672 sem = swfwphysem[sc->sc_funcid];
6673 if (wm_get_swfw_semaphore(sc, sem)) {
6674 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6675 __func__);
6676 return 0;
6677 }
6678
6679 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6680 if (phy == 1)
6681 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6682 reg);
6683 else
6684 wm_gmii_i82544_writereg(self, phy,
6685 GG82563_PHY_PAGE_SELECT,
6686 reg >> GG82563_PAGE_SHIFT);
6687 }
6688
6689 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6690 wm_put_swfw_semaphore(sc, sem);
6691 return rv;
6692 }
6693
6694 /*
6695 * wm_gmii_bm_writereg: [mii interface function]
6696 *
 * Write a PHY register on the BM PHY (e.g. 82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6700 */
6701 static void
6702 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6703 {
6704 struct wm_softc *sc = device_private(self);
6705 int sem;
6706
6707 sem = swfwphysem[sc->sc_funcid];
6708 if (wm_get_swfw_semaphore(sc, sem)) {
6709 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6710 __func__);
6711 return;
6712 }
6713
6714 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6715 if (phy == 1)
6716 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6717 reg);
6718 else
6719 wm_gmii_i82544_writereg(self, phy,
6720 GG82563_PHY_PAGE_SELECT,
6721 reg >> GG82563_PAGE_SHIFT);
6722 }
6723
6724 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6725 wm_put_swfw_semaphore(sc, sem);
6726 }
6727
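/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800). Page 800 is
 *	reached by first selecting page 769 and enabling wakeup register
 *	access, then issuing the address and data opcodes.
 */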
6728 static void
6729 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6730 {
6731 struct wm_softc *sc = device_private(self);
6732 uint16_t regnum = BM_PHY_REG_NUM(offset);
6733 uint16_t wuce;
6734
6735 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6736 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
6738 }
6739
6740 /* Set page 769 */
6741 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6742 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6743
6744 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6745
6746 wuce &= ~BM_WUC_HOST_WU_BIT;
6747 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6748 wuce | BM_WUC_ENABLE_BIT);
6749
6750 /* Select page 800 */
6751 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6752 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6753
6754 /* Write page 800 */
6755 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6756
6757 if (rd)
6758 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6759 else
6760 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6761
6762 /* Set page 769 */
6763 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6764 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6765
6766 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6767 }
6768
6769 /*
6770 * wm_gmii_hv_readreg: [mii interface function]
6771 *
 * Read a PHY register on the HV PHY (82577 and newer PCH PHYs).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6775 */
6776 static int
6777 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6778 {
6779 struct wm_softc *sc = device_private(self);
6780 uint16_t page = BM_PHY_REG_PAGE(reg);
6781 uint16_t regnum = BM_PHY_REG_NUM(reg);
6782 uint16_t val;
6783 int rv;
6784
6785 if (wm_get_swfwhw_semaphore(sc)) {
6786 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6787 __func__);
6788 return 0;
6789 }
6790
6791 /* XXX Workaround failure in MDIO access while cable is disconnected */
6792 if (sc->sc_phytype == WMPHY_82577) {
6793 /* XXX must write */
6794 }
6795
	/* Page 800 works differently from the rest; it has its own function */
6797 if (page == BM_WUC_PAGE) {
6798 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6799 return val;
6800 }
6801
	/*
	 * Pages lower than 768 work differently from the rest and would
	 * need their own handling; just report the access for now.
	 */
6806 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6807 printf("gmii_hv_readreg!!!\n");
6808 return 0;
6809 }
6810
6811 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6812 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6813 page << BME1000_PAGE_SHIFT);
6814 }
6815
6816 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6817 wm_put_swfwhw_semaphore(sc);
6818 return rv;
6819 }
6820
6821 /*
6822 * wm_gmii_hv_writereg: [mii interface function]
6823 *
 * Write a PHY register on the HV PHY (82577 and newer PCH PHYs).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6827 */
6828 static void
6829 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6830 {
6831 struct wm_softc *sc = device_private(self);
6832 uint16_t page = BM_PHY_REG_PAGE(reg);
6833 uint16_t regnum = BM_PHY_REG_NUM(reg);
6834
6835 if (wm_get_swfwhw_semaphore(sc)) {
6836 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6837 __func__);
6838 return;
6839 }
6840
6841 /* XXX Workaround failure in MDIO access while cable is disconnected */
6842
	/* Page 800 works differently from the rest; it has its own function */
6844 if (page == BM_WUC_PAGE) {
6845 uint16_t tmp;
6846
6847 tmp = val;
6848 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6849 return;
6850 }
6851
	/*
	 * Pages lower than 768 work differently from the rest and would
	 * need their own handling; just report the access for now.
	 */
6856 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6857 printf("gmii_hv_writereg!!!\n");
6858 return;
6859 }
6860
6861 /*
6862 * XXX Workaround MDIO accesses being disabled after entering IEEE
6863 * Power Down (whenever bit 11 of the PHY control register is set)
6864 */
6865
6866 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6867 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6868 page << BME1000_PAGE_SHIFT);
6869 }
6870
6871 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6872 wm_put_swfwhw_semaphore(sc);
6873 }
6874
6875 /*
6876 * wm_gmii_82580_readreg: [mii interface function]
6877 *
6878 * Read a PHY register on the 82580 and I350.
6879 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6881 */
6882 static int
6883 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6884 {
6885 struct wm_softc *sc = device_private(self);
6886 int sem;
6887 int rv;
6888
6889 sem = swfwphysem[sc->sc_funcid];
6890 if (wm_get_swfw_semaphore(sc, sem)) {
6891 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6892 __func__);
6893 return 0;
6894 }
6895
6896 rv = wm_gmii_i82544_readreg(self, phy, reg);
6897
6898 wm_put_swfw_semaphore(sc, sem);
6899 return rv;
6900 }
6901
6902 /*
6903 * wm_gmii_82580_writereg: [mii interface function]
6904 *
6905 * Write a PHY register on the 82580 and I350.
6906 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6908 */
6909 static void
6910 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6911 {
6912 struct wm_softc *sc = device_private(self);
6913 int sem;
6914
6915 sem = swfwphysem[sc->sc_funcid];
6916 if (wm_get_swfw_semaphore(sc, sem)) {
6917 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6918 __func__);
6919 return;
6920 }
6921
6922 wm_gmii_i82544_writereg(self, phy, reg, val);
6923
6924 wm_put_swfw_semaphore(sc, sem);
6925 }
6926
6927 /*
6928 * wm_gmii_statchg: [mii interface function]
6929 *
6930 * Callback from MII layer when media changes.
6931 */
6932 static void
6933 wm_gmii_statchg(struct ifnet *ifp)
6934 {
6935 struct wm_softc *sc = ifp->if_softc;
6936 struct mii_data *mii = &sc->sc_mii;
6937
6938 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6939 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6940 sc->sc_fcrtl &= ~FCRTL_XONE;
6941
6942 /*
6943 * Get flow control negotiation result.
6944 */
6945 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6946 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6947 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6948 mii->mii_media_active &= ~IFM_ETH_FMASK;
6949 }
6950
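	/* Program the TX/RX flow control enables from the result. */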
6951 if (sc->sc_flowflags & IFM_FLOW) {
6952 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6953 sc->sc_ctrl |= CTRL_TFCE;
6954 sc->sc_fcrtl |= FCRTL_XONE;
6955 }
6956 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6957 sc->sc_ctrl |= CTRL_RFCE;
6958 }
6959
6960 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6961 DPRINTF(WM_DEBUG_LINK,
6962 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
6963 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6964 } else {
6965 DPRINTF(WM_DEBUG_LINK,
6966 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
6967 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6968 }
6969
6970 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6971 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6972 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6973 : WMREG_FCRTL, sc->sc_fcrtl);
6974 if (sc->sc_type == WM_T_80003) {
6975 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6976 case IFM_1000_T:
6977 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6978 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6979 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6980 break;
6981 default:
6982 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6983 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6984 sc->sc_tipg = TIPG_10_100_80003_DFLT;
6985 break;
6986 }
6987 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6988 }
6989 }
6990
6991 /*
6992 * wm_kmrn_readreg:
6993 *
 * Read a Kumeran register
6995 */
6996 static int
6997 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6998 {
6999 int rv;
7000
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7002 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7003 aprint_error_dev(sc->sc_dev,
7004 "%s: failed to get semaphore\n", __func__);
7005 return 0;
7006 }
	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7008 if (wm_get_swfwhw_semaphore(sc)) {
7009 aprint_error_dev(sc->sc_dev,
7010 "%s: failed to get semaphore\n", __func__);
7011 return 0;
7012 }
7013 }
7014
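	/*
	 * Start the read by writing the register offset with REN set,
	 * wait 2us, then read the result from KUMCTRLSTA.
	 */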
7015 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7016 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7017 KUMCTRLSTA_REN);
7018 CSR_WRITE_FLUSH(sc);
7019 delay(2);
7020
7021 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7022
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7026 wm_put_swfwhw_semaphore(sc);
7027
7028 return rv;
7029 }
7030
7031 /*
7032 * wm_kmrn_writereg:
7033 *
 * Write a Kumeran register
7035 */
7036 static void
7037 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7038 {
7039
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7041 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7042 aprint_error_dev(sc->sc_dev,
7043 "%s: failed to get semaphore\n", __func__);
7044 return;
7045 }
	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7047 if (wm_get_swfwhw_semaphore(sc)) {
7048 aprint_error_dev(sc->sc_dev,
7049 "%s: failed to get semaphore\n", __func__);
7050 return;
7051 }
7052 }
7053
7054 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7055 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7056 (val & KUMCTRLSTA_MASK));
7057
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7061 wm_put_swfwhw_semaphore(sc);
7062 }
7063
7064 /* SGMII related */
7065
7066 /*
7067 * wm_sgmii_uses_mdio
7068 *
7069 * Check whether the transaction is to the internal PHY or the external
7070 * MDIO interface. Return true if it's MDIO.
7071 */
7072 static bool
7073 wm_sgmii_uses_mdio(struct wm_softc *sc)
7074 {
7075 uint32_t reg;
7076 bool ismdio = false;
7077
7078 switch (sc->sc_type) {
7079 case WM_T_82575:
7080 case WM_T_82576:
7081 reg = CSR_READ(sc, WMREG_MDIC);
7082 ismdio = ((reg & MDIC_DEST) != 0);
7083 break;
7084 case WM_T_82580:
7085 case WM_T_82580ER:
7086 case WM_T_I350:
7087 case WM_T_I354:
7088 case WM_T_I210:
7089 case WM_T_I211:
7090 reg = CSR_READ(sc, WMREG_MDICNFG);
7091 ismdio = ((reg & MDICNFG_DEST) != 0);
7092 break;
7093 default:
7094 break;
7095 }
7096
7097 return ismdio;
7098 }
7099
7100 /*
7101 * wm_sgmii_readreg: [mii interface function]
7102 *
7103 * Read a PHY register on the SGMII
7104 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7106 */
7107 static int
7108 wm_sgmii_readreg(device_t self, int phy, int reg)
7109 {
7110 struct wm_softc *sc = device_private(self);
7111 uint32_t i2ccmd;
7112 int i, rv;
7113
7114 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7115 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7116 __func__);
7117 return 0;
7118 }
7119
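	/*
	 * In SGMII mode the PHY is reached over I2C: encode the
	 * register and PHY addresses with a read opcode into I2CCMD,
	 * then poll the READY bit.
	 */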
7120 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7121 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7122 | I2CCMD_OPCODE_READ;
7123 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7124
7125 /* Poll the ready bit */
7126 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7127 delay(50);
7128 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7129 if (i2ccmd & I2CCMD_READY)
7130 break;
7131 }
7132 if ((i2ccmd & I2CCMD_READY) == 0)
7133 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7134 if ((i2ccmd & I2CCMD_ERROR) != 0)
7135 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7136
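	/* The 16-bit data arrives byte-swapped; convert to host order. */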
7137 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7138
7139 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7140 return rv;
7141 }
7142
7143 /*
7144 * wm_sgmii_writereg: [mii interface function]
7145 *
7146 * Write a PHY register on the SGMII.
7147 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7149 */
7150 static void
7151 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7152 {
7153 struct wm_softc *sc = device_private(self);
7154 uint32_t i2ccmd;
7155 int i;
7156
7157 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7158 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7159 __func__);
7160 return;
7161 }
7162
7163 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7164 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7165 | I2CCMD_OPCODE_WRITE;
7166 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7167
7168 /* Poll the ready bit */
7169 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7170 delay(50);
7171 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7172 if (i2ccmd & I2CCMD_READY)
7173 break;
7174 }
7175 if ((i2ccmd & I2CCMD_READY) == 0)
7176 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7177 if ((i2ccmd & I2CCMD_ERROR) != 0)
7178 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7179
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7181 }
7182
7183 /* TBI related */
7184
7185 /* XXX Currently TBI only */
7186 static int
7187 wm_check_for_link(struct wm_softc *sc)
7188 {
7189 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7190 uint32_t rxcw;
7191 uint32_t ctrl;
7192 uint32_t status;
7193 uint32_t sig;
7194
7195 if (sc->sc_mediatype & WMP_F_SERDES) {
7196 sc->sc_tbi_linkup = 1;
7197 return 0;
7198 }
7199
7200 rxcw = CSR_READ(sc, WMREG_RXCW);
7201 ctrl = CSR_READ(sc, WMREG_CTRL);
7202 status = CSR_READ(sc, WMREG_STATUS);
7203
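	/*
	 * "sig" is the value CTRL_SWDPIN(1) takes when the optics see
	 * a signal: set on chips newer than the 82544, clear on older
	 * ones.
	 */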
7204 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7205
7206 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7207 device_xname(sc->sc_dev), __func__,
7208 ((ctrl & CTRL_SWDPIN(1)) == sig),
7209 ((status & STATUS_LU) != 0),
7210 ((rxcw & RXCW_C) != 0)
7211 ));
7212
7213 /*
7214 * SWDPIN LU RXCW
7215 * 0 0 0
7216 * 0 0 1 (should not happen)
7217 * 0 1 0 (should not happen)
7218 * 0 1 1 (should not happen)
7219 * 1 0 0 Disable autonego and force linkup
7220 * 1 0 1 got /C/ but not linkup yet
7221 * 1 1 0 (linkup)
7222 * 1 1 1 If IFM_AUTO, back to autonego
7223 *
7224 */
7225 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7226 && ((status & STATUS_LU) == 0)
7227 && ((rxcw & RXCW_C) == 0)) {
7228 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7229 __func__));
7230 sc->sc_tbi_linkup = 0;
7231 /* Disable auto-negotiation in the TXCW register */
7232 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7233
7234 /*
7235 * Force link-up and also force full-duplex.
7236 *
		 * NOTE: The hardware updates TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl.
7239 */
7240 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7241 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7242 } else if (((status & STATUS_LU) != 0)
7243 && ((rxcw & RXCW_C) != 0)
7244 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7245 sc->sc_tbi_linkup = 1;
7246 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7247 __func__));
7248 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7249 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7250 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7251 && ((rxcw & RXCW_C) != 0)) {
7252 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7253 } else {
7254 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7255 status));
7256 }
7257
7258 return 0;
7259 }
7260
7261 /*
7262 * wm_tbi_mediainit:
7263 *
7264 * Initialize media for use on 1000BASE-X devices.
7265 */
7266 static void
7267 wm_tbi_mediainit(struct wm_softc *sc)
7268 {
7269 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7270 const char *sep = "";
7271
7272 if (sc->sc_type < WM_T_82543)
7273 sc->sc_tipg = TIPG_WM_DFLT;
7274 else
7275 sc->sc_tipg = TIPG_LG_DFLT;
7276
7277 sc->sc_tbi_anegticks = 5;
7278
7279 /* Initialize our media structures */
7280 sc->sc_mii.mii_ifp = ifp;
7281
7282 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7283 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7284 wm_tbi_mediastatus);
7285
7286 /*
7287 * SWD Pins:
7288 *
7289 * 0 = Link LED (output)
7290 * 1 = Loss Of Signal (input)
7291 */
7292 sc->sc_ctrl |= CTRL_SWDPIO(0);
7293 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7294 if (sc->sc_mediatype & WMP_F_SERDES)
7295 sc->sc_ctrl &= ~CTRL_LRST;
7296
7297 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7298
7299 #define ADD(ss, mm, dd) \
7300 do { \
7301 aprint_normal("%s%s", sep, ss); \
7302 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7303 sep = ", "; \
7304 } while (/*CONSTCOND*/0)
7305
7306 aprint_normal_dev(sc->sc_dev, "");
7307
7308 /* Only 82545 is LX */
7309 if (sc->sc_type == WM_T_82545) {
7310 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7311 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7312 } else {
7313 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7314 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7315 }
7316 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7317 aprint_normal("\n");
7318
7319 #undef ADD
7320
7321 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7322 }
7323
7324 /*
7325 * wm_tbi_mediastatus: [ifmedia interface function]
7326 *
7327 * Get the current interface media status on a 1000BASE-X device.
7328 */
7329 static void
7330 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7331 {
7332 struct wm_softc *sc = ifp->if_softc;
7333 uint32_t ctrl, status;
7334
7335 ifmr->ifm_status = IFM_AVALID;
7336 ifmr->ifm_active = IFM_ETHER;
7337
7338 status = CSR_READ(sc, WMREG_STATUS);
7339 if ((status & STATUS_LU) == 0) {
7340 ifmr->ifm_active |= IFM_NONE;
7341 return;
7342 }
7343
7344 ifmr->ifm_status |= IFM_ACTIVE;
7345 /* Only 82545 is LX */
7346 if (sc->sc_type == WM_T_82545)
7347 ifmr->ifm_active |= IFM_1000_LX;
7348 else
7349 ifmr->ifm_active |= IFM_1000_SX;
7350 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7351 ifmr->ifm_active |= IFM_FDX;
7352 else
7353 ifmr->ifm_active |= IFM_HDX;
7354 ctrl = CSR_READ(sc, WMREG_CTRL);
7355 if (ctrl & CTRL_RFCE)
7356 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7357 if (ctrl & CTRL_TFCE)
7358 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7359 }
7360
7361 /*
7362 * wm_tbi_mediachange: [ifmedia interface function]
7363 *
7364 * Set hardware to newly-selected media on a 1000BASE-X device.
7365 */
7366 static int
7367 wm_tbi_mediachange(struct ifnet *ifp)
7368 {
7369 struct wm_softc *sc = ifp->if_softc;
7370 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7371 uint32_t status;
7372 int i;
7373
7374 if (sc->sc_mediatype & WMP_F_SERDES)
7375 return 0;
7376
7377 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7378 || (sc->sc_type >= WM_T_82575))
7379 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7380
7381 /* XXX power_up_serdes_link_82575() */
7382
7383 sc->sc_ctrl &= ~CTRL_LRST;
7384 sc->sc_txcw = TXCW_ANE;
7385 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7386 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7387 else if (ife->ifm_media & IFM_FDX)
7388 sc->sc_txcw |= TXCW_FD;
7389 else
7390 sc->sc_txcw |= TXCW_HD;
7391
7392 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7393 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7394
7395 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7396 device_xname(sc->sc_dev), sc->sc_txcw));
7397 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7398 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7399 CSR_WRITE_FLUSH(sc);
7400 delay(1000);
7401
7402 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7403 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7404
7405 /*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set
	 * when the optics detect a signal; on the 82544 and older the
	 * sense is inverted, so zero means a signal is present.
7408 */
7409 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7410 /* Have signal; wait for the link to come up. */
7411 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7412 delay(10000);
7413 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7414 break;
7415 }
7416
7417 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7418 device_xname(sc->sc_dev),i));
7419
7420 status = CSR_READ(sc, WMREG_STATUS);
7421 DPRINTF(WM_DEBUG_LINK,
7422 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7423 device_xname(sc->sc_dev),status, STATUS_LU));
7424 if (status & STATUS_LU) {
7425 /* Link is up. */
7426 DPRINTF(WM_DEBUG_LINK,
7427 ("%s: LINK: set media -> link up %s\n",
7428 device_xname(sc->sc_dev),
7429 (status & STATUS_FD) ? "FDX" : "HDX"));
7430
7431 /*
			 * NOTE: The hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should update sc->sc_ctrl.
7434 */
7435 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7436 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7437 sc->sc_fcrtl &= ~FCRTL_XONE;
7438 if (status & STATUS_FD)
7439 sc->sc_tctl |=
7440 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7441 else
7442 sc->sc_tctl |=
7443 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7444 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7445 sc->sc_fcrtl |= FCRTL_XONE;
7446 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7447 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7448 WMREG_OLD_FCRTL : WMREG_FCRTL,
7449 sc->sc_fcrtl);
7450 sc->sc_tbi_linkup = 1;
7451 } else {
7452 if (i == WM_LINKUP_TIMEOUT)
7453 wm_check_for_link(sc);
7454 /* Link is down. */
7455 DPRINTF(WM_DEBUG_LINK,
7456 ("%s: LINK: set media -> link down\n",
7457 device_xname(sc->sc_dev)));
7458 sc->sc_tbi_linkup = 0;
7459 }
7460 } else {
7461 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7462 device_xname(sc->sc_dev)));
7463 sc->sc_tbi_linkup = 0;
7464 }
7465
7466 wm_tbi_set_linkled(sc);
7467
7468 return 0;
7469 }
7470
7471 /*
7472 * wm_tbi_set_linkled:
7473 *
7474 * Update the link LED on 1000BASE-X devices.
7475 */
7476 static void
7477 wm_tbi_set_linkled(struct wm_softc *sc)
7478 {
7479
7480 if (sc->sc_tbi_linkup)
7481 sc->sc_ctrl |= CTRL_SWDPIN(0);
7482 else
7483 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7484
7485 /* 82540 or newer devices are active low */
7486 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7487
7488 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7489 }
7490
7491 /*
7492 * wm_tbi_check_link:
7493 *
7494 * Check the link on 1000BASE-X devices.
7495 */
7496 static void
7497 wm_tbi_check_link(struct wm_softc *sc)
7498 {
7499 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7500 uint32_t status;
7501
7502 KASSERT(WM_TX_LOCKED(sc));
7503
7504 if (sc->sc_mediatype & WMP_F_SERDES) {
7505 sc->sc_tbi_linkup = 1;
7506 return;
7507 }
7508
7509 status = CSR_READ(sc, WMREG_STATUS);
7510
7511 /* XXX is this needed? */
7512 (void)CSR_READ(sc, WMREG_RXCW);
7513 (void)CSR_READ(sc, WMREG_CTRL);
7514
7515 /* set link status */
7516 if ((status & STATUS_LU) == 0) {
7517 DPRINTF(WM_DEBUG_LINK,
7518 ("%s: LINK: checklink -> down\n",
7519 device_xname(sc->sc_dev)));
7520 sc->sc_tbi_linkup = 0;
7521 } else if (sc->sc_tbi_linkup == 0) {
7522 DPRINTF(WM_DEBUG_LINK,
7523 ("%s: LINK: checklink -> up %s\n",
7524 device_xname(sc->sc_dev),
7525 (status & STATUS_FD) ? "FDX" : "HDX"));
7526 sc->sc_tbi_linkup = 1;
7527 }
7528
7529 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7530 && ((status & STATUS_LU) == 0)) {
7531 sc->sc_tbi_linkup = 0;
7532 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7533 /* If the timer expired, retry autonegotiation */
7534 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7535 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7536 sc->sc_tbi_ticks = 0;
7537 /*
7538 * Reset the link, and let autonegotiation do
7539 * its thing
7540 */
7541 sc->sc_ctrl |= CTRL_LRST;
7542 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7543 CSR_WRITE_FLUSH(sc);
7544 delay(1000);
7545 sc->sc_ctrl &= ~CTRL_LRST;
7546 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7547 CSR_WRITE_FLUSH(sc);
7548 delay(1000);
7549 CSR_WRITE(sc, WMREG_TXCW,
7550 sc->sc_txcw & ~TXCW_ANE);
7551 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7552 }
7553 }
7554 }
7555
7556 wm_tbi_set_linkled(sc);
7557 }
7558
7559 /* SFP related */
7560 static uint32_t
7561 wm_get_sfp_media_type(struct wm_softc *sc)
7562 {
7563
	/* XXX SFP module identification is not implemented yet; assume SERDES. */
	return WMP_F_SERDES;
7566 }

/*
 * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
 */
7571
/* Both SPI and Microwire */
7573
7574 /*
7575 * wm_eeprom_sendbits:
7576 *
7577 * Send a series of bits to the EEPROM.
7578 */
7579 static void
7580 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7581 {
7582 uint32_t reg;
7583 int x;
7584
7585 reg = CSR_READ(sc, WMREG_EECD);
7586
7587 for (x = nbits; x > 0; x--) {
7588 if (bits & (1U << (x - 1)))
7589 reg |= EECD_DI;
7590 else
7591 reg &= ~EECD_DI;
7592 CSR_WRITE(sc, WMREG_EECD, reg);
7593 CSR_WRITE_FLUSH(sc);
7594 delay(2);
7595 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7596 CSR_WRITE_FLUSH(sc);
7597 delay(2);
7598 CSR_WRITE(sc, WMREG_EECD, reg);
7599 CSR_WRITE_FLUSH(sc);
7600 delay(2);
7601 }
7602 }
7603
7604 /*
7605 * wm_eeprom_recvbits:
7606 *
7607 * Receive a series of bits from the EEPROM.
7608 */
7609 static void
7610 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7611 {
7612 uint32_t reg, val;
7613 int x;
7614
7615 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7616
7617 val = 0;
7618 for (x = nbits; x > 0; x--) {
7619 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7620 CSR_WRITE_FLUSH(sc);
7621 delay(2);
7622 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7623 val |= (1U << (x - 1));
7624 CSR_WRITE(sc, WMREG_EECD, reg);
7625 CSR_WRITE_FLUSH(sc);
7626 delay(2);
7627 }
7628 *valp = val;
7629 }
7630
7631 /* Microwire */
7632
7633 /*
7634 * wm_nvm_read_uwire:
7635 *
7636 * Read a word from the EEPROM using the MicroWire protocol.
7637 */
7638 static int
7639 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7640 {
7641 uint32_t reg, val;
7642 int i;
7643
7644 for (i = 0; i < wordcnt; i++) {
7645 /* Clear SK and DI. */
7646 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7647 CSR_WRITE(sc, WMREG_EECD, reg);
7648
		/*
		 * XXX: workaround for a bug in qemu-0.12.x and prior
		 * and in Xen.
		 *
		 * We use this workaround only for the 82540 because
		 * qemu's e1000 acts as an 82540.
		 */
7656 if (sc->sc_type == WM_T_82540) {
7657 reg |= EECD_SK;
7658 CSR_WRITE(sc, WMREG_EECD, reg);
7659 reg &= ~EECD_SK;
7660 CSR_WRITE(sc, WMREG_EECD, reg);
7661 CSR_WRITE_FLUSH(sc);
7662 delay(2);
7663 }
7664 /* XXX: end of workaround */
7665
7666 /* Set CHIP SELECT. */
7667 reg |= EECD_CS;
7668 CSR_WRITE(sc, WMREG_EECD, reg);
7669 CSR_WRITE_FLUSH(sc);
7670 delay(2);
7671
7672 /* Shift in the READ command. */
7673 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7674
7675 /* Shift in address. */
7676 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
7677
7678 /* Shift out the data. */
7679 wm_eeprom_recvbits(sc, &val, 16);
7680 data[i] = val & 0xffff;
7681
7682 /* Clear CHIP SELECT. */
7683 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7684 CSR_WRITE(sc, WMREG_EECD, reg);
7685 CSR_WRITE_FLUSH(sc);
7686 delay(2);
7687 }
7688
7689 return 0;
7690 }
7691
7692 /* SPI */
7693
/* Set SPI-related information */
7695 static void
7696 wm_set_spiaddrbits(struct wm_softc *sc)
7697 {
7698 uint32_t reg;
7699
7700 sc->sc_flags |= WM_F_EEPROM_SPI;
7701 reg = CSR_READ(sc, WMREG_EECD);
7702 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7703 }
7704
7705 /*
7706 * wm_nvm_ready_spi:
7707 *
7708 * Wait for a SPI EEPROM to be ready for commands.
7709 */
7710 static int
7711 wm_nvm_ready_spi(struct wm_softc *sc)
7712 {
7713 uint32_t val;
7714 int usec;
7715
7716 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7717 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7718 wm_eeprom_recvbits(sc, &val, 8);
7719 if ((val & SPI_SR_RDY) == 0)
7720 break;
7721 }
7722 if (usec >= SPI_MAX_RETRIES) {
7723 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7724 return 1;
7725 }
7726 return 0;
7727 }
7728
7729 /*
7730 * wm_nvm_read_spi:
7731 *
 *	Read a word from the EEPROM using the SPI protocol.
7733 */
7734 static int
7735 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7736 {
7737 uint32_t reg, val;
7738 int i;
7739 uint8_t opc;
7740
7741 /* Clear SK and CS. */
7742 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7743 CSR_WRITE(sc, WMREG_EECD, reg);
7744 CSR_WRITE_FLUSH(sc);
7745 delay(2);
7746
7747 if (wm_nvm_ready_spi(sc))
7748 return 1;
7749
7750 /* Toggle CS to flush commands. */
7751 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7752 CSR_WRITE_FLUSH(sc);
7753 delay(2);
7754 CSR_WRITE(sc, WMREG_EECD, reg);
7755 CSR_WRITE_FLUSH(sc);
7756 delay(2);
7757
7758 opc = SPI_OPC_READ;
7759 if (sc->sc_ee_addrbits == 8 && word >= 128)
7760 opc |= SPI_OPC_A8;
7761
7762 wm_eeprom_sendbits(sc, opc, 8);
7763 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
7764
7765 for (i = 0; i < wordcnt; i++) {
7766 wm_eeprom_recvbits(sc, &val, 16);
7767 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7768 }
7769
7770 /* Raise CS and clear SK. */
7771 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7772 CSR_WRITE(sc, WMREG_EECD, reg);
7773 CSR_WRITE_FLUSH(sc);
7774 delay(2);
7775
7776 return 0;
7777 }
7778
/* Reading via the EERD register */
7780
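/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the given EERD or EEWR register until the DONE bit is set.
 *	Returns 0 on completion, or -1 if the bit never sets.
 */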
7781 static int
7782 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7783 {
7784 uint32_t attempts = 100000;
7785 uint32_t i, reg = 0;
7786 int32_t done = -1;
7787
7788 for (i = 0; i < attempts; i++) {
7789 reg = CSR_READ(sc, rw);
7790
7791 if (reg & EERD_DONE) {
7792 done = 0;
7793 break;
7794 }
7795 delay(5);
7796 }
7797
7798 return done;
7799 }
7800
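/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the EEPROM using the EERD register.
 */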
7801 static int
7802 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
7803 uint16_t *data)
7804 {
7805 int i, eerd = 0;
7806 int error = 0;
7807
7808 for (i = 0; i < wordcnt; i++) {
7809 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
7810
7811 CSR_WRITE(sc, WMREG_EERD, eerd);
7812 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
7813 if (error != 0)
7814 break;
7815
7816 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
7817 }
7818
7819 return error;
7820 }
7821
7822 /* Flash */
7823
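/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Detect which flash bank holds the valid NVM image, using the
 *	EECD SEC1VAL bit on ICH8/ICH9 when it is valid, and otherwise
 *	the signature byte of each bank.
 */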
7824 static int
7825 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7826 {
7827 uint32_t eecd;
7828 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7829 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7830 uint8_t sig_byte = 0;
7831
7832 switch (sc->sc_type) {
7833 case WM_T_ICH8:
7834 case WM_T_ICH9:
7835 eecd = CSR_READ(sc, WMREG_EECD);
7836 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7837 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7838 return 0;
7839 }
7840 /* FALLTHROUGH */
7841 default:
7842 /* Default to 0 */
7843 *bank = 0;
7844
7845 /* Check bank 0 */
7846 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7847 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7848 *bank = 0;
7849 return 0;
7850 }
7851
7852 /* Check bank 1 */
7853 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7854 &sig_byte);
7855 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7856 *bank = 1;
7857 return 0;
7858 }
7859 }
7860
7861 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
7862 device_xname(sc->sc_dev)));
7863 return -1;
7864 }
7865
7866 /******************************************************************************
7867 * This function does initial flash setup so that a new read/write/erase cycle
7868 * can be started.
7869 *
7870 * sc - The pointer to the hw structure
7871 ****************************************************************************/
7872 static int32_t
7873 wm_ich8_cycle_init(struct wm_softc *sc)
7874 {
7875 uint16_t hsfsts;
7876 int32_t error = 1;
7877 int32_t i = 0;
7878
7879 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7880
	/* Check the Flash Descriptor Valid bit in HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
7885
	/* Clear FCERR and DAEL in HW status by writing 1s */
7888 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7889
7890 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7891
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads as 1 after
	 * a hardware reset, so it can be used to tell whether a cycle is
	 * in progress or has completed.  We should also have a software
	 * semaphore mechanism guarding FDONE or the cycle-in-progress
	 * bit, so that accesses by two threads are serialized and two
	 * threads can't start a cycle at the same time.
	 */
7902
7903 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7904 /*
7905 * There is no cycle running at present, so we can start a
7906 * cycle
7907 */
7908
7909 /* Begin by setting Flash Cycle Done. */
7910 hsfsts |= HSFSTS_DONE;
7911 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7912 error = 0;
7913 } else {
		/*
		 * Otherwise, poll for a while so the current cycle has a
		 * chance to end before giving up.
		 */
7918 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7919 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7920 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7921 error = 0;
7922 break;
7923 }
7924 delay(1);
7925 }
7926 if (error == 0) {
			/*
			 * The previous cycle completed in time; now set
			 * the Flash Cycle Done bit.
			 */
7931 hsfsts |= HSFSTS_DONE;
7932 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7933 }
7934 }
7935 return error;
7936 }
7937
7938 /******************************************************************************
7939 * This function starts a flash cycle and waits for its completion
7940 *
7941 * sc - The pointer to the hw structure
7942 ****************************************************************************/
7943 static int32_t
7944 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7945 {
7946 uint16_t hsflctl;
7947 uint16_t hsfsts;
7948 int32_t error = 1;
7949 uint32_t i = 0;
7950
7951 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7952 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7953 hsflctl |= HSFCTL_GO;
7954 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7955
7956 /* Wait till FDONE bit is set to 1 */
7957 do {
7958 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7959 if (hsfsts & HSFSTS_DONE)
7960 break;
7961 delay(1);
7962 i++;
7963 } while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
7965 error = 0;
7966
7967 return error;
7968 }
7969
7970 /******************************************************************************
7971 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7972 *
7973 * sc - The pointer to the hw structure
7974 * index - The index of the byte or word to read.
7975 * size - Size of data to read, 1=byte 2=word
7976 * data - Pointer to the word to store the value read.
7977 *****************************************************************************/
7978 static int32_t
7979 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7980 uint32_t size, uint16_t *data)
7981 {
7982 uint16_t hsfsts;
7983 uint16_t hsflctl;
7984 uint32_t flash_linear_address;
7985 uint32_t flash_data = 0;
7986 int32_t error = 1;
7987 int32_t count = 0;
7988
	if (size < 1 || size > 2 || data == NULL ||
7990 index > ICH_FLASH_LINEAR_ADDR_MASK)
7991 return error;
7992
7993 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7994 sc->sc_ich8_flash_base;
7995
7996 do {
7997 delay(1);
7998 /* Steps */
7999 error = wm_ich8_cycle_init(sc);
8000 if (error)
8001 break;
8002
8003 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* A BCOUNT of 0b/1b selects a 1- or 2-byte transfer. */
8005 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8006 & HSFCTL_BCOUNT_MASK;
8007 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8008 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8009
8010 /*
8011 * Write the last 24 bits of index into Flash Linear address
8012 * field in Flash Address
8013 */
8014 /* TODO: TBD maybe check the index against the size of flash */
8015
8016 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8017
8018 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8019
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the data out of Flash
		 * Data0, least significant byte first.
		 */
8026 if (error == 0) {
8027 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8028 if (size == 1)
8029 *data = (uint8_t)(flash_data & 0x000000FF);
8030 else if (size == 2)
8031 *data = (uint16_t)(flash_data & 0x0000FFFF);
8032 break;
8033 } else {
			/*
			 * If we got here, things are probably completely
			 * hosed, but if the error condition is detected,
			 * it won't hurt to retry up to
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
8040 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8041 if (hsfsts & HSFSTS_ERR) {
8042 /* Repeat for some time before giving up. */
8043 continue;
8044 } else if ((hsfsts & HSFSTS_DONE) == 0)
8045 break;
8046 }
8047 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8048
8049 return error;
8050 }
8051
8052 /******************************************************************************
8053 * Reads a single byte from the NVM using the ICH8 flash access registers.
8054 *
8055 * sc - pointer to wm_hw structure
8056 * index - The index of the byte to read.
8057 * data - Pointer to a byte to store the value read.
8058 *****************************************************************************/
8059 static int32_t
8060 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8061 {
8062 int32_t status;
8063 uint16_t word = 0;
8064
8065 status = wm_read_ich8_data(sc, index, 1, &word);
8066 if (status == 0)
8067 *data = (uint8_t)word;
8068 else
8069 *data = 0;
8070
8071 return status;
8072 }
8073
8074 /******************************************************************************
8075 * Reads a word from the NVM using the ICH8 flash access registers.
8076 *
8077 * sc - pointer to wm_hw structure
8078 * index - The starting byte index of the word to read.
8079 * data - Pointer to a word to store the value read.
8080 *****************************************************************************/
8081 static int32_t
8082 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8083 {
8084 int32_t status;
8085
8086 status = wm_read_ich8_data(sc, index, 2, data);
8087 return status;
8088 }
8089
/******************************************************************************
 * Reads one or more 16-bit words from the EEPROM using the ICH8's flash
 * access registers.
 *
 * sc - Struct containing variables accessed by shared code
 * offset - offset of the first word in the EEPROM to read
 * words - number of words to read
 * data - words read from the EEPROM
 *****************************************************************************/
8099 static int
8100 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8101 {
8102 int32_t error = 0;
8103 uint32_t flash_bank = 0;
8104 uint32_t act_offset = 0;
8105 uint32_t bank_offset = 0;
8106 uint16_t word = 0;
8107 uint16_t i = 0;
8108
8109 /*
8110 * We need to know which is the valid flash bank. In the event
8111 * that we didn't allocate eeprom_shadow_ram, we may not be
8112 * managing flash_bank. So it cannot be trusted and needs
8113 * to be updated with each read.
8114 */
8115 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8116 if (error) {
8117 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
8118 __func__);
8119 flash_bank = 0;
8120 }
8121
	/*
	 * Compute the byte offset of the selected bank; the bank size is
	 * in words, hence the * 2.
	 */
8126 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8127
8128 error = wm_get_swfwhw_semaphore(sc);
8129 if (error) {
8130 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8131 __func__);
8132 return error;
8133 }
8134
8135 for (i = 0; i < words; i++) {
8136 /* The NVM part needs a byte offset, hence * 2 */
8137 act_offset = bank_offset + ((offset + i) * 2);
8138 error = wm_read_ich8_word(sc, act_offset, &word);
8139 if (error) {
8140 aprint_error_dev(sc->sc_dev,
8141 "%s: failed to read NVM\n", __func__);
8142 break;
8143 }
8144 data[i] = word;
8145 }
8146
8147 wm_put_swfwhw_semaphore(sc);
8148 return error;
8149 }
8150
8151 /* Lock, detecting NVM type, validate checksum and read */
8152
8153 /*
8154 * wm_nvm_acquire:
8155 *
8156 * Perform the EEPROM handshake required on some chips.
8157 */
8158 static int
8159 wm_nvm_acquire(struct wm_softc *sc)
8160 {
8161 uint32_t reg;
8162 int x;
8163 int ret = 0;
8164
	/* Flash-type NVM needs no handshake; always succeeds. */
8166 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8167 return 0;
8168
8169 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8170 ret = wm_get_swfwhw_semaphore(sc);
8171 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8172 /* This will also do wm_get_swsm_semaphore() if needed */
8173 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8174 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8175 ret = wm_get_swsm_semaphore(sc);
8176 }
8177
8178 if (ret) {
8179 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8180 __func__);
8181 return 1;
8182 }
8183
8184 if (sc->sc_flags & WM_F_LOCK_EECD) {
8185 reg = CSR_READ(sc, WMREG_EECD);
8186
8187 /* Request EEPROM access. */
8188 reg |= EECD_EE_REQ;
8189 CSR_WRITE(sc, WMREG_EECD, reg);
8190
8191 /* ..and wait for it to be granted. */
8192 for (x = 0; x < 1000; x++) {
8193 reg = CSR_READ(sc, WMREG_EECD);
8194 if (reg & EECD_EE_GNT)
8195 break;
8196 delay(5);
8197 }
8198 if ((reg & EECD_EE_GNT) == 0) {
8199 aprint_error_dev(sc->sc_dev,
8200 "could not acquire EEPROM GNT\n");
8201 reg &= ~EECD_EE_REQ;
8202 CSR_WRITE(sc, WMREG_EECD, reg);
8203 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8204 wm_put_swfwhw_semaphore(sc);
8205 if (sc->sc_flags & WM_F_LOCK_SWFW)
8206 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8207 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8208 wm_put_swsm_semaphore(sc);
8209 return 1;
8210 }
8211 }
8212
8213 return 0;
8214 }
8215
8216 /*
8217 * wm_nvm_release:
8218 *
8219 * Release the EEPROM mutex.
8220 */
8221 static void
8222 wm_nvm_release(struct wm_softc *sc)
8223 {
8224 uint32_t reg;
8225
	/* Flash-type NVM needs no handshake; nothing to release. */
8227 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8228 return;
8229
8230 if (sc->sc_flags & WM_F_LOCK_EECD) {
8231 reg = CSR_READ(sc, WMREG_EECD);
8232 reg &= ~EECD_EE_REQ;
8233 CSR_WRITE(sc, WMREG_EECD, reg);
8234 }
8235
8236 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8237 wm_put_swfwhw_semaphore(sc);
8238 if (sc->sc_flags & WM_F_LOCK_SWFW)
8239 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8240 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8241 wm_put_swsm_semaphore(sc);
8242 }
8243
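/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Returns 1 if the NVM is an on-board EEPROM, or 0 if an
 *	82573/82574/82583 reports a Flash-type device in EECD.
 */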
8244 static int
8245 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8246 {
8247 uint32_t eecd = 0;
8248
8249 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8250 || sc->sc_type == WM_T_82583) {
8251 eecd = CSR_READ(sc, WMREG_EECD);
8252
8253 /* Isolate bits 15 & 16 */
8254 eecd = ((eecd >> 15) & 0x03);
8255
8256 /* If both bits are set, device is Flash type */
8257 if (eecd == 0x03)
8258 return 0;
8259 }
8260 return 1;
8261 }
8262
8263 #define NVM_CHECKSUM 0xBABA
8264 #define EEPROM_SIZE 0x0040
8265 #define NVM_COMPAT 0x0003
8266 #define NVM_COMPAT_VALID_CHECKSUM 0x0001
8267 #define NVM_FUTURE_INIT_WORD1 0x0019
8268 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
8269
/*
 * wm_nvm_validate_checksum:
 *
 *	The checksum is defined as the sum of the first 64 (16-bit) words,
 *	which must equal NVM_CHECKSUM.
 */
8275 static int
8276 wm_nvm_validate_checksum(struct wm_softc *sc)
8277 {
8278 uint16_t checksum;
8279 uint16_t eeprom_data;
8280 #ifdef WM_DEBUG
8281 uint16_t csum_wordaddr, valid_checksum;
8282 #endif
8283 int i;
8284
8285 checksum = 0;
8286
8287 /* Don't check for I211 */
8288 if (sc->sc_type == WM_T_I211)
8289 return 0;
8290
8291 #ifdef WM_DEBUG
8292 if (sc->sc_type == WM_T_PCH_LPT) {
8293 csum_wordaddr = NVM_COMPAT;
8294 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8295 } else {
8296 csum_wordaddr = NVM_FUTURE_INIT_WORD1;
8297 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8298 }
8299
8300 /* Dump EEPROM image for debug */
8301 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8302 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8303 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8304 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8305 if ((eeprom_data & valid_checksum) == 0) {
8306 DPRINTF(WM_DEBUG_NVM,
8307 ("%s: NVM need to be updated (%04x != %04x)\n",
8308 device_xname(sc->sc_dev), eeprom_data,
8309 valid_checksum));
8310 }
8311 }
8312
8313 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8314 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8315 for (i = 0; i < EEPROM_SIZE; i++) {
8316 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8317 printf("XX ");
8318 else
8319 printf("%04x ", eeprom_data);
8320 if (i % 8 == 7)
8321 printf("\n");
8322 }
8323 }
8324
8325 #endif /* WM_DEBUG */
8326
8327 for (i = 0; i < EEPROM_SIZE; i++) {
8328 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8329 return 1;
8330 checksum += eeprom_data;
8331 }
8332
	if (checksum != (uint16_t) NVM_CHECKSUM) {
#ifdef WM_DEBUG
		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
#endif
	}

	/* Note that a checksum mismatch is reported but is not fatal. */
	return 0;
8341 }
8342
8343 /*
8344 * wm_nvm_read:
8345 *
8346 * Read data from the serial EEPROM.
8347 */
8348 static int
8349 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8350 {
8351 int rv;
8352
8353 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8354 return 1;
8355
8356 if (wm_nvm_acquire(sc))
8357 return 1;
8358
8359 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8360 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8361 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8362 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8363 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8364 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8365 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8366 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8367 else
8368 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8369
8370 wm_nvm_release(sc);
8371 return rv;
8372 }
8373
8374 /*
8375 * Hardware semaphores.
 * Very complex...
8377 */
8378
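/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software (SMBI) and software/firmware (SWESMBI)
 *	semaphores in the SWSM register.  Returns 0 on success.
 */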
8379 static int
8380 wm_get_swsm_semaphore(struct wm_softc *sc)
8381 {
8382 int32_t timeout;
8383 uint32_t swsm;
8384
8385 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8386 /* Get the SW semaphore. */
8387 timeout = 1000 + 1; /* XXX */
8388 while (timeout) {
8389 swsm = CSR_READ(sc, WMREG_SWSM);
8390
8391 if ((swsm & SWSM_SMBI) == 0)
8392 break;
8393
8394 delay(50);
8395 timeout--;
8396 }
8397
8398 if (timeout == 0) {
8399 aprint_error_dev(sc->sc_dev,
8400 "could not acquire SWSM SMBI\n");
8401 return 1;
8402 }
8403 }
8404
8405 /* Get the FW semaphore. */
8406 timeout = 1000 + 1; /* XXX */
8407 while (timeout) {
8408 swsm = CSR_READ(sc, WMREG_SWSM);
8409 swsm |= SWSM_SWESMBI;
8410 CSR_WRITE(sc, WMREG_SWSM, swsm);
8411 /* If we managed to set the bit we got the semaphore. */
8412 swsm = CSR_READ(sc, WMREG_SWSM);
8413 if (swsm & SWSM_SWESMBI)
8414 break;
8415
8416 delay(50);
8417 timeout--;
8418 }
8419
8420 if (timeout == 0) {
8421 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8422 /* Release semaphores */
8423 wm_put_swsm_semaphore(sc);
8424 return 1;
8425 }
8426 return 0;
8427 }
8428
8429 static void
8430 wm_put_swsm_semaphore(struct wm_softc *sc)
8431 {
8432 uint32_t swsm;
8433
8434 swsm = CSR_READ(sc, WMREG_SWSM);
8435 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8436 CSR_WRITE(sc, WMREG_SWSM, swsm);
8437 }
8438
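/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software half of the SW_FW_SYNC bits named by "mask",
 *	retrying for up to about a second.  Returns 0 on success.
 */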
8439 static int
8440 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8441 {
8442 uint32_t swfw_sync;
8443 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8444 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
8446
8447 for (timeout = 0; timeout < 200; timeout++) {
8448 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8449 if (wm_get_swsm_semaphore(sc)) {
8450 aprint_error_dev(sc->sc_dev,
8451 "%s: failed to get semaphore\n",
8452 __func__);
8453 return 1;
8454 }
8455 }
8456 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8457 if ((swfw_sync & (swmask | fwmask)) == 0) {
8458 swfw_sync |= swmask;
8459 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8460 if (sc->sc_flags & WM_F_LOCK_SWSM)
8461 wm_put_swsm_semaphore(sc);
8462 return 0;
8463 }
8464 if (sc->sc_flags & WM_F_LOCK_SWSM)
8465 wm_put_swsm_semaphore(sc);
8466 delay(5000);
8467 }
8468 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8469 device_xname(sc->sc_dev), mask, swfw_sync);
8470 return 1;
8471 }
8472
8473 static void
8474 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8475 {
8476 uint32_t swfw_sync;
8477
8478 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8479 while (wm_get_swsm_semaphore(sc) != 0)
8480 continue;
8481 }
8482 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8483 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8484 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8485 if (sc->sc_flags & WM_F_LOCK_SWSM)
8486 wm_put_swsm_semaphore(sc);
8487 }
8488
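/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag in EXTCNFCTR, retrying for up to
 *	about a second.  Returns 0 on success.
 */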
8489 static int
8490 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8491 {
8492 uint32_t ext_ctrl;
	int timeout;
8494
8495 for (timeout = 0; timeout < 200; timeout++) {
8496 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8497 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8498 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8499
8500 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8501 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8502 return 0;
8503 delay(5000);
8504 }
8505 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8506 device_xname(sc->sc_dev), ext_ctrl);
8507 return 1;
8508 }
8509
8510 static void
8511 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8512 {
8513 uint32_t ext_ctrl;
8514 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8515 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8516 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8517 }
8518
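/*
 * wm_get_hw_semaphore_82573:
 *
 *	Acquire MDIO ownership via EXTCNFCTR.  Returns 0 on success
 *	and -1 if the ownership bit never sticks.
 */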
8519 static int
8520 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8521 {
8522 int i = 0;
8523 uint32_t reg;
8524
8525 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8526 do {
8527 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8528 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8529 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8530 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8531 break;
8532 delay(2*1000);
8533 i++;
8534 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8535
8536 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8537 wm_put_hw_semaphore_82573(sc);
8538 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8539 device_xname(sc->sc_dev));
8540 return -1;
8541 }
8542
8543 return 0;
8544 }
8545
8546 static void
8547 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8548 {
8549 uint32_t reg;
8550
8551 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8552 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8553 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8554 }
8555
8556 /*
8557 * Management mode and power management related subroutines.
8558 * BMC, AMT, suspend/resume and EEE.
8559 */
8560
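/*
 * wm_check_mng_mode:
 *
 *	Returns non-zero if the management mode (BMC/AMT) is enabled,
 *	dispatching on the chip type.
 */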
8561 static int
8562 wm_check_mng_mode(struct wm_softc *sc)
8563 {
8564 int rv;
8565
8566 switch (sc->sc_type) {
8567 case WM_T_ICH8:
8568 case WM_T_ICH9:
8569 case WM_T_ICH10:
8570 case WM_T_PCH:
8571 case WM_T_PCH2:
8572 case WM_T_PCH_LPT:
8573 rv = wm_check_mng_mode_ich8lan(sc);
8574 break;
8575 case WM_T_82574:
8576 case WM_T_82583:
8577 rv = wm_check_mng_mode_82574(sc);
8578 break;
8579 case WM_T_82571:
8580 case WM_T_82572:
8581 case WM_T_82573:
8582 case WM_T_80003:
8583 rv = wm_check_mng_mode_generic(sc);
8584 break;
8585 default:
		/* nothing to do */
8587 rv = 0;
8588 break;
8589 }
8590
8591 return rv;
8592 }
8593
8594 static int
8595 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8596 {
8597 uint32_t fwsm;
8598
8599 fwsm = CSR_READ(sc, WMREG_FWSM);
8600
8601 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8602 return 1;
8603
8604 return 0;
8605 }
8606
8607 static int
8608 wm_check_mng_mode_82574(struct wm_softc *sc)
8609 {
8610 uint16_t data;
8611
8612 wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
8613
8614 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8615 return 1;
8616
8617 return 0;
8618 }
8619
8620 static int
8621 wm_check_mng_mode_generic(struct wm_softc *sc)
8622 {
8623 uint32_t fwsm;
8624
8625 fwsm = CSR_READ(sc, WMREG_FWSM);
8626
8627 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8628 return 1;
8629
8630 return 0;
8631 }
8632
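/*
 * wm_enable_mng_pass_thru:
 *
 *	Returns 1 if management packets should be passed through to the
 *	host, i.e. firmware is present, TCO reception is enabled and the
 *	management mode permits pass-through.
 */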
8633 static int
8634 wm_enable_mng_pass_thru(struct wm_softc *sc)
8635 {
8636 uint32_t manc, fwsm, factps;
8637
8638 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8639 return 0;
8640
8641 manc = CSR_READ(sc, WMREG_MANC);
8642
8643 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8644 device_xname(sc->sc_dev), manc));
8645 if ((manc & MANC_RECV_TCO_EN) == 0)
8646 return 0;
8647
8648 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8649 fwsm = CSR_READ(sc, WMREG_FWSM);
8650 factps = CSR_READ(sc, WMREG_FACTPS);
8651 if (((factps & FACTPS_MNGCG) == 0)
8652 && ((fwsm & FWSM_MODE_MASK)
8653 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8654 return 1;
8655 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8656 uint16_t data;
8657
8658 factps = CSR_READ(sc, WMREG_FACTPS);
8659 wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
8660 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8661 device_xname(sc->sc_dev), factps, data));
8662 if (((factps & FACTPS_MNGCG) == 0)
8663 && ((data & EEPROM_CFG2_MNGM_MASK)
8664 == (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8665 return 1;
8666 } else if (((manc & MANC_SMBUS_EN) != 0)
8667 && ((manc & MANC_ASF_EN) == 0))
8668 return 1;
8669
8670 return 0;
8671 }
8672
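/*
 * wm_check_reset_block:
 *
 *	Returns 0 if a PHY reset is currently permitted, or a negative
 *	value if the firmware blocks it.
 */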
8673 static int
8674 wm_check_reset_block(struct wm_softc *sc)
8675 {
8676 uint32_t reg;
8677
8678 switch (sc->sc_type) {
8679 case WM_T_ICH8:
8680 case WM_T_ICH9:
8681 case WM_T_ICH10:
8682 case WM_T_PCH:
8683 case WM_T_PCH2:
8684 case WM_T_PCH_LPT:
8685 reg = CSR_READ(sc, WMREG_FWSM);
8686 if ((reg & FWSM_RSPCIPHY) != 0)
8687 return 0;
8688 else
8689 return -1;
8690 break;
8691 case WM_T_82571:
8692 case WM_T_82572:
8693 case WM_T_82573:
8694 case WM_T_82574:
8695 case WM_T_82583:
8696 case WM_T_80003:
8697 reg = CSR_READ(sc, WMREG_MANC);
8698 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8699 return -1;
8700 else
8701 return 0;
8702 break;
8703 default:
8704 /* no problem */
8705 break;
8706 }
8707
8708 return 0;
8709 }
8710
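/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken over the device
 *	by setting the DRV_LOAD bit.
 */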
8711 static void
8712 wm_get_hw_control(struct wm_softc *sc)
8713 {
8714 uint32_t reg;
8715
8716 switch (sc->sc_type) {
8717 case WM_T_82573:
8718 reg = CSR_READ(sc, WMREG_SWSM);
8719 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8720 break;
8721 case WM_T_82571:
8722 case WM_T_82572:
8723 case WM_T_82574:
8724 case WM_T_82583:
8725 case WM_T_80003:
8726 case WM_T_ICH8:
8727 case WM_T_ICH9:
8728 case WM_T_ICH10:
8729 case WM_T_PCH:
8730 case WM_T_PCH2:
8731 case WM_T_PCH_LPT:
8732 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8733 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8734 break;
8735 default:
8736 break;
8737 }
8738 }
8739
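/*
 * wm_release_hw_control:
 *
 *	Tell the firmware that the driver is releasing the device
 *	by clearing the DRV_LOAD bit.
 */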
8740 static void
8741 wm_release_hw_control(struct wm_softc *sc)
8742 {
8743 uint32_t reg;
8744
8745 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8746 return;
8747
8748 if (sc->sc_type == WM_T_82573) {
8749 reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
8752 } else {
8753 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8754 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8755 }
8756 }
8757
8758 static void
8759 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8760 {
8761 uint32_t reg;
8762
8763 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8764
8765 if (on != 0)
8766 reg |= EXTCNFCTR_GATE_PHY_CFG;
8767 else
8768 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8769
8770 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8771 }
8772
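/*
 * wm_smbustopci:
 *
 *	Toggle LANPHYPC to force the PHY from SMBus mode to PCIe mode
 *	when no valid firmware is managing it.
 */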
8773 static void
8774 wm_smbustopci(struct wm_softc *sc)
8775 {
8776 uint32_t fwsm;
8777
8778 fwsm = CSR_READ(sc, WMREG_FWSM);
8779 if (((fwsm & FWSM_FW_VALID) == 0)
8780 && ((wm_check_reset_block(sc) == 0))) {
8781 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8782 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8783 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8784 CSR_WRITE_FLUSH(sc);
8785 delay(10);
8786 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8787 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8788 CSR_WRITE_FLUSH(sc);
8789 delay(50*1000);
8790
8791 /*
8792 * Gate automatic PHY configuration by hardware on non-managed
8793 * 82579
8794 */
8795 if (sc->sc_type == WM_T_PCH2)
8796 wm_gate_hw_phy_config_ich8lan(sc, 1);
8797 }
8798 }
8799
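/*
 * wm_init_manageability:
 *
 *	Disable hardware ARP interception and, on 82571 and newer,
 *	enable receiving management packets to the host.
 */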
8800 static void
8801 wm_init_manageability(struct wm_softc *sc)
8802 {
8803
8804 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8805 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8806 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8807
8808 /* Disable hardware interception of ARP */
8809 manc &= ~MANC_ARP_EN;
8810
8811 /* Enable receiving management packets to the host */
8812 if (sc->sc_type >= WM_T_82571) {
8813 manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}
8818
8819 CSR_WRITE(sc, WMREG_MANC, manc);
8820 }
8821 }
8822
8823 static void
8824 wm_release_manageability(struct wm_softc *sc)
8825 {
8826
8827 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8828 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8829
8830 manc |= MANC_ARP_EN;
8831 if (sc->sc_type >= WM_T_82571)
8832 manc &= ~MANC_EN_MNG2HOST;
8833
8834 CSR_WRITE(sc, WMREG_MANC, manc);
8835 }
8836 }
8837
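/*
 * wm_get_wakeup:
 *
 *	Set the manageability-related flags (HAS_AMT, ARC_SUBSYS_VALID,
 *	ASF_FIRMWARE_PRES and HAS_MANAGE) from the chip type and the
 *	firmware state.
 */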
8838 static void
8839 wm_get_wakeup(struct wm_softc *sc)
8840 {
8841
8842 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8843 switch (sc->sc_type) {
8844 case WM_T_82573:
8845 case WM_T_82583:
8846 sc->sc_flags |= WM_F_HAS_AMT;
8847 /* FALLTHROUGH */
8848 case WM_T_80003:
8849 case WM_T_82541:
8850 case WM_T_82547:
8851 case WM_T_82571:
8852 case WM_T_82572:
8853 case WM_T_82574:
8854 case WM_T_82575:
8855 case WM_T_82576:
8856 case WM_T_82580:
8857 case WM_T_82580ER:
8858 case WM_T_I350:
8859 case WM_T_I354:
8860 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8861 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8862 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8863 break;
8864 case WM_T_ICH8:
8865 case WM_T_ICH9:
8866 case WM_T_ICH10:
8867 case WM_T_PCH:
8868 case WM_T_PCH2:
8869 case WM_T_PCH_LPT:
8870 sc->sc_flags |= WM_F_HAS_AMT;
8871 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8872 break;
8873 default:
8874 break;
8875 }
8876
8877 /* 1: HAS_MANAGE */
8878 if (wm_enable_mng_pass_thru(sc) != 0)
8879 sc->sc_flags |= WM_F_HAS_MANAGE;
8880
8881 #ifdef WM_DEBUG
8882 printf("\n");
8883 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8884 printf("HAS_AMT,");
8885 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8886 printf("ARC_SUBSYS_VALID,");
8887 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8888 printf("ASF_FIRMWARE_PRES,");
8889 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8890 printf("HAS_MANAGE,");
8891 printf("\n");
8892 #endif
	/*
	 * Note that the WOL flag is set later, after the EEPROM-related
	 * reset code has run.
	 */
8897 }
8898
8899 #ifdef WM_WOL
8900 /* WOL in the newer chipset interfaces (pchlan) */
8901 static void
8902 wm_enable_phy_wakeup(struct wm_softc *sc)
8903 {
8904 #if 0
8905 uint16_t preg;
8906
8907 /* Copy MAC RARs to PHY RARs */
8908
8909 /* Copy MAC MTA to PHY MTA */
8910
8911 /* Configure PHY Rx Control register */
8912
8913 /* Enable PHY wakeup in MAC register */
8914
8915 /* Configure and enable PHY wakeup in PHY registers */
8916
8917 /* Activate PHY wakeup */
8918
8919 /* XXX */
8920 #endif
8921 }
8922
8923 /* Power down workaround on D3 */
8924 static void
8925 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8926 {
8927 uint32_t reg;
8928 int i;
8929
8930 for (i = 0; i < 2; i++) {
8931 /* Disable link */
8932 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8933 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8934 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8935
8936 /*
8937 * Call gig speed drop workaround on Gig disable before
8938 * accessing any PHY registers
8939 */
8940 if (sc->sc_type == WM_T_ICH8)
8941 wm_gig_downshift_workaround_ich8lan(sc);
8942
8943 /* Write VR power-down enable */
8944 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8945 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8946 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8947 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8948
8949 /* Read it back and test */
8950 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8951 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8952 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8953 break;
8954
8955 /* Issue PHY reset and repeat at most one more time */
8956 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8957 }
8958 }
8959
8960 static void
8961 wm_enable_wakeup(struct wm_softc *sc)
8962 {
8963 uint32_t reg, pmreg;
8964 pcireg_t pmode;
8965
8966 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8967 &pmreg, NULL) == 0)
8968 return;
8969
8970 /* Advertise the wakeup capability */
8971 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8972 | CTRL_SWDPIN(3));
8973 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8974
8975 /* ICH workaround */
8976 switch (sc->sc_type) {
8977 case WM_T_ICH8:
8978 case WM_T_ICH9:
8979 case WM_T_ICH10:
8980 case WM_T_PCH:
8981 case WM_T_PCH2:
8982 case WM_T_PCH_LPT:
8983 /* Disable gig during WOL */
8984 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8985 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8986 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8987 if (sc->sc_type == WM_T_PCH)
8988 wm_gmii_reset(sc);
8989
8990 /* Power down workaround */
8991 if (sc->sc_phytype == WMPHY_82577) {
8992 struct mii_softc *child;
8993
8994 /* Assume that the PHY is copper */
8995 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8996 if (child->mii_mpd_rev <= 2)
8997 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8998 (768 << 5) | 25, 0x0444); /* magic num */
8999 }
9000 break;
9001 default:
9002 break;
9003 }
9004
9005 /* Keep the laser running on fiber adapters */
	if (((sc->sc_mediatype & WMP_F_FIBER) != 0)
	    || ((sc->sc_mediatype & WMP_F_SERDES) != 0)) {
9008 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9009 reg |= CTRL_EXT_SWDPIN(3);
9010 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9011 }
9012
9013 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9014 #if 0 /* for the multicast packet */
9015 reg |= WUFC_MC;
9016 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9017 #endif
9018
9019 if (sc->sc_type == WM_T_PCH) {
9020 wm_enable_phy_wakeup(sc);
9021 } else {
9022 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9023 CSR_WRITE(sc, WMREG_WUFC, reg);
9024 }
9025
9026 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9027 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9028 || (sc->sc_type == WM_T_PCH2))
9029 && (sc->sc_phytype == WMPHY_IGP_3))
9030 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9031
9032 /* Request PME */
9033 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9034 #if 0
9035 /* Disable WOL */
9036 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9037 #else
9038 /* For WOL */
9039 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9040 #endif
9041 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9042 }
9043 #endif /* WM_WOL */
9044
9045 /* EEE */
9046
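/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet on I350 class
 *	devices, according to the WM_F_EEE flag.
 */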
9047 static void
9048 wm_set_eee_i350(struct wm_softc *sc)
9049 {
9050 uint32_t ipcnfg, eeer;
9051
9052 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9053 eeer = CSR_READ(sc, WMREG_EEER);
9054
9055 if ((sc->sc_flags & WM_F_EEE) != 0) {
9056 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9057 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9058 | EEER_LPI_FC);
9059 } else {
9060 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9061 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9062 | EEER_LPI_FC);
9063 }
9064
9065 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9066 CSR_WRITE(sc, WMREG_EEER, eeer);
9067 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9068 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9069 }
9070
9071 /*
9072 * Workarounds (mainly PHY related).
9073 * Basically, PHY's workarounds are in the PHY drivers.
9074 */
9075
9076 /* Work-around for 82566 Kumeran PCS lock loss */
9077 static void
9078 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9079 {
9080 int miistatus, active, i;
9081 int reg;
9082
9083 miistatus = sc->sc_mii.mii_media_status;
9084
	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;
9088
9089 active = sc->sc_mii.mii_media_active;
9090
9091 /* Nothing to do if the link is other than 1Gbps */
9092 if (IFM_SUBTYPE(active) != IFM_1000_T)
9093 return;
9094
9095 for (i = 0; i < 10; i++) {
9096 /* read twice */
9097 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9098 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out; /* GOOD! */
9101
9102 /* Reset the PHY */
9103 wm_gmii_reset(sc);
9104 delay(5*1000);
9105 }
9106
9107 /* Disable GigE link negotiation */
9108 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9109 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9110 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9111
9112 /*
9113 * Call gig speed drop workaround on Gig disable before accessing
9114 * any PHY registers.
9115 */
9116 wm_gig_downshift_workaround_ich8lan(sc);
9117
9118 out:
9119 return;
9120 }
9121
9122 /* WOL from S5 stops working */
9123 static void
9124 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9125 {
9126 uint16_t kmrn_reg;
9127
9128 /* Only for igp3 */
9129 if (sc->sc_phytype == WMPHY_IGP_3) {
9130 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9131 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9132 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9133 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9134 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9135 }
9136 }
9137
9138 /*
9139 * Workaround for pch's PHYs
9140 * XXX should be moved to new PHY driver?
9141 */
9142 static void
9143 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9144 {
9145 if (sc->sc_phytype == WMPHY_82577)
9146 wm_set_mdio_slow_mode_hv(sc);
9147
9148 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9149
9150 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9151
9152 /* 82578 */
9153 if (sc->sc_phytype == WMPHY_82578) {
9154 /* PCH rev. < 3 */
9155 if (sc->sc_rev < 3) {
9156 /* XXX 6 bit shift? Why? Is it page2? */
9157 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9158 0x66c0);
9159 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9160 0xffff);
9161 }
9162
9163 /* XXX phy rev. < 2 */
9164 }
9165
9166 /* Select page 0 */
9167
9168 /* XXX acquire semaphore */
9169 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9170 /* XXX release semaphore */
9171
9172 /*
9173 * Configure the K1 Si workaround during phy reset assuming there is
9174 * link so that it disables K1 if link is in 1Gbps.
9175 */
9176 wm_k1_gig_workaround_hv(sc, 1);
9177 }
9178
9179 static void
9180 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9181 {
9182
9183 wm_set_mdio_slow_mode_hv(sc);
9184 }
9185
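/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1 Si workaround: disable K1 while the link is up at 1Gbps and
 *	apply the link stall fix for the current link state.
 */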
9186 static void
9187 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9188 {
9189 int k1_enable = sc->sc_nvm_k1_enabled;
9190
9191 /* XXX acquire semaphore */
9192
9193 if (link) {
9194 k1_enable = 0;
9195
9196 /* Link stall fix for link up */
9197 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9198 } else {
9199 /* Link stall fix for link down */
9200 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9201 }
9202
9203 wm_configure_k1_ich8lan(sc, k1_enable);
9204
9205 /* XXX release semaphore */
9206 }
9207
9208 static void
9209 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9210 {
9211 uint32_t reg;
9212
9213 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9214 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9215 reg | HV_KMRN_MDIO_SLOW);
9216 }
9217
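/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power state via the Kumeran interface,
 *	briefly forcing the MAC speed while the setting takes effect.
 */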
9218 static void
9219 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9220 {
9221 uint32_t ctrl, ctrl_ext, tmp;
9222 uint16_t kmrn_reg;
9223
9224 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9225
9226 if (k1_enable)
9227 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9228 else
9229 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9230
9231 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9232
9233 delay(20);
9234
9235 ctrl = CSR_READ(sc, WMREG_CTRL);
9236 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9237
9238 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9239 tmp |= CTRL_FRCSPD;
9240
9241 CSR_WRITE(sc, WMREG_CTRL, tmp);
9242 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9243 CSR_WRITE_FLUSH(sc);
9244 delay(20);
9245
9246 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9247 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9248 CSR_WRITE_FLUSH(sc);
9249 delay(20);
9250 }
9251
/* Special case: the 82575 needs its init script done manually ... */
9253 static void
9254 wm_reset_init_script_82575(struct wm_softc *sc)
9255 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
9260
9261 /* SerDes configuration via SERDESCTRL */
9262 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9263 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9264 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9265 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9266
9267 /* CCM configuration via CCMCTL register */
9268 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9269 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9270
9271 /* PCIe lanes configuration */
9272 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9273 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9274 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9275 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9276
9277 /* PCIe PLL Configuration */
9278 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9279 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9280 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9281 }
9282