/* $NetBSD: if_wm.c,v 1.311 2015/02/13 09:00:50 msaitoh Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.311 2015/02/13 09:00:50 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
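/*
 * Example usage (note the doubled parentheses around the printf arguments,
 * which the macro passes to printf as a whole):
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: ...\n", device_xname(sc->sc_dev)));
 */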
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
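/*
 * Note that the ring and queue sizes above must be powers of two so
 * that the "& (size - 1)" masks implement cheap modular index
 * arithmetic for the next/previous-entry macros.
 */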

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
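/*
 * The table above is indexed by sc_funcid to select the SW/FW semaphore
 * bit for the PHY belonging to this port.
 */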

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
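/*
 * In the non-MPSAFE configuration the lock pointers are NULL, so the
 * macros above degenerate to no-ops and the driver is serialized by
 * the big kernel lock instead.
 */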

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
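/*
 * sc_rxtailp always points at the m_next field of the last mbuf in the
 * chain (or at sc_rxhead when the chain is empty), so appending another
 * buffer to a multi-descriptor packet is O(1).
 */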

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
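/*
 * Reading back an arbitrary register (STATUS here) forces any posted
 * PCI writes ahead of it to actually reach the chip.
 */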

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
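/*
 * The descriptor base address registers take the DMA address as separate
 * low/high 32-bit halves; with a 32-bit bus_addr_t the high half is
 * always 0.
 */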

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
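/*
 * Example: to hand descriptors txnext .. txnext + ndesc - 1 to the chip,
 * a caller does something like
 *	WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 * and the macro splits the sync in two when the range wraps the ring.
 */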

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
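/*
 * The final CSR_WRITE updates the Receive Descriptor Tail register,
 * which is what actually hands the (re)initialized descriptor back
 * to the hardware.
 */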

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

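/*
 * I/O-mapped register access is indirect: offset 0 of the I/O BAR is
 * the address window and offset 4 is the data window, as the two
 * bus_space accesses below show.
 */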
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's not a problem, because the newer chips
			 * don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1537 /*
1538 * CSA (Communication Streaming Architecture) is about as fast
1539 		 * as a 32-bit 66MHz PCI bus.
1540 */
1541 sc->sc_flags |= WM_F_CSA;
1542 sc->sc_bus_speed = 66;
1543 aprint_verbose_dev(sc->sc_dev,
1544 "Communication Streaming Architecture\n");
1545 if (sc->sc_type == WM_T_82547) {
1546 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1547 callout_setfunc(&sc->sc_txfifo_ch,
1548 wm_82547_txfifo_stall, sc);
1549 aprint_verbose_dev(sc->sc_dev,
1550 "using 82547 Tx FIFO stall work-around\n");
1551 }
1552 } else if (sc->sc_type >= WM_T_82571) {
1553 sc->sc_flags |= WM_F_PCIE;
1554 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1555 && (sc->sc_type != WM_T_ICH10)
1556 && (sc->sc_type != WM_T_PCH)
1557 && (sc->sc_type != WM_T_PCH2)
1558 && (sc->sc_type != WM_T_PCH_LPT)) {
1559 /* ICH* and PCH* have no PCIe capability registers */
1560 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1561 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1562 NULL) == 0)
1563 aprint_error_dev(sc->sc_dev,
1564 "unable to find PCIe capability\n");
1565 }
1566 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1567 } else {
1568 reg = CSR_READ(sc, WMREG_STATUS);
1569 if (reg & STATUS_BUS64)
1570 sc->sc_flags |= WM_F_BUS64;
1571 if ((reg & STATUS_PCIX_MODE) != 0) {
1572 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1573
1574 sc->sc_flags |= WM_F_PCIX;
1575 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1576 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1577 aprint_error_dev(sc->sc_dev,
1578 "unable to find PCIX capability\n");
1579 else if (sc->sc_type != WM_T_82545_3 &&
1580 sc->sc_type != WM_T_82546_3) {
1581 /*
1582 * Work around a problem caused by the BIOS
1583 * setting the max memory read byte count
1584 * incorrectly.
1585 */
1586 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1587 sc->sc_pcixe_capoff + PCIX_CMD);
1588 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1589 sc->sc_pcixe_capoff + PCIX_STATUS);
1590
1591 bytecnt =
1592 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1593 PCIX_CMD_BYTECNT_SHIFT;
1594 maxb =
1595 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1596 PCIX_STATUS_MAXB_SHIFT;
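				/*
				 * MMRBC is encoded as 512 << n bytes, so
				 * clamp the command field to the maximum
				 * advertised in the status register.
				 */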
1597 if (bytecnt > maxb) {
1598 aprint_verbose_dev(sc->sc_dev,
1599 "resetting PCI-X MMRBC: %d -> %d\n",
1600 512 << bytecnt, 512 << maxb);
1601 pcix_cmd = (pcix_cmd &
1602 ~PCIX_CMD_BYTECNT_MASK) |
1603 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1604 pci_conf_write(pa->pa_pc, pa->pa_tag,
1605 sc->sc_pcixe_capoff + PCIX_CMD,
1606 pcix_cmd);
1607 }
1608 }
1609 }
1610 /*
1611 * The quad port adapter is special; it has a PCIX-PCIX
1612 * bridge on the board, and can run the secondary bus at
1613 * a higher speed.
1614 */
1615 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1616 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1617 : 66;
1618 } else if (sc->sc_flags & WM_F_PCIX) {
1619 switch (reg & STATUS_PCIXSPD_MASK) {
1620 case STATUS_PCIXSPD_50_66:
1621 sc->sc_bus_speed = 66;
1622 break;
1623 case STATUS_PCIXSPD_66_100:
1624 sc->sc_bus_speed = 100;
1625 break;
1626 case STATUS_PCIXSPD_100_133:
1627 sc->sc_bus_speed = 133;
1628 break;
1629 default:
1630 aprint_error_dev(sc->sc_dev,
1631 "unknown PCIXSPD %d; assuming 66MHz\n",
1632 reg & STATUS_PCIXSPD_MASK);
1633 sc->sc_bus_speed = 66;
1634 break;
1635 }
1636 } else
1637 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1638 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1639 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1640 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1641 }
1642
1643 /*
1644 * Allocate the control data structures, and create and load the
1645 * DMA map for it.
1646 *
1647 * NOTE: All Tx descriptors must be in the same 4G segment of
1648 * memory. So must Rx descriptors. We simplify by allocating
1649 * both sets within the same 4G segment.
1650 */
1651 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1652 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1653 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1654 sizeof(struct wm_control_data_82542) :
1655 sizeof(struct wm_control_data_82544);
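	/*
	 * The 4GB boundary passed to bus_dmamem_alloc() below is what
	 * enforces the single-4G-segment requirement described above.
	 */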
1656 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1657 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1658 &sc->sc_cd_rseg, 0)) != 0) {
1659 aprint_error_dev(sc->sc_dev,
1660 "unable to allocate control data, error = %d\n",
1661 error);
1662 goto fail_0;
1663 }
1664
1665 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1666 sc->sc_cd_rseg, sc->sc_cd_size,
1667 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1668 aprint_error_dev(sc->sc_dev,
1669 "unable to map control data, error = %d\n", error);
1670 goto fail_1;
1671 }
1672
1673 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1674 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1675 aprint_error_dev(sc->sc_dev,
1676 "unable to create control data DMA map, error = %d\n",
1677 error);
1678 goto fail_2;
1679 }
1680
1681 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1682 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1683 aprint_error_dev(sc->sc_dev,
1684 "unable to load control data DMA map, error = %d\n",
1685 error);
1686 goto fail_3;
1687 }
1688
1689 /* Create the transmit buffer DMA maps. */
1690 WM_TXQUEUELEN(sc) =
1691 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1692 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
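	/*
	 * The 82547 uses a shorter Tx queue, presumably because of the
	 * Tx FIFO stall work-around set up above.
	 */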
1693 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1694 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1695 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1696 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1697 aprint_error_dev(sc->sc_dev,
1698 "unable to create Tx DMA map %d, error = %d\n",
1699 i, error);
1700 goto fail_4;
1701 }
1702 }
1703
1704 /* Create the receive buffer DMA maps. */
1705 for (i = 0; i < WM_NRXDESC; i++) {
1706 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1707 MCLBYTES, 0, 0,
1708 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1709 aprint_error_dev(sc->sc_dev,
1710 "unable to create Rx DMA map %d error = %d\n",
1711 i, error);
1712 goto fail_5;
1713 }
1714 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1715 }
1716
1717 /* clear interesting stat counters */
1718 CSR_READ(sc, WMREG_COLC);
1719 CSR_READ(sc, WMREG_RXERRC);
1720
1721 /* get PHY control from SMBus to PCIe */
1722 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1723 || (sc->sc_type == WM_T_PCH_LPT))
1724 wm_smbustopci(sc);
1725
1726 /* Reset the chip to a known state. */
1727 wm_reset(sc);
1728
1729 /* Get some information about the EEPROM. */
1730 switch (sc->sc_type) {
1731 case WM_T_82542_2_0:
1732 case WM_T_82542_2_1:
1733 case WM_T_82543:
1734 case WM_T_82544:
1735 /* Microwire */
1736 sc->sc_nvm_wordsize = 64;
1737 sc->sc_nvm_addrbits = 6;
1738 break;
1739 case WM_T_82540:
1740 case WM_T_82545:
1741 case WM_T_82545_3:
1742 case WM_T_82546:
1743 case WM_T_82546_3:
1744 /* Microwire */
1745 reg = CSR_READ(sc, WMREG_EECD);
1746 if (reg & EECD_EE_SIZE) {
1747 sc->sc_nvm_wordsize = 256;
1748 sc->sc_nvm_addrbits = 8;
1749 } else {
1750 sc->sc_nvm_wordsize = 64;
1751 sc->sc_nvm_addrbits = 6;
1752 }
1753 sc->sc_flags |= WM_F_LOCK_EECD;
1754 break;
1755 case WM_T_82541:
1756 case WM_T_82541_2:
1757 case WM_T_82547:
1758 case WM_T_82547_2:
1759 reg = CSR_READ(sc, WMREG_EECD);
1760 if (reg & EECD_EE_TYPE) {
1761 /* SPI */
1762 sc->sc_flags |= WM_F_EEPROM_SPI;
1763 wm_nvm_set_addrbits_size_eecd(sc);
1764 } else {
1765 /* Microwire */
1766 if ((reg & EECD_EE_ABITS) != 0) {
1767 sc->sc_nvm_wordsize = 256;
1768 sc->sc_nvm_addrbits = 8;
1769 } else {
1770 sc->sc_nvm_wordsize = 64;
1771 sc->sc_nvm_addrbits = 6;
1772 }
1773 }
1774 sc->sc_flags |= WM_F_LOCK_EECD;
1775 break;
1776 case WM_T_82571:
1777 case WM_T_82572:
1778 /* SPI */
1779 sc->sc_flags |= WM_F_EEPROM_SPI;
1780 wm_nvm_set_addrbits_size_eecd(sc);
1781 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1782 break;
1783 case WM_T_82573:
1784 sc->sc_flags |= WM_F_LOCK_SWSM;
1785 /* FALLTHROUGH */
1786 case WM_T_82574:
1787 case WM_T_82583:
1788 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1789 sc->sc_flags |= WM_F_EEPROM_FLASH;
1790 sc->sc_nvm_wordsize = 2048;
1791 } else {
1792 /* SPI */
1793 sc->sc_flags |= WM_F_EEPROM_SPI;
1794 wm_nvm_set_addrbits_size_eecd(sc);
1795 }
1796 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1797 break;
1798 case WM_T_82575:
1799 case WM_T_82576:
1800 case WM_T_82580:
1801 case WM_T_I350:
1802 case WM_T_I354:
1803 case WM_T_80003:
1804 /* SPI */
1805 sc->sc_flags |= WM_F_EEPROM_SPI;
1806 wm_nvm_set_addrbits_size_eecd(sc);
1807 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1808 | WM_F_LOCK_SWSM;
1809 break;
1810 case WM_T_ICH8:
1811 case WM_T_ICH9:
1812 case WM_T_ICH10:
1813 case WM_T_PCH:
1814 case WM_T_PCH2:
1815 case WM_T_PCH_LPT:
1816 /* FLASH */
1817 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1818 sc->sc_nvm_wordsize = 2048;
1819 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1820 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1821 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1822 aprint_error_dev(sc->sc_dev,
1823 "can't map FLASH registers\n");
1824 goto fail_5;
1825 }
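		/*
		 * GFPREG describes the NVM region of the flash in sector
		 * units: the base in the low word and the limit in the
		 * high word. The region holds two NVM banks, so the
		 * per-bank size computed below is half the region,
		 * converted to 16-bit words.
		 */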
1826 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1827 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1828 ICH_FLASH_SECTOR_SIZE;
1829 sc->sc_ich8_flash_bank_size =
1830 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1831 sc->sc_ich8_flash_bank_size -=
1832 (reg & ICH_GFPREG_BASE_MASK);
1833 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1834 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1835 break;
1836 case WM_T_I210:
1837 case WM_T_I211:
1838 wm_nvm_set_addrbits_size_eecd(sc);
1839 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1840 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1841 break;
1842 default:
1843 break;
1844 }
1845
1846 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1847 switch (sc->sc_type) {
1848 case WM_T_82571:
1849 case WM_T_82572:
1850 reg = CSR_READ(sc, WMREG_SWSM2);
1851 if ((reg & SWSM2_LOCK) == 0) {
1852 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1853 force_clear_smbi = true;
1854 } else
1855 force_clear_smbi = false;
1856 break;
1857 case WM_T_82573:
1858 case WM_T_82574:
1859 case WM_T_82583:
1860 force_clear_smbi = true;
1861 break;
1862 default:
1863 force_clear_smbi = false;
1864 break;
1865 }
1866 if (force_clear_smbi) {
1867 reg = CSR_READ(sc, WMREG_SWSM);
1868 if ((reg & SWSM_SMBI) != 0)
1869 aprint_error_dev(sc->sc_dev,
1870 "Please update the Bootagent\n");
1871 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1872 }
1873
1874 /*
1875 	 * Defer printing the EEPROM type until after verifying the checksum.
1876 * This allows the EEPROM type to be printed correctly in the case
1877 * that no EEPROM is attached.
1878 */
1879 /*
1880 * Validate the EEPROM checksum. If the checksum fails, flag
1881 * this for later, so we can fail future reads from the EEPROM.
1882 */
1883 if (wm_nvm_validate_checksum(sc)) {
1884 /*
1885 		 * Check again, because some PCI-E parts fail the
1886 		 * first check due to the link being in a sleep state.
1887 */
1888 if (wm_nvm_validate_checksum(sc))
1889 sc->sc_flags |= WM_F_EEPROM_INVALID;
1890 }
1891
1892 /* Set device properties (macflags) */
1893 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1894
1895 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1896 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1897 else {
1898 aprint_verbose_dev(sc->sc_dev, "%u words ",
1899 sc->sc_nvm_wordsize);
1900 if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1901 aprint_verbose("FLASH(HW)\n");
1902 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1903 aprint_verbose("FLASH\n");
1904 } else {
1905 if (sc->sc_flags & WM_F_EEPROM_SPI)
1906 eetype = "SPI";
1907 else
1908 eetype = "MicroWire";
1909 aprint_verbose("(%d address bits) %s EEPROM\n",
1910 sc->sc_nvm_addrbits, eetype);
1911 }
1912 }
1913
1914 switch (sc->sc_type) {
1915 case WM_T_82571:
1916 case WM_T_82572:
1917 case WM_T_82573:
1918 case WM_T_82574:
1919 case WM_T_82583:
1920 case WM_T_80003:
1921 case WM_T_ICH8:
1922 case WM_T_ICH9:
1923 case WM_T_ICH10:
1924 case WM_T_PCH:
1925 case WM_T_PCH2:
1926 case WM_T_PCH_LPT:
1927 if (wm_check_mng_mode(sc) != 0)
1928 wm_get_hw_control(sc);
1929 break;
1930 default:
1931 break;
1932 }
1933 wm_get_wakeup(sc);
1934 /*
1935 * Read the Ethernet address from the EEPROM, if not first found
1936 * in device properties.
1937 */
1938 ea = prop_dictionary_get(dict, "mac-address");
1939 if (ea != NULL) {
1940 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1941 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1942 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1943 } else {
1944 if (wm_read_mac_addr(sc, enaddr) != 0) {
1945 aprint_error_dev(sc->sc_dev,
1946 "unable to read Ethernet address\n");
1947 goto fail_5;
1948 }
1949 }
1950
1951 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1952 ether_sprintf(enaddr));
1953
1954 /*
1955 * Read the config info from the EEPROM, and set up various
1956 * bits in the control registers based on their contents.
1957 */
1958 pn = prop_dictionary_get(dict, "i82543-cfg1");
1959 if (pn != NULL) {
1960 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1961 cfg1 = (uint16_t) prop_number_integer_value(pn);
1962 } else {
1963 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1964 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1965 goto fail_5;
1966 }
1967 }
1968
1969 pn = prop_dictionary_get(dict, "i82543-cfg2");
1970 if (pn != NULL) {
1971 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1972 cfg2 = (uint16_t) prop_number_integer_value(pn);
1973 } else {
1974 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1975 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1976 goto fail_5;
1977 }
1978 }
1979
1980 /* check for WM_F_WOL */
1981 switch (sc->sc_type) {
1982 case WM_T_82542_2_0:
1983 case WM_T_82542_2_1:
1984 case WM_T_82543:
1985 /* dummy? */
1986 eeprom_data = 0;
1987 apme_mask = NVM_CFG3_APME;
1988 break;
1989 case WM_T_82544:
1990 apme_mask = NVM_CFG2_82544_APM_EN;
1991 eeprom_data = cfg2;
1992 break;
1993 case WM_T_82546:
1994 case WM_T_82546_3:
1995 case WM_T_82571:
1996 case WM_T_82572:
1997 case WM_T_82573:
1998 case WM_T_82574:
1999 case WM_T_82583:
2000 case WM_T_80003:
2001 default:
2002 apme_mask = NVM_CFG3_APME;
2003 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2004 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2005 break;
2006 case WM_T_82575:
2007 case WM_T_82576:
2008 case WM_T_82580:
2009 case WM_T_I350:
2010 case WM_T_I354: /* XXX ok? */
2011 case WM_T_ICH8:
2012 case WM_T_ICH9:
2013 case WM_T_ICH10:
2014 case WM_T_PCH:
2015 case WM_T_PCH2:
2016 case WM_T_PCH_LPT:
2017 /* XXX The funcid should be checked on some devices */
2018 apme_mask = WUC_APME;
2019 eeprom_data = CSR_READ(sc, WMREG_WUC);
2020 break;
2021 }
2022
2023 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2024 if ((eeprom_data & apme_mask) != 0)
2025 sc->sc_flags |= WM_F_WOL;
2026 #ifdef WM_DEBUG
2027 if ((sc->sc_flags & WM_F_WOL) != 0)
2028 printf("WOL\n");
2029 #endif
2030
2031 /*
2032 	 * XXX need special handling for some multiple-port cards
2033 	 * to disable a particular port.
2034 */
2035
2036 if (sc->sc_type >= WM_T_82544) {
2037 pn = prop_dictionary_get(dict, "i82543-swdpin");
2038 if (pn != NULL) {
2039 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2040 swdpin = (uint16_t) prop_number_integer_value(pn);
2041 } else {
2042 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2043 aprint_error_dev(sc->sc_dev,
2044 "unable to read SWDPIN\n");
2045 goto fail_5;
2046 }
2047 }
2048 }
2049
2050 if (cfg1 & NVM_CFG1_ILOS)
2051 sc->sc_ctrl |= CTRL_ILOS;
2052 if (sc->sc_type >= WM_T_82544) {
2053 sc->sc_ctrl |=
2054 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2055 CTRL_SWDPIO_SHIFT;
2056 sc->sc_ctrl |=
2057 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2058 CTRL_SWDPINS_SHIFT;
2059 } else {
2060 sc->sc_ctrl |=
2061 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2062 CTRL_SWDPIO_SHIFT;
2063 }
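	/*
	 * The software-definable pin directions (SWDPIO) and initial
	 * values (SWDPINS) read from the NVM are latched into CTRL here.
	 */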
2064
2065 #if 0
2066 if (sc->sc_type >= WM_T_82544) {
2067 if (cfg1 & NVM_CFG1_IPS0)
2068 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2069 if (cfg1 & NVM_CFG1_IPS1)
2070 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2071 sc->sc_ctrl_ext |=
2072 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2073 CTRL_EXT_SWDPIO_SHIFT;
2074 sc->sc_ctrl_ext |=
2075 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2076 CTRL_EXT_SWDPINS_SHIFT;
2077 } else {
2078 sc->sc_ctrl_ext |=
2079 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2080 CTRL_EXT_SWDPIO_SHIFT;
2081 }
2082 #endif
2083
2084 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2085 #if 0
2086 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2087 #endif
2088
2089 /*
2090 * Set up some register offsets that are different between
2091 * the i82542 and the i82543 and later chips.
2092 */
2093 if (sc->sc_type < WM_T_82543) {
2094 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2095 sc->sc_tdt_reg = WMREG_OLD_TDT;
2096 } else {
2097 sc->sc_rdt_reg = WMREG_RDT;
2098 sc->sc_tdt_reg = WMREG_TDT;
2099 }
2100
2101 if (sc->sc_type == WM_T_PCH) {
2102 uint16_t val;
2103
2104 /* Save the NVM K1 bit setting */
2105 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2106
2107 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2108 sc->sc_nvm_k1_enabled = 1;
2109 else
2110 sc->sc_nvm_k1_enabled = 0;
2111 }
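	/*
	 * K1 is a PHY power-saving link state on PCH; remember the NVM
	 * default so the link-management code can honor it later.
	 */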
2112
2113 /*
2114 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
2115 * media structures accordingly.
2116 */
2117 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2118 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2119 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2120 || sc->sc_type == WM_T_82573
2121 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2122 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2123 wm_gmii_mediainit(sc, wmp->wmp_product);
2124 } else if (sc->sc_type < WM_T_82543 ||
2125 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2126 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2127 aprint_error_dev(sc->sc_dev,
2128 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2129 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2130 }
2131 wm_tbi_mediainit(sc);
2132 } else {
2133 switch (sc->sc_type) {
2134 case WM_T_82575:
2135 case WM_T_82576:
2136 case WM_T_82580:
2137 case WM_T_I350:
2138 case WM_T_I354:
2139 case WM_T_I210:
2140 case WM_T_I211:
2141 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2142 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2143 switch (link_mode) {
2144 case CTRL_EXT_LINK_MODE_1000KX:
2145 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2146 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2147 break;
2148 case CTRL_EXT_LINK_MODE_SGMII:
2149 if (wm_sgmii_uses_mdio(sc)) {
2150 aprint_verbose_dev(sc->sc_dev,
2151 "SGMII(MDIO)\n");
2152 sc->sc_flags |= WM_F_SGMII;
2153 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2154 break;
2155 }
2156 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2157 /*FALLTHROUGH*/
2158 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2159 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2160 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2161 if (link_mode
2162 == CTRL_EXT_LINK_MODE_SGMII) {
2163 sc->sc_mediatype
2164 = WM_MEDIATYPE_COPPER;
2165 sc->sc_flags |= WM_F_SGMII;
2166 } else {
2167 sc->sc_mediatype
2168 = WM_MEDIATYPE_SERDES;
2169 aprint_verbose_dev(sc->sc_dev,
2170 "SERDES\n");
2171 }
2172 break;
2173 }
2174 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2175 aprint_verbose_dev(sc->sc_dev,
2176 "SERDES\n");
2177
2178 /* Change current link mode setting */
2179 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2180 switch (sc->sc_mediatype) {
2181 case WM_MEDIATYPE_COPPER:
2182 reg |= CTRL_EXT_LINK_MODE_SGMII;
2183 break;
2184 case WM_MEDIATYPE_SERDES:
2185 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2186 break;
2187 default:
2188 break;
2189 }
2190 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2191 break;
2192 case CTRL_EXT_LINK_MODE_GMII:
2193 default:
2194 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2195 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2196 break;
2197 }
2198
2199 			reg &= ~CTRL_EXT_I2C_ENA;
2200 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2201 				reg |= CTRL_EXT_I2C_ENA;
2204 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2205
2206 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2207 wm_gmii_mediainit(sc, wmp->wmp_product);
2208 else
2209 wm_tbi_mediainit(sc);
2210 break;
2211 default:
2212 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2213 aprint_error_dev(sc->sc_dev,
2214 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2215 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2216 wm_gmii_mediainit(sc, wmp->wmp_product);
2217 }
2218 }
2219
2220 ifp = &sc->sc_ethercom.ec_if;
2221 xname = device_xname(sc->sc_dev);
2222 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2223 ifp->if_softc = sc;
2224 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2225 ifp->if_ioctl = wm_ioctl;
2226 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2227 ifp->if_start = wm_nq_start;
2228 else
2229 ifp->if_start = wm_start;
2230 ifp->if_watchdog = wm_watchdog;
2231 ifp->if_init = wm_init;
2232 ifp->if_stop = wm_stop;
2233 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2234 IFQ_SET_READY(&ifp->if_snd);
2235
2236 /* Check for jumbo frame */
2237 switch (sc->sc_type) {
2238 case WM_T_82573:
2239 /* XXX limited to 9234 if ASPM is disabled */
2240 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2241 if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2242 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2243 break;
2244 case WM_T_82571:
2245 case WM_T_82572:
2246 case WM_T_82574:
2247 case WM_T_82575:
2248 case WM_T_82576:
2249 case WM_T_82580:
2250 case WM_T_I350:
2251 	case WM_T_I354: /* XXX ok? */
2252 case WM_T_I210:
2253 case WM_T_I211:
2254 case WM_T_80003:
2255 case WM_T_ICH9:
2256 case WM_T_ICH10:
2257 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2258 case WM_T_PCH_LPT:
2259 /* XXX limited to 9234 */
2260 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2261 break;
2262 case WM_T_PCH:
2263 /* XXX limited to 4096 */
2264 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2265 break;
2266 case WM_T_82542_2_0:
2267 case WM_T_82542_2_1:
2268 case WM_T_82583:
2269 case WM_T_ICH8:
2270 /* No support for jumbo frame */
2271 break;
2272 default:
2273 /* ETHER_MAX_LEN_JUMBO */
2274 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2275 break;
2276 }
2277
2278 	/* If we're an i82543 or greater, we can support VLANs. */
2279 if (sc->sc_type >= WM_T_82543)
2280 sc->sc_ethercom.ec_capabilities |=
2281 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2282
2283 /*
2284 	 * We can perform TCPv4 and UDPv4 checksums in hardware. Only
2285 * on i82543 and later.
2286 */
2287 if (sc->sc_type >= WM_T_82543) {
2288 ifp->if_capabilities |=
2289 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2290 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2291 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2292 IFCAP_CSUM_TCPv6_Tx |
2293 IFCAP_CSUM_UDPv6_Tx;
2294 }
2295
2296 /*
2297 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2298 *
2299 * 82541GI (8086:1076) ... no
2300 * 82572EI (8086:10b9) ... yes
2301 */
2302 if (sc->sc_type >= WM_T_82571) {
2303 ifp->if_capabilities |=
2304 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2305 }
2306
2307 /*
2308 	 * If we're an i82544 or greater (except i82547), we can do
2309 * TCP segmentation offload.
2310 */
2311 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2312 ifp->if_capabilities |= IFCAP_TSOv4;
2313 }
2314
2315 if (sc->sc_type >= WM_T_82571) {
2316 ifp->if_capabilities |= IFCAP_TSOv6;
2317 }
2318
2319 #ifdef WM_MPSAFE
2320 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2321 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2322 #else
2323 sc->sc_tx_lock = NULL;
2324 sc->sc_rx_lock = NULL;
2325 #endif
2326
2327 /* Attach the interface. */
2328 if_attach(ifp);
2329 ether_ifattach(ifp, enaddr);
2330 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2331 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2332 RND_FLAG_DEFAULT);
2333
2334 #ifdef WM_EVENT_COUNTERS
2335 /* Attach event counters. */
2336 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2337 NULL, xname, "txsstall");
2338 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2339 NULL, xname, "txdstall");
2340 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2341 NULL, xname, "txfifo_stall");
2342 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2343 NULL, xname, "txdw");
2344 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2345 NULL, xname, "txqe");
2346 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2347 NULL, xname, "rxintr");
2348 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2349 NULL, xname, "linkintr");
2350
2351 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2352 NULL, xname, "rxipsum");
2353 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2354 NULL, xname, "rxtusum");
2355 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2356 NULL, xname, "txipsum");
2357 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2358 NULL, xname, "txtusum");
2359 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2360 NULL, xname, "txtusum6");
2361
2362 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2363 NULL, xname, "txtso");
2364 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2365 NULL, xname, "txtso6");
2366 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2367 NULL, xname, "txtsopain");
2368
2369 for (i = 0; i < WM_NTXSEGS; i++) {
2370 snprintf(wm_txseg_evcnt_names[i],
2371 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2372 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2373 NULL, xname, wm_txseg_evcnt_names[i]);
2374 }
2375
2376 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2377 NULL, xname, "txdrop");
2378
2379 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2380 NULL, xname, "tu");
2381
2382 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2383 NULL, xname, "tx_xoff");
2384 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2385 NULL, xname, "tx_xon");
2386 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2387 NULL, xname, "rx_xoff");
2388 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2389 NULL, xname, "rx_xon");
2390 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2391 NULL, xname, "rx_macctl");
2392 #endif /* WM_EVENT_COUNTERS */
2393
2394 if (pmf_device_register(self, wm_suspend, wm_resume))
2395 pmf_class_network_register(self, ifp);
2396 else
2397 aprint_error_dev(self, "couldn't establish power handler\n");
2398
2399 sc->sc_flags |= WM_F_ATTACHED;
2400 return;
2401
2402 /*
2403 * Free any resources we've allocated during the failed attach
2404 * attempt. Do this in reverse order and fall through.
2405 */
2406 fail_5:
2407 for (i = 0; i < WM_NRXDESC; i++) {
2408 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2409 bus_dmamap_destroy(sc->sc_dmat,
2410 sc->sc_rxsoft[i].rxs_dmamap);
2411 }
2412 fail_4:
2413 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2414 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2415 bus_dmamap_destroy(sc->sc_dmat,
2416 sc->sc_txsoft[i].txs_dmamap);
2417 }
2418 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2419 fail_3:
2420 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2421 fail_2:
2422 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2423 sc->sc_cd_size);
2424 fail_1:
2425 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2426 fail_0:
2427 return;
2428 }
2429
2430 /* The detach function (ca_detach) */
2431 static int
2432 wm_detach(device_t self, int flags __unused)
2433 {
2434 struct wm_softc *sc = device_private(self);
2435 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2436 int i;
2437 #ifndef WM_MPSAFE
2438 int s;
2439 #endif
2440
2441 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2442 return 0;
2443
2444 #ifndef WM_MPSAFE
2445 s = splnet();
2446 #endif
2447 	/* Stop the interface. Callouts are stopped inside wm_stop(). */
2448 wm_stop(ifp, 1);
2449
2450 #ifndef WM_MPSAFE
2451 splx(s);
2452 #endif
2453
2454 pmf_device_deregister(self);
2455
2456 /* Tell the firmware about the release */
2457 WM_BOTH_LOCK(sc);
2458 wm_release_manageability(sc);
2459 wm_release_hw_control(sc);
2460 WM_BOTH_UNLOCK(sc);
2461
2462 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2463
2464 /* Delete all remaining media. */
2465 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2466
2467 ether_ifdetach(ifp);
2468 if_detach(ifp);
2469
2471 /* Unload RX dmamaps and free mbufs */
2472 WM_RX_LOCK(sc);
2473 wm_rxdrain(sc);
2474 WM_RX_UNLOCK(sc);
2475 /* Must unlock here */
2476
2477 	/* Free the DMA maps; this mirrors the failure path at the end of wm_attach(). */
2478 for (i = 0; i < WM_NRXDESC; i++) {
2479 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2480 bus_dmamap_destroy(sc->sc_dmat,
2481 sc->sc_rxsoft[i].rxs_dmamap);
2482 }
2483 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2484 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2485 bus_dmamap_destroy(sc->sc_dmat,
2486 sc->sc_txsoft[i].txs_dmamap);
2487 }
2488 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2489 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2490 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2491 sc->sc_cd_size);
2492 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2493
2494 /* Disestablish the interrupt handler */
2495 if (sc->sc_ih != NULL) {
2496 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2497 sc->sc_ih = NULL;
2498 }
2499
2500 /* Unmap the registers */
2501 if (sc->sc_ss) {
2502 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2503 sc->sc_ss = 0;
2504 }
2505
2506 if (sc->sc_ios) {
2507 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2508 sc->sc_ios = 0;
2509 }
2510
2511 if (sc->sc_tx_lock)
2512 mutex_obj_free(sc->sc_tx_lock);
2513 if (sc->sc_rx_lock)
2514 mutex_obj_free(sc->sc_rx_lock);
2515
2516 return 0;
2517 }
2518
2519 static bool
2520 wm_suspend(device_t self, const pmf_qual_t *qual)
2521 {
2522 struct wm_softc *sc = device_private(self);
2523
2524 wm_release_manageability(sc);
2525 wm_release_hw_control(sc);
2526 #ifdef WM_WOL
2527 wm_enable_wakeup(sc);
2528 #endif
2529
2530 return true;
2531 }
2532
2533 static bool
2534 wm_resume(device_t self, const pmf_qual_t *qual)
2535 {
2536 struct wm_softc *sc = device_private(self);
2537
2538 wm_init_manageability(sc);
2539
2540 return true;
2541 }
2542
2543 /*
2544 * wm_watchdog: [ifnet interface function]
2545 *
2546 * Watchdog timer handler.
2547 */
2548 static void
2549 wm_watchdog(struct ifnet *ifp)
2550 {
2551 struct wm_softc *sc = ifp->if_softc;
2552
2553 /*
2554 * Since we're using delayed interrupts, sweep up
2555 * before we report an error.
2556 */
2557 WM_TX_LOCK(sc);
2558 wm_txintr(sc);
2559 WM_TX_UNLOCK(sc);
2560
2561 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2562 #ifdef WM_DEBUG
2563 int i, j;
2564 struct wm_txsoft *txs;
2565 #endif
2566 log(LOG_ERR,
2567 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2568 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2569 sc->sc_txnext);
2570 ifp->if_oerrors++;
2571 #ifdef WM_DEBUG
2572 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2573 i = WM_NEXTTXS(sc, i)) {
2574 txs = &sc->sc_txsoft[i];
2575 printf("txs %d tx %d -> %d\n",
2576 i, txs->txs_firstdesc, txs->txs_lastdesc);
2577 for (j = txs->txs_firstdesc; ;
2578 j = WM_NEXTTX(sc, j)) {
2579 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2580 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2581 printf("\t %#08x%08x\n",
2582 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2583 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2584 if (j == txs->txs_lastdesc)
2585 break;
2586 }
2587 }
2588 #endif
2589 /* Reset the interface. */
2590 (void) wm_init(ifp);
2591 }
2592
2593 /* Try to get more packets going. */
2594 ifp->if_start(ifp);
2595 }
2596
2597 /*
2598 * wm_tick:
2599 *
2600 * One second timer, used to check link status, sweep up
2601 * completed transmit jobs, etc.
2602 */
2603 static void
2604 wm_tick(void *arg)
2605 {
2606 struct wm_softc *sc = arg;
2607 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2608 #ifndef WM_MPSAFE
2609 int s;
2610
2611 s = splnet();
2612 #endif
2613
2614 WM_TX_LOCK(sc);
2615
2616 if (sc->sc_stopping)
2617 goto out;
2618
2619 if (sc->sc_type >= WM_T_82542_2_1) {
2620 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2621 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2622 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2623 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2624 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2625 }
2626
2627 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2628 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2629 + CSR_READ(sc, WMREG_CRCERRS)
2630 + CSR_READ(sc, WMREG_ALGNERRC)
2631 + CSR_READ(sc, WMREG_SYMERRC)
2632 + CSR_READ(sc, WMREG_RXERRC)
2633 + CSR_READ(sc, WMREG_SEC)
2634 + CSR_READ(sc, WMREG_CEXTERR)
2635 + CSR_READ(sc, WMREG_RLEC);
2636 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2637
2638 if (sc->sc_flags & WM_F_HAS_MII)
2639 mii_tick(&sc->sc_mii);
2640 else
2641 wm_tbi_check_link(sc);
2642
2643 out:
2644 WM_TX_UNLOCK(sc);
2645 #ifndef WM_MPSAFE
2646 splx(s);
2647 #endif
2648
2649 if (!sc->sc_stopping)
2650 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2651 }
2652
2653 static int
2654 wm_ifflags_cb(struct ethercom *ec)
2655 {
2656 struct ifnet *ifp = &ec->ec_if;
2657 struct wm_softc *sc = ifp->if_softc;
2658 int change = ifp->if_flags ^ sc->sc_if_flags;
2659 int rc = 0;
2660
2661 WM_BOTH_LOCK(sc);
2662
2663 if (change != 0)
2664 sc->sc_if_flags = ifp->if_flags;
2665
2666 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2667 rc = ENETRESET;
2668 goto out;
2669 }
2670
2671 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2672 wm_set_filter(sc);
2673
2674 wm_set_vlan(sc);
2675
2676 out:
2677 WM_BOTH_UNLOCK(sc);
2678
2679 return rc;
2680 }
2681
2682 /*
2683 * wm_ioctl: [ifnet interface function]
2684 *
2685 * Handle control requests from the operator.
2686 */
2687 static int
2688 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2689 {
2690 struct wm_softc *sc = ifp->if_softc;
2691 struct ifreq *ifr = (struct ifreq *) data;
2692 struct ifaddr *ifa = (struct ifaddr *)data;
2693 struct sockaddr_dl *sdl;
2694 int s, error;
2695
2696 #ifndef WM_MPSAFE
2697 s = splnet();
2698 #endif
2699 switch (cmd) {
2700 case SIOCSIFMEDIA:
2701 case SIOCGIFMEDIA:
2702 WM_BOTH_LOCK(sc);
2703 /* Flow control requires full-duplex mode. */
2704 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2705 (ifr->ifr_media & IFM_FDX) == 0)
2706 ifr->ifr_media &= ~IFM_ETH_FMASK;
2707 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2708 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2709 /* We can do both TXPAUSE and RXPAUSE. */
2710 ifr->ifr_media |=
2711 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2712 }
2713 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2714 }
2715 WM_BOTH_UNLOCK(sc);
2716 #ifdef WM_MPSAFE
2717 s = splnet();
2718 #endif
2719 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2720 #ifdef WM_MPSAFE
2721 splx(s);
2722 #endif
2723 break;
2724 case SIOCINITIFADDR:
2725 WM_BOTH_LOCK(sc);
2726 if (ifa->ifa_addr->sa_family == AF_LINK) {
2727 sdl = satosdl(ifp->if_dl->ifa_addr);
2728 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2729 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2730 /* unicast address is first multicast entry */
2731 wm_set_filter(sc);
2732 error = 0;
2733 WM_BOTH_UNLOCK(sc);
2734 break;
2735 }
2736 WM_BOTH_UNLOCK(sc);
2737 /*FALLTHROUGH*/
2738 default:
2739 #ifdef WM_MPSAFE
2740 s = splnet();
2741 #endif
2742 /* It may call wm_start, so unlock here */
2743 error = ether_ioctl(ifp, cmd, data);
2744 #ifdef WM_MPSAFE
2745 splx(s);
2746 #endif
2747 if (error != ENETRESET)
2748 break;
2749
2750 error = 0;
2751
2752 if (cmd == SIOCSIFCAP) {
2753 error = (*ifp->if_init)(ifp);
2754 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2755 ;
2756 else if (ifp->if_flags & IFF_RUNNING) {
2757 /*
2758 * Multicast list has changed; set the hardware filter
2759 * accordingly.
2760 */
2761 WM_BOTH_LOCK(sc);
2762 wm_set_filter(sc);
2763 WM_BOTH_UNLOCK(sc);
2764 }
2765 break;
2766 }
2767
2768 /* Try to get more packets going. */
2769 ifp->if_start(ifp);
2770
2771 #ifndef WM_MPSAFE
2772 splx(s);
2773 #endif
2774 return error;
2775 }
2776
2777 /* MAC address related */
2778
2779 /*
2780  * Get the offset of the MAC address and return it.
2781  * If an error occurs, return offset 0.
2782 */
2783 static uint16_t
2784 wm_check_alt_mac_addr(struct wm_softc *sc)
2785 {
2786 uint16_t myea[ETHER_ADDR_LEN / 2];
2787 uint16_t offset = NVM_OFF_MACADDR;
2788
2789 /* Try to read alternative MAC address pointer */
2790 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2791 return 0;
2792
2793 	/* Check whether the pointer is valid. */
2794 if ((offset == 0x0000) || (offset == 0xffff))
2795 return 0;
2796
2797 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2798 /*
2799 	 * Check whether the alternative MAC address is valid.
2800 	 * Some cards have a non-0xffff pointer but don't actually
2801 	 * use an alternative MAC address; those can be detected
2802 	 * by checking whether the broadcast bit is set in the
2803 	 * first byte of the address.
2804 */
2805 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2806 if (((myea[0] & 0xff) & 0x01) == 0)
2807 return offset; /* Found */
2808
2809 /* Not found */
2810 return 0;
2811 }
2812
2813 static int
2814 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2815 {
2816 uint16_t myea[ETHER_ADDR_LEN / 2];
2817 uint16_t offset = NVM_OFF_MACADDR;
2818 int do_invert = 0;
2819
2820 switch (sc->sc_type) {
2821 case WM_T_82580:
2822 case WM_T_I350:
2823 case WM_T_I354:
2824 /* EEPROM Top Level Partitioning */
2825 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2826 break;
2827 case WM_T_82571:
2828 case WM_T_82575:
2829 case WM_T_82576:
2830 case WM_T_80003:
2831 case WM_T_I210:
2832 case WM_T_I211:
2833 offset = wm_check_alt_mac_addr(sc);
2834 if (offset == 0)
2835 if ((sc->sc_funcid & 0x01) == 1)
2836 do_invert = 1;
2837 break;
2838 default:
2839 if ((sc->sc_funcid & 0x01) == 1)
2840 do_invert = 1;
2841 break;
2842 }
2843
2844 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2845 myea) != 0)
2846 goto bad;
2847
2848 enaddr[0] = myea[0] & 0xff;
2849 enaddr[1] = myea[0] >> 8;
2850 enaddr[2] = myea[1] & 0xff;
2851 enaddr[3] = myea[1] >> 8;
2852 enaddr[4] = myea[2] & 0xff;
2853 enaddr[5] = myea[2] >> 8;
2854
2855 /*
2856 * Toggle the LSB of the MAC address on the second port
2857 * of some dual port cards.
2858 */
2859 if (do_invert != 0)
2860 enaddr[5] ^= 1;
2861
2862 return 0;
2863
2864 bad:
2865 return -1;
2866 }
2867
2868 /*
2869 * wm_set_ral:
2870 *
2871  * Set an entry in the receive address list.
2872 */
2873 static void
2874 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2875 {
2876 uint32_t ral_lo, ral_hi;
2877
2878 if (enaddr != NULL) {
2879 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2880 (enaddr[3] << 24);
2881 ral_hi = enaddr[4] | (enaddr[5] << 8);
2882 ral_hi |= RAL_AV;
2883 } else {
2884 ral_lo = 0;
2885 ral_hi = 0;
2886 }
2887
2888 if (sc->sc_type >= WM_T_82544) {
2889 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2890 ral_lo);
2891 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2892 ral_hi);
2893 } else {
2894 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2895 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2896 }
2897 }
2898
2899 /*
2900 * wm_mchash:
2901 *
2902 * Compute the hash of the multicast address for the 4096-bit
2903 * multicast filter.
2904 */
2905 static uint32_t
2906 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2907 {
2908 static const int lo_shift[4] = { 4, 3, 2, 0 };
2909 static const int hi_shift[4] = { 4, 5, 6, 8 };
2910 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2911 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2912 uint32_t hash;
2913
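	/*
	 * The hash is built from bits of the last two bytes of the
	 * address; sc_mchash_type selects which bit window is used.
	 * ICH/PCH chips use a 1024-bit filter (10-bit hash), the rest
	 * use a 4096-bit filter (12-bit hash).
	 */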
2914 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2915 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2916 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2917 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2918 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2919 return (hash & 0x3ff);
2920 }
2921 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2922 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2923
2924 return (hash & 0xfff);
2925 }
2926
2927 /*
2928 * wm_set_filter:
2929 *
2930 * Set up the receive filter.
2931 */
2932 static void
2933 wm_set_filter(struct wm_softc *sc)
2934 {
2935 struct ethercom *ec = &sc->sc_ethercom;
2936 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2937 struct ether_multi *enm;
2938 struct ether_multistep step;
2939 bus_addr_t mta_reg;
2940 uint32_t hash, reg, bit;
2941 int i, size;
2942
2943 if (sc->sc_type >= WM_T_82544)
2944 mta_reg = WMREG_CORDOVA_MTA;
2945 else
2946 mta_reg = WMREG_MTA;
2947
2948 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2949
2950 if (ifp->if_flags & IFF_BROADCAST)
2951 sc->sc_rctl |= RCTL_BAM;
2952 if (ifp->if_flags & IFF_PROMISC) {
2953 sc->sc_rctl |= RCTL_UPE;
2954 goto allmulti;
2955 }
2956
2957 /*
2958 * Set the station address in the first RAL slot, and
2959 * clear the remaining slots.
2960 */
2961 if (sc->sc_type == WM_T_ICH8)
2962 		size = WM_RAL_TABSIZE_ICH8 - 1;
2963 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2964 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2965 || (sc->sc_type == WM_T_PCH_LPT))
2966 size = WM_RAL_TABSIZE_ICH8;
2967 else if (sc->sc_type == WM_T_82575)
2968 size = WM_RAL_TABSIZE_82575;
2969 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2970 size = WM_RAL_TABSIZE_82576;
2971 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2972 size = WM_RAL_TABSIZE_I350;
2973 else
2974 size = WM_RAL_TABSIZE;
2975 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2976 for (i = 1; i < size; i++)
2977 wm_set_ral(sc, NULL, i);
2978
2979 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2980 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2981 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2982 size = WM_ICH8_MC_TABSIZE;
2983 else
2984 size = WM_MC_TABSIZE;
2985 /* Clear out the multicast table. */
2986 for (i = 0; i < size; i++)
2987 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2988
2989 ETHER_FIRST_MULTI(step, ec, enm);
2990 while (enm != NULL) {
2991 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2992 /*
2993 * We must listen to a range of multicast addresses.
2994 * For now, just accept all multicasts, rather than
2995 * trying to set only those filter bits needed to match
2996 * the range. (At this time, the only use of address
2997 * ranges is for IP multicast routing, for which the
2998 * range is big enough to require all bits set.)
2999 */
3000 goto allmulti;
3001 }
3002
3003 hash = wm_mchash(sc, enm->enm_addrlo);
3004
3005 reg = (hash >> 5);
3006 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3007 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3008 || (sc->sc_type == WM_T_PCH2)
3009 || (sc->sc_type == WM_T_PCH_LPT))
3010 reg &= 0x1f;
3011 else
3012 reg &= 0x7f;
3013 bit = hash & 0x1f;
3014
3015 hash = CSR_READ(sc, mta_reg + (reg << 2));
3016 hash |= 1U << bit;
3017
3018 /* XXX Hardware bug?? */
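		/*
		 * On the 82544, updates to some MTA registers apparently
		 * do not take effect unless the preceding register is
		 * rewritten afterwards, hence the read/write of reg - 1.
		 */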
3019 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3020 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3021 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3022 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3023 } else
3024 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3025
3026 ETHER_NEXT_MULTI(step, enm);
3027 }
3028
3029 ifp->if_flags &= ~IFF_ALLMULTI;
3030 goto setit;
3031
3032 allmulti:
3033 ifp->if_flags |= IFF_ALLMULTI;
3034 sc->sc_rctl |= RCTL_MPE;
3035
3036 setit:
3037 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3038 }
3039
3040 /* Reset and init related */
3041
3042 static void
3043 wm_set_vlan(struct wm_softc *sc)
3044 {
3045 /* Deal with VLAN enables. */
3046 if (VLAN_ATTACHED(&sc->sc_ethercom))
3047 sc->sc_ctrl |= CTRL_VME;
3048 else
3049 sc->sc_ctrl &= ~CTRL_VME;
3050
3051 /* Write the control registers. */
3052 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3053 }
3054
3055 static void
3056 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3057 {
3058 uint32_t gcr;
3059 pcireg_t ctrl2;
3060
3061 gcr = CSR_READ(sc, WMREG_GCR);
3062
3063 /* Only take action if timeout value is defaulted to 0 */
3064 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3065 goto out;
3066
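	/*
	 * Parts without the version-2 capability take the timeout
	 * directly in GCR; the rest use the standard PCIe Device
	 * Control 2 register instead.
	 */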
3067 if ((gcr & GCR_CAP_VER2) == 0) {
3068 gcr |= GCR_CMPL_TMOUT_10MS;
3069 goto out;
3070 }
3071
3072 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3073 sc->sc_pcixe_capoff + PCIE_DCSR2);
3074 ctrl2 |= WM_PCIE_DCSR2_16MS;
3075 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3076 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3077
3078 out:
3079 /* Disable completion timeout resend */
3080 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3081
3082 CSR_WRITE(sc, WMREG_GCR, gcr);
3083 }
3084
3085 void
3086 wm_get_auto_rd_done(struct wm_softc *sc)
3087 {
3088 int i;
3089
3090 /* wait for eeprom to reload */
3091 switch (sc->sc_type) {
3092 case WM_T_82571:
3093 case WM_T_82572:
3094 case WM_T_82573:
3095 case WM_T_82574:
3096 case WM_T_82583:
3097 case WM_T_82575:
3098 case WM_T_82576:
3099 case WM_T_82580:
3100 case WM_T_I350:
3101 case WM_T_I354:
3102 case WM_T_I210:
3103 case WM_T_I211:
3104 case WM_T_80003:
3105 case WM_T_ICH8:
3106 case WM_T_ICH9:
3107 for (i = 0; i < 10; i++) {
3108 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3109 break;
3110 delay(1000);
3111 }
3112 if (i == 10) {
3113 log(LOG_ERR, "%s: auto read from eeprom failed to "
3114 "complete\n", device_xname(sc->sc_dev));
3115 }
3116 break;
3117 default:
3118 break;
3119 }
3120 }
3121
3122 void
3123 wm_lan_init_done(struct wm_softc *sc)
3124 {
3125 uint32_t reg = 0;
3126 int i;
3127
3128 /* wait for eeprom to reload */
3129 switch (sc->sc_type) {
3130 case WM_T_ICH10:
3131 case WM_T_PCH:
3132 case WM_T_PCH2:
3133 case WM_T_PCH_LPT:
3134 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3135 reg = CSR_READ(sc, WMREG_STATUS);
3136 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3137 break;
3138 delay(100);
3139 }
3140 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3141 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3142 "complete\n", device_xname(sc->sc_dev), __func__);
3143 }
3144 break;
3145 default:
3146 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3147 __func__);
3148 break;
3149 }
3150
3151 reg &= ~STATUS_LAN_INIT_DONE;
3152 CSR_WRITE(sc, WMREG_STATUS, reg);
3153 }
3154
3155 void
3156 wm_get_cfg_done(struct wm_softc *sc)
3157 {
3158 int mask;
3159 uint32_t reg;
3160 int i;
3161
3162 /* wait for eeprom to reload */
3163 switch (sc->sc_type) {
3164 case WM_T_82542_2_0:
3165 case WM_T_82542_2_1:
3166 /* null */
3167 break;
3168 case WM_T_82543:
3169 case WM_T_82544:
3170 case WM_T_82540:
3171 case WM_T_82545:
3172 case WM_T_82545_3:
3173 case WM_T_82546:
3174 case WM_T_82546_3:
3175 case WM_T_82541:
3176 case WM_T_82541_2:
3177 case WM_T_82547:
3178 case WM_T_82547_2:
3179 case WM_T_82573:
3180 case WM_T_82574:
3181 case WM_T_82583:
3182 /* generic */
3183 delay(10*1000);
3184 break;
3185 case WM_T_80003:
3186 case WM_T_82571:
3187 case WM_T_82572:
3188 case WM_T_82575:
3189 case WM_T_82576:
3190 case WM_T_82580:
3191 case WM_T_I350:
3192 case WM_T_I354:
3193 case WM_T_I210:
3194 case WM_T_I211:
3195 if (sc->sc_type == WM_T_82571) {
3196 /* Only 82571 shares port 0 */
3197 mask = EEMNGCTL_CFGDONE_0;
3198 } else
3199 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3200 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3201 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3202 break;
3203 delay(1000);
3204 }
3205 if (i >= WM_PHY_CFG_TIMEOUT) {
3206 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3207 device_xname(sc->sc_dev), __func__));
3208 }
3209 break;
3210 case WM_T_ICH8:
3211 case WM_T_ICH9:
3212 case WM_T_ICH10:
3213 case WM_T_PCH:
3214 case WM_T_PCH2:
3215 case WM_T_PCH_LPT:
3216 delay(10*1000);
3217 if (sc->sc_type >= WM_T_ICH10)
3218 wm_lan_init_done(sc);
3219 else
3220 wm_get_auto_rd_done(sc);
3221
3222 reg = CSR_READ(sc, WMREG_STATUS);
3223 if ((reg & STATUS_PHYRA) != 0)
3224 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3225 break;
3226 default:
3227 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3228 __func__);
3229 break;
3230 }
3231 }
3232
3233 /*
3234 * wm_reset:
3235 *
3236 * Reset the i82542 chip.
3237 */
3238 static void
3239 wm_reset(struct wm_softc *sc)
3240 {
3241 int phy_reset = 0;
3242 int error = 0;
3243 uint32_t reg, mask;
3244
3245 /*
3246 * Allocate on-chip memory according to the MTU size.
3247 * The Packet Buffer Allocation register must be written
3248 * before the chip is reset.
3249 */
3250 switch (sc->sc_type) {
3251 case WM_T_82547:
3252 case WM_T_82547_2:
3253 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3254 PBA_22K : PBA_30K;
3255 sc->sc_txfifo_head = 0;
3256 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3257 sc->sc_txfifo_size =
3258 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3259 sc->sc_txfifo_stall = 0;
3260 break;
3261 case WM_T_82571:
3262 case WM_T_82572:
3263 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3264 case WM_T_I350:
3265 case WM_T_I354:
3266 case WM_T_80003:
3267 sc->sc_pba = PBA_32K;
3268 break;
3269 case WM_T_82580:
3270 sc->sc_pba = PBA_35K;
3271 break;
3272 case WM_T_I210:
3273 case WM_T_I211:
3274 sc->sc_pba = PBA_34K;
3275 break;
3276 case WM_T_82576:
3277 sc->sc_pba = PBA_64K;
3278 break;
3279 case WM_T_82573:
3280 sc->sc_pba = PBA_12K;
3281 break;
3282 case WM_T_82574:
3283 case WM_T_82583:
3284 sc->sc_pba = PBA_20K;
3285 break;
3286 case WM_T_ICH8:
3287 sc->sc_pba = PBA_8K;
3288 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3289 break;
3290 case WM_T_ICH9:
3291 case WM_T_ICH10:
3292 sc->sc_pba = PBA_10K;
3293 break;
3294 case WM_T_PCH:
3295 case WM_T_PCH2:
3296 case WM_T_PCH_LPT:
3297 sc->sc_pba = PBA_26K;
3298 break;
3299 default:
3300 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3301 PBA_40K : PBA_48K;
3302 break;
3303 }
3304 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
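	/*
	 * PBA sets the receive share of the on-chip packet buffer (in
	 * KB); whatever remains is left for the transmit side.
	 */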
3305
3306 /* Prevent the PCI-E bus from sticking */
3307 if (sc->sc_flags & WM_F_PCIE) {
3308 int timeout = 800;
3309
3310 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3311 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3312
3313 while (timeout--) {
3314 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3315 == 0)
3316 break;
3317 delay(100);
3318 }
3319 }
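	/*
	 * CTRL_GIO_M_DIS asks the device to stop initiating new master
	 * requests; the loop above gives it up to ~80ms to go idle
	 * before the reset proceeds.
	 */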
3320
3321 /* Set the completion timeout for interface */
3322 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3323 || (sc->sc_type == WM_T_82580)
3324 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3325 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3326 wm_set_pcie_completion_timeout(sc);
3327
3328 /* Clear interrupt */
3329 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3330
3331 /* Stop the transmit and receive processes. */
3332 CSR_WRITE(sc, WMREG_RCTL, 0);
3333 sc->sc_rctl &= ~RCTL_EN;
3334 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3335 CSR_WRITE_FLUSH(sc);
3336
3337 /* XXX set_tbi_sbp_82543() */
3338
3339 delay(10*1000);
3340
3341 /* Must acquire the MDIO ownership before MAC reset */
3342 switch (sc->sc_type) {
3343 case WM_T_82573:
3344 case WM_T_82574:
3345 case WM_T_82583:
3346 error = wm_get_hw_semaphore_82573(sc);
3347 break;
3348 default:
3349 break;
3350 }
3351
3352 /*
3353 * 82541 Errata 29? & 82547 Errata 28?
3354 * See also the description about PHY_RST bit in CTRL register
3355 * in 8254x_GBe_SDM.pdf.
3356 */
3357 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3358 CSR_WRITE(sc, WMREG_CTRL,
3359 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3360 CSR_WRITE_FLUSH(sc);
3361 delay(5000);
3362 }
3363
3364 switch (sc->sc_type) {
3365 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3366 case WM_T_82541:
3367 case WM_T_82541_2:
3368 case WM_T_82547:
3369 case WM_T_82547_2:
3370 /*
3371 * On some chipsets, a reset through a memory-mapped write
3372 * cycle can cause the chip to reset before completing the
3373 	 * write cycle. This causes major headaches that can be
3374 * avoided by issuing the reset via indirect register writes
3375 * through I/O space.
3376 *
3377 * So, if we successfully mapped the I/O BAR at attach time,
3378 * use that. Otherwise, try our luck with a memory-mapped
3379 * reset.
3380 */
3381 if (sc->sc_flags & WM_F_IOH_VALID)
3382 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3383 else
3384 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3385 break;
3386 case WM_T_82545_3:
3387 case WM_T_82546_3:
3388 /* Use the shadow control register on these chips. */
3389 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3390 break;
3391 case WM_T_80003:
3392 mask = swfwphysem[sc->sc_funcid];
3393 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3394 wm_get_swfw_semaphore(sc, mask);
3395 CSR_WRITE(sc, WMREG_CTRL, reg);
3396 wm_put_swfw_semaphore(sc, mask);
3397 break;
3398 case WM_T_ICH8:
3399 case WM_T_ICH9:
3400 case WM_T_ICH10:
3401 case WM_T_PCH:
3402 case WM_T_PCH2:
3403 case WM_T_PCH_LPT:
3404 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3405 if (wm_check_reset_block(sc) == 0) {
3406 /*
3407 * Gate automatic PHY configuration by hardware on
3408 * non-managed 82579
3409 */
3410 if ((sc->sc_type == WM_T_PCH2)
3411 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3412 != 0))
3413 wm_gate_hw_phy_config_ich8lan(sc, 1);
3414
3416 reg |= CTRL_PHY_RESET;
3417 phy_reset = 1;
3418 }
3419 wm_get_swfwhw_semaphore(sc);
3420 CSR_WRITE(sc, WMREG_CTRL, reg);
3421 		/* Don't insert a completion barrier while resetting */
3422 delay(20*1000);
3423 wm_put_swfwhw_semaphore(sc);
3424 break;
3425 case WM_T_82580:
3426 case WM_T_I350:
3427 case WM_T_I354:
3428 case WM_T_I210:
3429 case WM_T_I211:
3430 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3431 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3432 CSR_WRITE_FLUSH(sc);
3433 delay(5000);
3434 break;
3435 case WM_T_82542_2_0:
3436 case WM_T_82542_2_1:
3437 case WM_T_82543:
3438 case WM_T_82540:
3439 case WM_T_82545:
3440 case WM_T_82546:
3441 case WM_T_82571:
3442 case WM_T_82572:
3443 case WM_T_82573:
3444 case WM_T_82574:
3445 case WM_T_82575:
3446 case WM_T_82576:
3447 case WM_T_82583:
3448 default:
3449 /* Everything else can safely use the documented method. */
3450 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3451 break;
3452 }
3453
3454 /* Must release the MDIO ownership after MAC reset */
3455 switch (sc->sc_type) {
3456 case WM_T_82573:
3457 case WM_T_82574:
3458 case WM_T_82583:
3459 if (error == 0)
3460 wm_put_hw_semaphore_82573(sc);
3461 break;
3462 default:
3463 break;
3464 }
3465
3466 if (phy_reset != 0)
3467 wm_get_cfg_done(sc);
3468
3469 /* reload EEPROM */
3470 switch (sc->sc_type) {
3471 case WM_T_82542_2_0:
3472 case WM_T_82542_2_1:
3473 case WM_T_82543:
3474 case WM_T_82544:
3475 delay(10);
3476 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3477 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3478 CSR_WRITE_FLUSH(sc);
3479 delay(2000);
3480 break;
3481 case WM_T_82540:
3482 case WM_T_82545:
3483 case WM_T_82545_3:
3484 case WM_T_82546:
3485 case WM_T_82546_3:
3486 delay(5*1000);
3487 /* XXX Disable HW ARPs on ASF enabled adapters */
3488 break;
3489 case WM_T_82541:
3490 case WM_T_82541_2:
3491 case WM_T_82547:
3492 case WM_T_82547_2:
3493 delay(20000);
3494 /* XXX Disable HW ARPs on ASF enabled adapters */
3495 break;
3496 case WM_T_82571:
3497 case WM_T_82572:
3498 case WM_T_82573:
3499 case WM_T_82574:
3500 case WM_T_82583:
3501 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3502 delay(10);
3503 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3504 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3505 CSR_WRITE_FLUSH(sc);
3506 }
3507 /* check EECD_EE_AUTORD */
3508 wm_get_auto_rd_done(sc);
3509 /*
3510 		 * PHY configuration from the NVM begins just after
3511 		 * EECD_AUTO_RD is set.
3512 */
3513 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3514 || (sc->sc_type == WM_T_82583))
3515 delay(25*1000);
3516 break;
3517 case WM_T_82575:
3518 case WM_T_82576:
3519 case WM_T_82580:
3520 case WM_T_I350:
3521 case WM_T_I354:
3522 case WM_T_I210:
3523 case WM_T_I211:
3524 case WM_T_80003:
3525 /* check EECD_EE_AUTORD */
3526 wm_get_auto_rd_done(sc);
3527 break;
3528 case WM_T_ICH8:
3529 case WM_T_ICH9:
3530 case WM_T_ICH10:
3531 case WM_T_PCH:
3532 case WM_T_PCH2:
3533 case WM_T_PCH_LPT:
3534 break;
3535 default:
3536 panic("%s: unknown type\n", __func__);
3537 }
3538
3539 /* Check whether EEPROM is present or not */
3540 switch (sc->sc_type) {
3541 case WM_T_82575:
3542 case WM_T_82576:
3543 #if 0 /* XXX */
3544 case WM_T_82580:
3545 #endif
3546 case WM_T_I350:
3547 case WM_T_I354:
3548 case WM_T_ICH8:
3549 case WM_T_ICH9:
3550 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3551 /* Not found */
3552 sc->sc_flags |= WM_F_EEPROM_INVALID;
3553 if ((sc->sc_type == WM_T_82575)
3554 || (sc->sc_type == WM_T_82576)
3555 || (sc->sc_type == WM_T_82580)
3556 || (sc->sc_type == WM_T_I350)
3557 || (sc->sc_type == WM_T_I354))
3558 wm_reset_init_script_82575(sc);
3559 }
3560 break;
3561 default:
3562 break;
3563 }
3564
3565 if ((sc->sc_type == WM_T_82580)
3566 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3567 /* clear global device reset status bit */
3568 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3569 }
3570
3571 /* Clear any pending interrupt events. */
3572 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3573 reg = CSR_READ(sc, WMREG_ICR);
3574
3575 /* reload sc_ctrl */
3576 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3577
3578 if (sc->sc_type == WM_T_I350)
3579 wm_set_eee_i350(sc);
3580
3581 /* dummy read from WUC */
3582 if (sc->sc_type == WM_T_PCH)
3583 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3584 /*
3585 * For PCH, this write will make sure that any noise will be detected
3586 * as a CRC error and be dropped rather than show up as a bad packet
3587 * to the DMA engine
3588 */
3589 if (sc->sc_type == WM_T_PCH)
3590 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3591
3592 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3593 CSR_WRITE(sc, WMREG_WUC, 0);
3594
3595 /* XXX need special handling for 82580 */
3596 }
3597
3598 /*
3599 * wm_add_rxbuf:
3600 *
3601  *	Add a receive buffer to the indicated descriptor.
3602 */
3603 static int
3604 wm_add_rxbuf(struct wm_softc *sc, int idx)
3605 {
3606 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3607 struct mbuf *m;
3608 int error;
3609
3610 KASSERT(WM_RX_LOCKED(sc));
3611
3612 MGETHDR(m, M_DONTWAIT, MT_DATA);
3613 if (m == NULL)
3614 return ENOBUFS;
3615
3616 MCLGET(m, M_DONTWAIT);
3617 if ((m->m_flags & M_EXT) == 0) {
3618 m_freem(m);
3619 return ENOBUFS;
3620 }
3621
3622 if (rxs->rxs_mbuf != NULL)
3623 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3624
3625 rxs->rxs_mbuf = m;
3626
3627 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3628 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3629 BUS_DMA_READ|BUS_DMA_NOWAIT);
3630 if (error) {
3631 /* XXX XXX XXX */
3632 aprint_error_dev(sc->sc_dev,
3633 "unable to load rx DMA map %d, error = %d\n",
3634 idx, error);
3635 panic("wm_add_rxbuf");
3636 }
3637
3638 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3639 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3640
3641 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3642 if ((sc->sc_rctl & RCTL_EN) != 0)
3643 WM_INIT_RXDESC(sc, idx);
3644 } else
3645 WM_INIT_RXDESC(sc, idx);
3646
3647 return 0;
3648 }
3649
3650 /*
3651 * wm_rxdrain:
3652 *
3653 * Drain the receive queue.
3654 */
3655 static void
3656 wm_rxdrain(struct wm_softc *sc)
3657 {
3658 struct wm_rxsoft *rxs;
3659 int i;
3660
3661 KASSERT(WM_RX_LOCKED(sc));
3662
3663 for (i = 0; i < WM_NRXDESC; i++) {
3664 rxs = &sc->sc_rxsoft[i];
3665 if (rxs->rxs_mbuf != NULL) {
3666 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3667 m_freem(rxs->rxs_mbuf);
3668 rxs->rxs_mbuf = NULL;
3669 }
3670 }
3671 }
3672
3673 /*
3674 * wm_init: [ifnet interface function]
3675 *
3676 * Initialize the interface.
3677 */
3678 static int
3679 wm_init(struct ifnet *ifp)
3680 {
3681 struct wm_softc *sc = ifp->if_softc;
3682 int ret;
3683
3684 WM_BOTH_LOCK(sc);
3685 ret = wm_init_locked(ifp);
3686 WM_BOTH_UNLOCK(sc);
3687
3688 return ret;
3689 }
3690
3691 static int
3692 wm_init_locked(struct ifnet *ifp)
3693 {
3694 struct wm_softc *sc = ifp->if_softc;
3695 struct wm_rxsoft *rxs;
3696 int i, j, trynum, error = 0;
3697 uint32_t reg;
3698
3699 KASSERT(WM_BOTH_LOCKED(sc));
3700 /*
3701 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3702 	 * There is a small but measurable benefit to avoiding the adjustment
3703 * of the descriptor so that the headers are aligned, for normal mtu,
3704 * on such platforms. One possibility is that the DMA itself is
3705 * slightly more efficient if the front of the entire packet (instead
3706 * of the front of the headers) is aligned.
3707 *
3708 * Note we must always set align_tweak to 0 if we are using
3709 * jumbo frames.
3710 */
3711 #ifdef __NO_STRICT_ALIGNMENT
3712 sc->sc_align_tweak = 0;
3713 #else
3714 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3715 sc->sc_align_tweak = 0;
3716 else
3717 sc->sc_align_tweak = 2;
3718 #endif /* __NO_STRICT_ALIGNMENT */
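	/*
	 * [Ed.] Illustrative sketch (not part of the driver): how the
	 * 2-byte tweak aligns the IP header. With a receive cluster
	 * starting on a 4-byte boundary, shifting the packet start by
	 * 2 bytes makes the 14-byte Ethernet header end on a 4-byte
	 * boundary:
	 *
	 *	char *buf = mtod(m, char *);	// assume 4-byte aligned
	 *	buf += 2;			// sc_align_tweak
	 *	// buf + ETHER_HDR_LEN is now 4-byte aligned (2 + 14 = 16),
	 *	// so the IP header that follows can be dereferenced
	 *	// safely on strict-alignment platforms.
	 */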
3719
3720 /* Cancel any pending I/O. */
3721 wm_stop_locked(ifp, 0);
3722
3723 /* update statistics before reset */
3724 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3725 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3726
3727 /* Reset the chip to a known state. */
3728 wm_reset(sc);
3729
3730 switch (sc->sc_type) {
3731 case WM_T_82571:
3732 case WM_T_82572:
3733 case WM_T_82573:
3734 case WM_T_82574:
3735 case WM_T_82583:
3736 case WM_T_80003:
3737 case WM_T_ICH8:
3738 case WM_T_ICH9:
3739 case WM_T_ICH10:
3740 case WM_T_PCH:
3741 case WM_T_PCH2:
3742 case WM_T_PCH_LPT:
3743 if (wm_check_mng_mode(sc) != 0)
3744 wm_get_hw_control(sc);
3745 break;
3746 default:
3747 break;
3748 }
3749
3750 /* Reset the PHY. */
3751 if (sc->sc_flags & WM_F_HAS_MII)
3752 wm_gmii_reset(sc);
3753
3754 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3755 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3756 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3757 || (sc->sc_type == WM_T_PCH_LPT))
3758 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3759
3760 /* Initialize the transmit descriptor ring. */
3761 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3762 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3763 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3764 sc->sc_txfree = WM_NTXDESC(sc);
3765 sc->sc_txnext = 0;
3766
3767 if (sc->sc_type < WM_T_82543) {
3768 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3769 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3770 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3771 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3772 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3773 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3774 } else {
3775 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3776 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3777 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3778 CSR_WRITE(sc, WMREG_TDH, 0);
3779 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3780 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3781
3782 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3783 /*
3784 * Don't write TDT before TCTL.EN is set.
3785 * See the document.
3786 */
3787 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3788 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3789 | TXDCTL_WTHRESH(0));
3790 else {
3791 CSR_WRITE(sc, WMREG_TDT, 0);
3792 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3793 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3794 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3795 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3796 }
3797 }
3798 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3799 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3800
3801 /* Initialize the transmit job descriptors. */
3802 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3803 sc->sc_txsoft[i].txs_mbuf = NULL;
3804 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3805 sc->sc_txsnext = 0;
3806 sc->sc_txsdirty = 0;
3807
3808 /*
3809 * Initialize the receive descriptor and receive job
3810 * descriptor rings.
3811 */
3812 if (sc->sc_type < WM_T_82543) {
3813 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3814 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3815 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3816 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3817 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3818 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3819
3820 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3821 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3822 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3823 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3824 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3825 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3826 } else {
3827 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3828 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3829 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3830 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3831 CSR_WRITE(sc, WMREG_EITR(0), 450);
3832 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3833 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3834 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3835 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3836 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3837 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3838 | RXDCTL_WTHRESH(1));
3839 } else {
3840 CSR_WRITE(sc, WMREG_RDH, 0);
3841 CSR_WRITE(sc, WMREG_RDT, 0);
3842 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3843 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3844 }
3845 }
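	/*
	 * [Ed.] Worked example (illustrative): SRRCTL's packet buffer
	 * size field is in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes;
	 * 1 KB granularity is assumed here. The panic above fires
	 * unless MCLBYTES is a whole number of those units. With
	 * MCLBYTES == 2048:
	 *
	 *	2048 >> SRRCTL_BSIZEPKT_SHIFT == 2	// two 1 KB units
	 */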
3846 for (i = 0; i < WM_NRXDESC; i++) {
3847 rxs = &sc->sc_rxsoft[i];
3848 if (rxs->rxs_mbuf == NULL) {
3849 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3850 log(LOG_ERR, "%s: unable to allocate or map "
3851 "rx buffer %d, error = %d\n",
3852 device_xname(sc->sc_dev), i, error);
3853 /*
3854 * XXX Should attempt to run with fewer receive
3855 * XXX buffers instead of just failing.
3856 */
3857 wm_rxdrain(sc);
3858 goto out;
3859 }
3860 } else {
3861 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3862 WM_INIT_RXDESC(sc, i);
3863 /*
3864 			 * For 82575 and newer devices, the RX descriptors
3865 			 * must be initialized after RCTL.EN is set in
3866 			 * wm_set_filter().
3867 */
3868 }
3869 }
3870 sc->sc_rxptr = 0;
3871 sc->sc_rxdiscard = 0;
3872 WM_RXCHAIN_RESET(sc);
3873
3874 /*
3875 * Clear out the VLAN table -- we don't use it (yet).
3876 */
3877 CSR_WRITE(sc, WMREG_VET, 0);
3878 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3879 trynum = 10; /* Due to hw errata */
3880 else
3881 trynum = 1;
3882 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3883 for (j = 0; j < trynum; j++)
3884 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3885
3886 /*
3887 * Set up flow-control parameters.
3888 *
3889 * XXX Values could probably stand some tuning.
3890 */
3891 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3892 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3893 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3894 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3895 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3896 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3897 }
3898
3899 sc->sc_fcrtl = FCRTL_DFLT;
3900 if (sc->sc_type < WM_T_82543) {
3901 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3902 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3903 } else {
3904 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3905 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3906 }
3907
3908 if (sc->sc_type == WM_T_80003)
3909 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3910 else
3911 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3912
3913 /* Writes the control register. */
3914 wm_set_vlan(sc);
3915
3916 if (sc->sc_flags & WM_F_HAS_MII) {
3917 int val;
3918
3919 switch (sc->sc_type) {
3920 case WM_T_80003:
3921 case WM_T_ICH8:
3922 case WM_T_ICH9:
3923 case WM_T_ICH10:
3924 case WM_T_PCH:
3925 case WM_T_PCH2:
3926 case WM_T_PCH_LPT:
3927 /*
3928 * Set the mac to wait the maximum time between each
3929 * iteration and increase the max iterations when
3930 * polling the phy; this fixes erroneous timeouts at
3931 * 10Mbps.
3932 */
3933 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3934 0xFFFF);
3935 val = wm_kmrn_readreg(sc,
3936 KUMCTRLSTA_OFFSET_INB_PARAM);
3937 val |= 0x3F;
3938 wm_kmrn_writereg(sc,
3939 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3940 break;
3941 default:
3942 break;
3943 }
3944
3945 if (sc->sc_type == WM_T_80003) {
3946 val = CSR_READ(sc, WMREG_CTRL_EXT);
3947 val &= ~CTRL_EXT_LINK_MODE_MASK;
3948 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3949
3950 /* Bypass RX and TX FIFO's */
3951 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3952 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3953 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3954 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3955 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3956 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3957 }
3958 }
3959 #if 0
3960 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3961 #endif
3962
3963 /* Set up checksum offload parameters. */
3964 reg = CSR_READ(sc, WMREG_RXCSUM);
3965 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3966 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3967 reg |= RXCSUM_IPOFL;
3968 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3969 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3970 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3971 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3972 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3973
3974 /* Set up the interrupt registers. */
3975 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3976 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3977 ICR_RXO | ICR_RXT0;
3978 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3979
3980 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3981 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3982 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3983 reg = CSR_READ(sc, WMREG_KABGTXD);
3984 reg |= KABGTXD_BGSQLBIAS;
3985 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3986 }
3987
3988 /* Set up the inter-packet gap. */
3989 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3990
3991 if (sc->sc_type >= WM_T_82543) {
3992 /*
3993 * Set up the interrupt throttling register (units of 256ns)
3994 * Note that a footnote in Intel's documentation says this
3995 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3996 		 * or 10Mbit mode. Empirically, this also appears to be true
3997 		 * for the 1024ns units of the other
3998 * interrupt-related timer registers -- so, really, we ought
3999 * to divide this value by 4 when the link speed is low.
4000 *
4001 * XXX implement this division at link speed change!
4002 */
4003
4004 /*
4005 * For N interrupts/sec, set this value to:
4006 * 1000000000 / (N * 256). Note that we set the
4007 * absolute and packet timer values to this value
4008 * divided by 4 to get "simple timer" behavior.
4009 */
4010
4011 sc->sc_itr = 1500; /* 2604 ints/sec */
4012 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4013 }
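	/*
	 * [Ed.] Worked example (illustrative): with sc_itr == 1500 and
	 * 256ns units,
	 *
	 *	N = 1000000000 / (1500 * 256) ~= 2604 interrupts/sec
	 *
	 * which is where the "2604 ints/sec" figure above comes from.
	 * The TIDV/TADV and RDTR/RADV values of 375 written earlier are
	 * this value divided by 4, giving the "simple timer" behavior
	 * the comment describes.
	 */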
4014
4015 /* Set the VLAN ethernetype. */
4016 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4017
4018 /*
4019 * Set up the transmit control register; we start out with
4020 	 * a collision distance suitable for FDX, but update it when
4021 * we resolve the media type.
4022 */
4023 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4024 | TCTL_CT(TX_COLLISION_THRESHOLD)
4025 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4026 if (sc->sc_type >= WM_T_82571)
4027 sc->sc_tctl |= TCTL_MULR;
4028 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4029
4030 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4031 /* Write TDT after TCTL.EN is set. See the document. */
4032 CSR_WRITE(sc, WMREG_TDT, 0);
4033 }
4034
4035 if (sc->sc_type == WM_T_80003) {
4036 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4037 reg &= ~TCTL_EXT_GCEX_MASK;
4038 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4039 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4040 }
4041
4042 /* Set the media. */
4043 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4044 goto out;
4045
4046 /* Configure for OS presence */
4047 wm_init_manageability(sc);
4048
4049 /*
4050 * Set up the receive control register; we actually program
4051 * the register when we set the receive filter. Use multicast
4052 * address offset type 0.
4053 *
4054 * Only the i82544 has the ability to strip the incoming
4055 * CRC, so we don't enable that feature.
4056 */
4057 sc->sc_mchash_type = 0;
4058 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4059 | RCTL_MO(sc->sc_mchash_type);
4060
4061 /*
4062 * The I350 has a bug where it always strips the CRC whether
4063 * asked to or not. So ask for stripped CRC here and cope in rxeof
4064 */
4065 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4066 || (sc->sc_type == WM_T_I210))
4067 sc->sc_rctl |= RCTL_SECRC;
4068
4069 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4070 && (ifp->if_mtu > ETHERMTU)) {
4071 sc->sc_rctl |= RCTL_LPE;
4072 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4073 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4074 }
4075
4076 if (MCLBYTES == 2048) {
4077 sc->sc_rctl |= RCTL_2k;
4078 } else {
4079 if (sc->sc_type >= WM_T_82543) {
4080 switch (MCLBYTES) {
4081 case 4096:
4082 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4083 break;
4084 case 8192:
4085 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4086 break;
4087 case 16384:
4088 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4089 break;
4090 default:
4091 panic("wm_init: MCLBYTES %d unsupported",
4092 MCLBYTES);
4093 break;
4094 }
4095 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4096 }
4097
4098 /* Set the receive filter. */
4099 wm_set_filter(sc);
4100
4101 /* Enable ECC */
4102 switch (sc->sc_type) {
4103 case WM_T_82571:
4104 reg = CSR_READ(sc, WMREG_PBA_ECC);
4105 reg |= PBA_ECC_CORR_EN;
4106 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4107 break;
4108 case WM_T_PCH_LPT:
4109 reg = CSR_READ(sc, WMREG_PBECCSTS);
4110 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4111 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4112
4113 reg = CSR_READ(sc, WMREG_CTRL);
4114 reg |= CTRL_MEHE;
4115 CSR_WRITE(sc, WMREG_CTRL, reg);
4116 break;
4117 default:
4118 break;
4119 }
4120
4121 /* On 575 and later set RDT only if RX enabled */
4122 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4123 for (i = 0; i < WM_NRXDESC; i++)
4124 WM_INIT_RXDESC(sc, i);
4125
4126 sc->sc_stopping = false;
4127
4128 /* Start the one second link check clock. */
4129 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4130
4131 /* ...all done! */
4132 ifp->if_flags |= IFF_RUNNING;
4133 ifp->if_flags &= ~IFF_OACTIVE;
4134
4135 out:
4136 sc->sc_if_flags = ifp->if_flags;
4137 if (error)
4138 log(LOG_ERR, "%s: interface not running\n",
4139 device_xname(sc->sc_dev));
4140 return error;
4141 }
4142
4143 /*
4144 * wm_stop: [ifnet interface function]
4145 *
4146 * Stop transmission on the interface.
4147 */
4148 static void
4149 wm_stop(struct ifnet *ifp, int disable)
4150 {
4151 struct wm_softc *sc = ifp->if_softc;
4152
4153 WM_BOTH_LOCK(sc);
4154 wm_stop_locked(ifp, disable);
4155 WM_BOTH_UNLOCK(sc);
4156 }
4157
4158 static void
4159 wm_stop_locked(struct ifnet *ifp, int disable)
4160 {
4161 struct wm_softc *sc = ifp->if_softc;
4162 struct wm_txsoft *txs;
4163 int i;
4164
4165 KASSERT(WM_BOTH_LOCKED(sc));
4166
4167 sc->sc_stopping = true;
4168
4169 /* Stop the one second clock. */
4170 callout_stop(&sc->sc_tick_ch);
4171
4172 /* Stop the 82547 Tx FIFO stall check timer. */
4173 if (sc->sc_type == WM_T_82547)
4174 callout_stop(&sc->sc_txfifo_ch);
4175
4176 if (sc->sc_flags & WM_F_HAS_MII) {
4177 /* Down the MII. */
4178 mii_down(&sc->sc_mii);
4179 } else {
4180 #if 0
4181 /* Should we clear PHY's status properly? */
4182 wm_reset(sc);
4183 #endif
4184 }
4185
4186 /* Stop the transmit and receive processes. */
4187 CSR_WRITE(sc, WMREG_TCTL, 0);
4188 CSR_WRITE(sc, WMREG_RCTL, 0);
4189 sc->sc_rctl &= ~RCTL_EN;
4190
4191 /*
4192 * Clear the interrupt mask to ensure the device cannot assert its
4193 * interrupt line.
4194 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4195 * any currently pending or shared interrupt.
4196 */
4197 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4198 sc->sc_icr = 0;
4199
4200 /* Release any queued transmit buffers. */
4201 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4202 txs = &sc->sc_txsoft[i];
4203 if (txs->txs_mbuf != NULL) {
4204 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4205 m_freem(txs->txs_mbuf);
4206 txs->txs_mbuf = NULL;
4207 }
4208 }
4209
4210 /* Mark the interface as down and cancel the watchdog timer. */
4211 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4212 ifp->if_timer = 0;
4213
4214 if (disable)
4215 wm_rxdrain(sc);
4216
4217 #if 0 /* notyet */
4218 if (sc->sc_type >= WM_T_82544)
4219 CSR_WRITE(sc, WMREG_WUC, 0);
4220 #endif
4221 }
4222
4223 /*
4224 * wm_tx_offload:
4225 *
4226 * Set up TCP/IP checksumming parameters for the
4227 * specified packet.
4228 */
4229 static int
4230 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4231 uint8_t *fieldsp)
4232 {
4233 struct mbuf *m0 = txs->txs_mbuf;
4234 struct livengood_tcpip_ctxdesc *t;
4235 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4236 uint32_t ipcse;
4237 struct ether_header *eh;
4238 int offset, iphl;
4239 uint8_t fields;
4240
4241 /*
4242 * XXX It would be nice if the mbuf pkthdr had offset
4243 * fields for the protocol headers.
4244 */
4245
4246 eh = mtod(m0, struct ether_header *);
4247 switch (htons(eh->ether_type)) {
4248 case ETHERTYPE_IP:
4249 case ETHERTYPE_IPV6:
4250 offset = ETHER_HDR_LEN;
4251 break;
4252
4253 case ETHERTYPE_VLAN:
4254 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4255 break;
4256
4257 default:
4258 /*
4259 * Don't support this protocol or encapsulation.
4260 */
4261 *fieldsp = 0;
4262 *cmdp = 0;
4263 return 0;
4264 }
4265
4266 if ((m0->m_pkthdr.csum_flags &
4267 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4268 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4269 } else {
4270 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4271 }
4272 ipcse = offset + iphl - 1;
4273
4274 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4275 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4276 seg = 0;
4277 fields = 0;
4278
4279 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4280 int hlen = offset + iphl;
4281 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4282
4283 if (__predict_false(m0->m_len <
4284 (hlen + sizeof(struct tcphdr)))) {
4285 /*
4286 * TCP/IP headers are not in the first mbuf; we need
4287 * to do this the slow and painful way. Let's just
4288 * hope this doesn't happen very often.
4289 */
4290 struct tcphdr th;
4291
4292 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4293
4294 m_copydata(m0, hlen, sizeof(th), &th);
4295 if (v4) {
4296 struct ip ip;
4297
4298 m_copydata(m0, offset, sizeof(ip), &ip);
4299 ip.ip_len = 0;
4300 m_copyback(m0,
4301 offset + offsetof(struct ip, ip_len),
4302 sizeof(ip.ip_len), &ip.ip_len);
4303 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4304 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4305 } else {
4306 struct ip6_hdr ip6;
4307
4308 m_copydata(m0, offset, sizeof(ip6), &ip6);
4309 ip6.ip6_plen = 0;
4310 m_copyback(m0,
4311 offset + offsetof(struct ip6_hdr, ip6_plen),
4312 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4313 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4314 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4315 }
4316 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4317 sizeof(th.th_sum), &th.th_sum);
4318
4319 hlen += th.th_off << 2;
4320 } else {
4321 /*
4322 * TCP/IP headers are in the first mbuf; we can do
4323 * this the easy way.
4324 */
4325 struct tcphdr *th;
4326
4327 if (v4) {
4328 struct ip *ip =
4329 (void *)(mtod(m0, char *) + offset);
4330 th = (void *)(mtod(m0, char *) + hlen);
4331
4332 ip->ip_len = 0;
4333 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4334 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4335 } else {
4336 struct ip6_hdr *ip6 =
4337 (void *)(mtod(m0, char *) + offset);
4338 th = (void *)(mtod(m0, char *) + hlen);
4339
4340 ip6->ip6_plen = 0;
4341 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4342 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4343 }
4344 hlen += th->th_off << 2;
4345 }
4346
4347 if (v4) {
4348 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4349 cmdlen |= WTX_TCPIP_CMD_IP;
4350 } else {
4351 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4352 ipcse = 0;
4353 }
4354 cmd |= WTX_TCPIP_CMD_TSE;
4355 cmdlen |= WTX_TCPIP_CMD_TSE |
4356 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4357 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4358 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4359 }
4360
4361 /*
4362 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4363 * offload feature, if we load the context descriptor, we
4364 * MUST provide valid values for IPCSS and TUCSS fields.
4365 */
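	/*
	 * [Ed.] Worked example (illustrative): for an untagged IPv4/TCP
	 * frame with a 20-byte IP header, offset == ETHER_HDR_LEN == 14
	 * and iphl == 20, so the context descriptor fields below become:
	 *
	 *	IPCSS = 14			// IP checksum start
	 *	IPCSO = 14 + offsetof(struct ip, ip_sum) == 24
	 *	IPCSE = 14 + 20 - 1 == 33	// last byte of IP header
	 *	TUCSS = 34			// TCP header start
	 *	TUCSO = 34 + offsetof(struct tcphdr, th_sum) == 50
	 */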
4366
4367 ipcs = WTX_TCPIP_IPCSS(offset) |
4368 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4369 WTX_TCPIP_IPCSE(ipcse);
4370 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4371 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4372 fields |= WTX_IXSM;
4373 }
4374
4375 offset += iphl;
4376
4377 if (m0->m_pkthdr.csum_flags &
4378 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4379 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4380 fields |= WTX_TXSM;
4381 tucs = WTX_TCPIP_TUCSS(offset) |
4382 WTX_TCPIP_TUCSO(offset +
4383 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4384 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4385 } else if ((m0->m_pkthdr.csum_flags &
4386 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4387 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4388 fields |= WTX_TXSM;
4389 tucs = WTX_TCPIP_TUCSS(offset) |
4390 WTX_TCPIP_TUCSO(offset +
4391 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4392 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4393 } else {
4394 /* Just initialize it to a valid TCP context. */
4395 tucs = WTX_TCPIP_TUCSS(offset) |
4396 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4397 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4398 }
4399
4400 /* Fill in the context descriptor. */
4401 t = (struct livengood_tcpip_ctxdesc *)
4402 &sc->sc_txdescs[sc->sc_txnext];
4403 t->tcpip_ipcs = htole32(ipcs);
4404 t->tcpip_tucs = htole32(tucs);
4405 t->tcpip_cmdlen = htole32(cmdlen);
4406 t->tcpip_seg = htole32(seg);
4407 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4408
4409 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4410 txs->txs_ndesc++;
4411
4412 *cmdp = cmd;
4413 *fieldsp = fields;
4414
4415 return 0;
4416 }
4417
4418 static void
4419 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4420 {
4421 struct mbuf *m;
4422 int i;
4423
4424 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4425 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4426 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4427 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4428 m->m_data, m->m_len, m->m_flags);
4429 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4430 i, i == 1 ? "" : "s");
4431 }
4432
4433 /*
4434 * wm_82547_txfifo_stall:
4435 *
4436 * Callout used to wait for the 82547 Tx FIFO to drain,
4437 * reset the FIFO pointers, and restart packet transmission.
4438 */
4439 static void
4440 wm_82547_txfifo_stall(void *arg)
4441 {
4442 struct wm_softc *sc = arg;
4443 #ifndef WM_MPSAFE
4444 int s;
4445
4446 s = splnet();
4447 #endif
4448 WM_TX_LOCK(sc);
4449
4450 if (sc->sc_stopping)
4451 goto out;
4452
4453 if (sc->sc_txfifo_stall) {
4454 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4455 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4456 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4457 /*
4458 * Packets have drained. Stop transmitter, reset
4459 * FIFO pointers, restart transmitter, and kick
4460 * the packet queue.
4461 */
4462 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4463 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4464 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4465 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4466 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4467 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4468 CSR_WRITE(sc, WMREG_TCTL, tctl);
4469 CSR_WRITE_FLUSH(sc);
4470
4471 sc->sc_txfifo_head = 0;
4472 sc->sc_txfifo_stall = 0;
4473 wm_start_locked(&sc->sc_ethercom.ec_if);
4474 } else {
4475 /*
4476 * Still waiting for packets to drain; try again in
4477 * another tick.
4478 */
4479 callout_schedule(&sc->sc_txfifo_ch, 1);
4480 }
4481 }
4482
4483 out:
4484 WM_TX_UNLOCK(sc);
4485 #ifndef WM_MPSAFE
4486 splx(s);
4487 #endif
4488 }
4489
4490 /*
4491 * wm_82547_txfifo_bugchk:
4492 *
4493 * Check for bug condition in the 82547 Tx FIFO. We need to
4494  *	prevent enqueueing a packet that would wrap around the end
4495  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4496 *
4497 * We do this by checking the amount of space before the end
4498 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4499 * the Tx FIFO, wait for all remaining packets to drain, reset
4500 * the internal FIFO pointers to the beginning, and restart
4501 * transmission on the interface.
4502 */
4503 #define WM_FIFO_HDR 0x10
4504 #define WM_82547_PAD_LEN 0x3e0
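/*
 * [Ed.] Worked example (illustrative): suppose sc_txfifo_head leaves
 * space == 0x100 bytes before the end of the FIFO and a full-size
 * 1514-byte frame arrives. Then
 *
 *	len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) == 0x600
 *
 * and since 0x600 >= WM_82547_PAD_LEN + 0x100 (== 0x4e0), the driver
 * declares a stall and waits for the FIFO to drain before queueing
 * the frame.
 */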
4505 static int
4506 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4507 {
4508 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4509 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4510
4511 /* Just return if already stalled. */
4512 if (sc->sc_txfifo_stall)
4513 return 1;
4514
4515 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4516 /* Stall only occurs in half-duplex mode. */
4517 goto send_packet;
4518 }
4519
4520 if (len >= WM_82547_PAD_LEN + space) {
4521 sc->sc_txfifo_stall = 1;
4522 callout_schedule(&sc->sc_txfifo_ch, 1);
4523 return 1;
4524 }
4525
4526 send_packet:
4527 sc->sc_txfifo_head += len;
4528 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4529 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4530
4531 return 0;
4532 }
4533
4534 /*
4535 * wm_start: [ifnet interface function]
4536 *
4537 * Start packet transmission on the interface.
4538 */
4539 static void
4540 wm_start(struct ifnet *ifp)
4541 {
4542 struct wm_softc *sc = ifp->if_softc;
4543
4544 WM_TX_LOCK(sc);
4545 if (!sc->sc_stopping)
4546 wm_start_locked(ifp);
4547 WM_TX_UNLOCK(sc);
4548 }
4549
4550 static void
4551 wm_start_locked(struct ifnet *ifp)
4552 {
4553 struct wm_softc *sc = ifp->if_softc;
4554 struct mbuf *m0;
4555 struct m_tag *mtag;
4556 struct wm_txsoft *txs;
4557 bus_dmamap_t dmamap;
4558 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4559 bus_addr_t curaddr;
4560 bus_size_t seglen, curlen;
4561 uint32_t cksumcmd;
4562 uint8_t cksumfields;
4563
4564 KASSERT(WM_TX_LOCKED(sc));
4565
4566 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4567 return;
4568
4569 /* Remember the previous number of free descriptors. */
4570 ofree = sc->sc_txfree;
4571
4572 /*
4573 * Loop through the send queue, setting up transmit descriptors
4574 * until we drain the queue, or use up all available transmit
4575 * descriptors.
4576 */
4577 for (;;) {
4578 m0 = NULL;
4579
4580 /* Get a work queue entry. */
4581 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4582 wm_txintr(sc);
4583 if (sc->sc_txsfree == 0) {
4584 DPRINTF(WM_DEBUG_TX,
4585 ("%s: TX: no free job descriptors\n",
4586 device_xname(sc->sc_dev)));
4587 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4588 break;
4589 }
4590 }
4591
4592 /* Grab a packet off the queue. */
4593 IFQ_DEQUEUE(&ifp->if_snd, m0);
4594 if (m0 == NULL)
4595 break;
4596
4597 DPRINTF(WM_DEBUG_TX,
4598 ("%s: TX: have packet to transmit: %p\n",
4599 device_xname(sc->sc_dev), m0));
4600
4601 txs = &sc->sc_txsoft[sc->sc_txsnext];
4602 dmamap = txs->txs_dmamap;
4603
4604 use_tso = (m0->m_pkthdr.csum_flags &
4605 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4606
4607 /*
4608 * So says the Linux driver:
4609 * The controller does a simple calculation to make sure
4610 * there is enough room in the FIFO before initiating the
4611 * DMA for each buffer. The calc is:
4612 * 4 = ceil(buffer len / MSS)
4613 * To make sure we don't overrun the FIFO, adjust the max
4614 * buffer len if the MSS drops.
4615 */
4616 dmamap->dm_maxsegsz =
4617 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4618 ? m0->m_pkthdr.segsz << 2
4619 : WTX_MAX_LEN;
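		/*
		 * [Ed.] Illustrative example: with TSO and an MSS of
		 * 536, dm_maxsegsz becomes 536 << 2 == 2144 bytes
		 * (assuming that is still below WTX_MAX_LEN); without
		 * TSO, or with a large MSS, the WTX_MAX_LEN cap
		 * applies instead.
		 */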
4620
4621 /*
4622 * Load the DMA map. If this fails, the packet either
4623 * didn't fit in the allotted number of segments, or we
4624 * were short on resources. For the too-many-segments
4625 * case, we simply report an error and drop the packet,
4626 * since we can't sanely copy a jumbo packet to a single
4627 * buffer.
4628 */
4629 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4630 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4631 if (error) {
4632 if (error == EFBIG) {
4633 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4634 log(LOG_ERR, "%s: Tx packet consumes too many "
4635 "DMA segments, dropping...\n",
4636 device_xname(sc->sc_dev));
4637 wm_dump_mbuf_chain(sc, m0);
4638 m_freem(m0);
4639 continue;
4640 }
4641 /* Short on resources, just stop for now. */
4642 DPRINTF(WM_DEBUG_TX,
4643 ("%s: TX: dmamap load failed: %d\n",
4644 device_xname(sc->sc_dev), error));
4645 break;
4646 }
4647
4648 segs_needed = dmamap->dm_nsegs;
4649 if (use_tso) {
4650 /* For sentinel descriptor; see below. */
4651 segs_needed++;
4652 }
4653
4654 /*
4655 * Ensure we have enough descriptors free to describe
4656 * the packet. Note, we always reserve one descriptor
4657 * at the end of the ring due to the semantics of the
4658 * TDT register, plus one more in the event we need
4659 * to load offload context.
4660 */
4661 if (segs_needed > sc->sc_txfree - 2) {
4662 /*
4663 * Not enough free descriptors to transmit this
4664 * packet. We haven't committed anything yet,
4665 * so just unload the DMA map, put the packet
4666 			 * back on the queue, and punt. Notify the upper
4667 * layer that there are no more slots left.
4668 */
4669 DPRINTF(WM_DEBUG_TX,
4670 ("%s: TX: need %d (%d) descriptors, have %d\n",
4671 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4672 segs_needed, sc->sc_txfree - 1));
4673 ifp->if_flags |= IFF_OACTIVE;
4674 bus_dmamap_unload(sc->sc_dmat, dmamap);
4675 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4676 break;
4677 }
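		/*
		 * [Ed.] Worked example (illustrative): with
		 * sc_txfree == 8, a packet needing segs_needed == 6
		 * still fits (6 > 8 - 2 is false), while one needing 7
		 * is deferred until wm_txintr() reclaims descriptors.
		 */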
4678
4679 /*
4680 * Check for 82547 Tx FIFO bug. We need to do this
4681 * once we know we can transmit the packet, since we
4682 * do some internal FIFO space accounting here.
4683 */
4684 if (sc->sc_type == WM_T_82547 &&
4685 wm_82547_txfifo_bugchk(sc, m0)) {
4686 DPRINTF(WM_DEBUG_TX,
4687 ("%s: TX: 82547 Tx FIFO bug detected\n",
4688 device_xname(sc->sc_dev)));
4689 ifp->if_flags |= IFF_OACTIVE;
4690 bus_dmamap_unload(sc->sc_dmat, dmamap);
4691 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4692 break;
4693 }
4694
4695 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4696
4697 DPRINTF(WM_DEBUG_TX,
4698 ("%s: TX: packet has %d (%d) DMA segments\n",
4699 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4700
4701 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4702
4703 /*
4704 * Store a pointer to the packet so that we can free it
4705 * later.
4706 *
4707 * Initially, we consider the number of descriptors the
4708 		 * packet uses to be the number of DMA segments. This may be
4709 * incremented by 1 if we do checksum offload (a descriptor
4710 * is used to set the checksum context).
4711 */
4712 txs->txs_mbuf = m0;
4713 txs->txs_firstdesc = sc->sc_txnext;
4714 txs->txs_ndesc = segs_needed;
4715
4716 /* Set up offload parameters for this packet. */
4717 if (m0->m_pkthdr.csum_flags &
4718 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4719 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4720 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4721 if (wm_tx_offload(sc, txs, &cksumcmd,
4722 &cksumfields) != 0) {
4723 /* Error message already displayed. */
4724 bus_dmamap_unload(sc->sc_dmat, dmamap);
4725 continue;
4726 }
4727 } else {
4728 cksumcmd = 0;
4729 cksumfields = 0;
4730 }
4731
4732 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4733
4734 /* Sync the DMA map. */
4735 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4736 BUS_DMASYNC_PREWRITE);
4737
4738 /* Initialize the transmit descriptor. */
4739 for (nexttx = sc->sc_txnext, seg = 0;
4740 seg < dmamap->dm_nsegs; seg++) {
4741 for (seglen = dmamap->dm_segs[seg].ds_len,
4742 curaddr = dmamap->dm_segs[seg].ds_addr;
4743 seglen != 0;
4744 curaddr += curlen, seglen -= curlen,
4745 nexttx = WM_NEXTTX(sc, nexttx)) {
4746 curlen = seglen;
4747
4748 /*
4749 * So says the Linux driver:
4750 * Work around for premature descriptor
4751 * write-backs in TSO mode. Append a
4752 * 4-byte sentinel descriptor.
4753 */
4754 if (use_tso &&
4755 seg == dmamap->dm_nsegs - 1 &&
4756 curlen > 8)
4757 curlen -= 4;
4758
4759 wm_set_dma_addr(
4760 &sc->sc_txdescs[nexttx].wtx_addr,
4761 curaddr);
4762 sc->sc_txdescs[nexttx].wtx_cmdlen =
4763 htole32(cksumcmd | curlen);
4764 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4765 0;
4766 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4767 cksumfields;
4768 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4769 lasttx = nexttx;
4770
4771 DPRINTF(WM_DEBUG_TX,
4772 ("%s: TX: desc %d: low %#" PRIx64 ", "
4773 "len %#04zx\n",
4774 device_xname(sc->sc_dev), nexttx,
4775 (uint64_t)curaddr, curlen));
4776 }
4777 }
4778
4779 KASSERT(lasttx != -1);
4780
4781 /*
4782 * Set up the command byte on the last descriptor of
4783 * the packet. If we're in the interrupt delay window,
4784 * delay the interrupt.
4785 */
4786 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4787 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4788
4789 /*
4790 * If VLANs are enabled and the packet has a VLAN tag, set
4791 * up the descriptor to encapsulate the packet for us.
4792 *
4793 * This is only valid on the last descriptor of the packet.
4794 */
4795 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4796 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4797 htole32(WTX_CMD_VLE);
4798 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4799 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4800 }
4801
4802 txs->txs_lastdesc = lasttx;
4803
4804 DPRINTF(WM_DEBUG_TX,
4805 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4806 device_xname(sc->sc_dev),
4807 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4808
4809 /* Sync the descriptors we're using. */
4810 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4811 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4812
4813 /* Give the packet to the chip. */
4814 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4815
4816 DPRINTF(WM_DEBUG_TX,
4817 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4818
4819 DPRINTF(WM_DEBUG_TX,
4820 ("%s: TX: finished transmitting packet, job %d\n",
4821 device_xname(sc->sc_dev), sc->sc_txsnext));
4822
4823 /* Advance the tx pointer. */
4824 sc->sc_txfree -= txs->txs_ndesc;
4825 sc->sc_txnext = nexttx;
4826
4827 sc->sc_txsfree--;
4828 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4829
4830 /* Pass the packet to any BPF listeners. */
4831 bpf_mtap(ifp, m0);
4832 }
4833
4834 if (m0 != NULL) {
4835 ifp->if_flags |= IFF_OACTIVE;
4836 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4837 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4838 m_freem(m0);
4839 }
4840
4841 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4842 /* No more slots; notify upper layer. */
4843 ifp->if_flags |= IFF_OACTIVE;
4844 }
4845
4846 if (sc->sc_txfree != ofree) {
4847 /* Set a watchdog timer in case the chip flakes out. */
4848 ifp->if_timer = 5;
4849 }
4850 }
4851
4852 /*
4853 * wm_nq_tx_offload:
4854 *
4855 * Set up TCP/IP checksumming parameters for the
4856 * specified packet, for NEWQUEUE devices
4857 */
4858 static int
4859 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4860 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4861 {
4862 struct mbuf *m0 = txs->txs_mbuf;
4863 struct m_tag *mtag;
4864 uint32_t vl_len, mssidx, cmdc;
4865 struct ether_header *eh;
4866 int offset, iphl;
4867
4868 /*
4869 * XXX It would be nice if the mbuf pkthdr had offset
4870 * fields for the protocol headers.
4871 */
4872 *cmdlenp = 0;
4873 *fieldsp = 0;
4874
4875 eh = mtod(m0, struct ether_header *);
4876 switch (htons(eh->ether_type)) {
4877 case ETHERTYPE_IP:
4878 case ETHERTYPE_IPV6:
4879 offset = ETHER_HDR_LEN;
4880 break;
4881
4882 case ETHERTYPE_VLAN:
4883 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4884 break;
4885
4886 default:
4887 /* Don't support this protocol or encapsulation. */
4888 *do_csum = false;
4889 return 0;
4890 }
4891 *do_csum = true;
4892 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4893 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4894
4895 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4896 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4897
4898 if ((m0->m_pkthdr.csum_flags &
4899 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4900 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4901 } else {
4902 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4903 }
4904 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4905 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
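	/*
	 * [Ed.] Illustrative example: for an untagged IPv4 frame with a
	 * 20-byte IP header, vl_len packs MACLEN == 14 and IPLEN == 20
	 * into their respective fields; the KASSERTs above check that
	 * each value fits within its field mask.
	 */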
4906
4907 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4908 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4909 << NQTXC_VLLEN_VLAN_SHIFT);
4910 *cmdlenp |= NQTX_CMD_VLE;
4911 }
4912
4913 mssidx = 0;
4914
4915 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4916 int hlen = offset + iphl;
4917 int tcp_hlen;
4918 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4919
4920 if (__predict_false(m0->m_len <
4921 (hlen + sizeof(struct tcphdr)))) {
4922 /*
4923 * TCP/IP headers are not in the first mbuf; we need
4924 * to do this the slow and painful way. Let's just
4925 * hope this doesn't happen very often.
4926 */
4927 struct tcphdr th;
4928
4929 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4930
4931 m_copydata(m0, hlen, sizeof(th), &th);
4932 if (v4) {
4933 struct ip ip;
4934
4935 m_copydata(m0, offset, sizeof(ip), &ip);
4936 ip.ip_len = 0;
4937 m_copyback(m0,
4938 offset + offsetof(struct ip, ip_len),
4939 sizeof(ip.ip_len), &ip.ip_len);
4940 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4941 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4942 } else {
4943 struct ip6_hdr ip6;
4944
4945 m_copydata(m0, offset, sizeof(ip6), &ip6);
4946 ip6.ip6_plen = 0;
4947 m_copyback(m0,
4948 offset + offsetof(struct ip6_hdr, ip6_plen),
4949 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4950 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4951 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4952 }
4953 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4954 sizeof(th.th_sum), &th.th_sum);
4955
4956 tcp_hlen = th.th_off << 2;
4957 } else {
4958 /*
4959 * TCP/IP headers are in the first mbuf; we can do
4960 * this the easy way.
4961 */
4962 struct tcphdr *th;
4963
4964 if (v4) {
4965 struct ip *ip =
4966 (void *)(mtod(m0, char *) + offset);
4967 th = (void *)(mtod(m0, char *) + hlen);
4968
4969 ip->ip_len = 0;
4970 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4971 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4972 } else {
4973 struct ip6_hdr *ip6 =
4974 (void *)(mtod(m0, char *) + offset);
4975 th = (void *)(mtod(m0, char *) + hlen);
4976
4977 ip6->ip6_plen = 0;
4978 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4979 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4980 }
4981 tcp_hlen = th->th_off << 2;
4982 }
4983 hlen += tcp_hlen;
4984 *cmdlenp |= NQTX_CMD_TSE;
4985
4986 if (v4) {
4987 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4988 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
4989 } else {
4990 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4991 *fieldsp |= NQTXD_FIELDS_TUXSM;
4992 }
4993 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
4994 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4995 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
4996 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
4997 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
4998 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
4999 } else {
5000 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5001 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5002 }
5003
5004 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5005 *fieldsp |= NQTXD_FIELDS_IXSM;
5006 cmdc |= NQTXC_CMD_IP4;
5007 }
5008
5009 if (m0->m_pkthdr.csum_flags &
5010 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5011 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5012 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5013 cmdc |= NQTXC_CMD_TCP;
5014 } else {
5015 cmdc |= NQTXC_CMD_UDP;
5016 }
5017 cmdc |= NQTXC_CMD_IP4;
5018 *fieldsp |= NQTXD_FIELDS_TUXSM;
5019 }
5020 if (m0->m_pkthdr.csum_flags &
5021 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5022 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5023 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5024 cmdc |= NQTXC_CMD_TCP;
5025 } else {
5026 cmdc |= NQTXC_CMD_UDP;
5027 }
5028 cmdc |= NQTXC_CMD_IP6;
5029 *fieldsp |= NQTXD_FIELDS_TUXSM;
5030 }
5031
5032 /* Fill in the context descriptor. */
5033 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5034 htole32(vl_len);
5035 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5036 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5037 htole32(cmdc);
5038 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5039 htole32(mssidx);
5040 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5041 DPRINTF(WM_DEBUG_TX,
5042 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5043 sc->sc_txnext, 0, vl_len));
5044 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5045 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5046 txs->txs_ndesc++;
5047 return 0;
5048 }
5049
5050 /*
5051 * wm_nq_start: [ifnet interface function]
5052 *
5053 * Start packet transmission on the interface for NEWQUEUE devices
5054 */
5055 static void
5056 wm_nq_start(struct ifnet *ifp)
5057 {
5058 struct wm_softc *sc = ifp->if_softc;
5059
5060 WM_TX_LOCK(sc);
5061 if (!sc->sc_stopping)
5062 wm_nq_start_locked(ifp);
5063 WM_TX_UNLOCK(sc);
5064 }
5065
5066 static void
5067 wm_nq_start_locked(struct ifnet *ifp)
5068 {
5069 struct wm_softc *sc = ifp->if_softc;
5070 struct mbuf *m0;
5071 struct m_tag *mtag;
5072 struct wm_txsoft *txs;
5073 bus_dmamap_t dmamap;
5074 int error, nexttx, lasttx = -1, seg, segs_needed;
5075 bool do_csum, sent;
5076
5077 KASSERT(WM_TX_LOCKED(sc));
5078
5079 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5080 return;
5081
5082 sent = false;
5083
5084 /*
5085 * Loop through the send queue, setting up transmit descriptors
5086 * until we drain the queue, or use up all available transmit
5087 * descriptors.
5088 */
5089 for (;;) {
5090 m0 = NULL;
5091
5092 /* Get a work queue entry. */
5093 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5094 wm_txintr(sc);
5095 if (sc->sc_txsfree == 0) {
5096 DPRINTF(WM_DEBUG_TX,
5097 ("%s: TX: no free job descriptors\n",
5098 device_xname(sc->sc_dev)));
5099 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5100 break;
5101 }
5102 }
5103
5104 /* Grab a packet off the queue. */
5105 IFQ_DEQUEUE(&ifp->if_snd, m0);
5106 if (m0 == NULL)
5107 break;
5108
5109 DPRINTF(WM_DEBUG_TX,
5110 ("%s: TX: have packet to transmit: %p\n",
5111 device_xname(sc->sc_dev), m0));
5112
5113 txs = &sc->sc_txsoft[sc->sc_txsnext];
5114 dmamap = txs->txs_dmamap;
5115
5116 /*
5117 * Load the DMA map. If this fails, the packet either
5118 * didn't fit in the allotted number of segments, or we
5119 * were short on resources. For the too-many-segments
5120 * case, we simply report an error and drop the packet,
5121 * since we can't sanely copy a jumbo packet to a single
5122 * buffer.
5123 */
5124 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5125 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5126 if (error) {
5127 if (error == EFBIG) {
5128 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5129 log(LOG_ERR, "%s: Tx packet consumes too many "
5130 "DMA segments, dropping...\n",
5131 device_xname(sc->sc_dev));
5132 wm_dump_mbuf_chain(sc, m0);
5133 m_freem(m0);
5134 continue;
5135 }
5136 /* Short on resources, just stop for now. */
5137 DPRINTF(WM_DEBUG_TX,
5138 ("%s: TX: dmamap load failed: %d\n",
5139 device_xname(sc->sc_dev), error));
5140 break;
5141 }
5142
5143 segs_needed = dmamap->dm_nsegs;
5144
5145 /*
5146 * Ensure we have enough descriptors free to describe
5147 * the packet. Note, we always reserve one descriptor
5148 * at the end of the ring due to the semantics of the
5149 * TDT register, plus one more in the event we need
5150 * to load offload context.
5151 */
5152 if (segs_needed > sc->sc_txfree - 2) {
5153 /*
5154 * Not enough free descriptors to transmit this
5155 * packet. We haven't committed anything yet,
5156 * so just unload the DMA map, put the packet
5157 			 * back on the queue, and punt. Notify the upper
5158 * layer that there are no more slots left.
5159 */
5160 DPRINTF(WM_DEBUG_TX,
5161 ("%s: TX: need %d (%d) descriptors, have %d\n",
5162 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5163 segs_needed, sc->sc_txfree - 1));
5164 ifp->if_flags |= IFF_OACTIVE;
5165 bus_dmamap_unload(sc->sc_dmat, dmamap);
5166 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5167 break;
5168 }
5169
5170 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5171
5172 DPRINTF(WM_DEBUG_TX,
5173 ("%s: TX: packet has %d (%d) DMA segments\n",
5174 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5175
5176 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5177
5178 /*
5179 * Store a pointer to the packet so that we can free it
5180 * later.
5181 *
5182 * Initially, we consider the number of descriptors the
5183 		 * packet uses to be the number of DMA segments. This may be
5184 * incremented by 1 if we do checksum offload (a descriptor
5185 * is used to set the checksum context).
5186 */
5187 txs->txs_mbuf = m0;
5188 txs->txs_firstdesc = sc->sc_txnext;
5189 txs->txs_ndesc = segs_needed;
5190
5191 /* Set up offload parameters for this packet. */
5192 uint32_t cmdlen, fields, dcmdlen;
5193 if (m0->m_pkthdr.csum_flags &
5194 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5195 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5196 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5197 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5198 &do_csum) != 0) {
5199 /* Error message already displayed. */
5200 bus_dmamap_unload(sc->sc_dmat, dmamap);
5201 continue;
5202 }
5203 } else {
5204 do_csum = false;
5205 cmdlen = 0;
5206 fields = 0;
5207 }
5208
5209 /* Sync the DMA map. */
5210 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5211 BUS_DMASYNC_PREWRITE);
5212
5213 /* Initialize the first transmit descriptor. */
5214 nexttx = sc->sc_txnext;
5215 if (!do_csum) {
5216 /* setup a legacy descriptor */
5217 wm_set_dma_addr(
5218 &sc->sc_txdescs[nexttx].wtx_addr,
5219 dmamap->dm_segs[0].ds_addr);
5220 sc->sc_txdescs[nexttx].wtx_cmdlen =
5221 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5222 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5223 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5224 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5225 NULL) {
5226 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5227 htole32(WTX_CMD_VLE);
5228 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5229 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5230 } else {
5231 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5232 }
5233 dcmdlen = 0;
5234 } else {
5235 /* setup an advanced data descriptor */
5236 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5237 htole64(dmamap->dm_segs[0].ds_addr);
5238 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5239 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5240 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
5241 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5242 htole32(fields);
5243 DPRINTF(WM_DEBUG_TX,
5244 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5245 device_xname(sc->sc_dev), nexttx,
5246 (uint64_t)dmamap->dm_segs[0].ds_addr));
5247 DPRINTF(WM_DEBUG_TX,
5248 ("\t 0x%08x%08x\n", fields,
5249 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5250 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5251 }
5252
5253 lasttx = nexttx;
5254 nexttx = WM_NEXTTX(sc, nexttx);
5255 /*
5256 		 * Fill in the remaining descriptors; the legacy and
5257 		 * advanced formats are the same from here on.
5258 */
5259 for (seg = 1; seg < dmamap->dm_nsegs;
5260 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5261 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5262 htole64(dmamap->dm_segs[seg].ds_addr);
5263 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5264 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5265 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5266 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5267 lasttx = nexttx;
5268
5269 DPRINTF(WM_DEBUG_TX,
5270 ("%s: TX: desc %d: %#" PRIx64 ", "
5271 "len %#04zx\n",
5272 device_xname(sc->sc_dev), nexttx,
5273 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5274 dmamap->dm_segs[seg].ds_len));
5275 }
5276
5277 KASSERT(lasttx != -1);
5278
5279 /*
5280 * Set up the command byte on the last descriptor of
5281 * the packet. If we're in the interrupt delay window,
5282 * delay the interrupt.
5283 */
5284 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5285 (NQTX_CMD_EOP | NQTX_CMD_RS));
5286 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5287 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5288
5289 txs->txs_lastdesc = lasttx;
5290
5291 DPRINTF(WM_DEBUG_TX,
5292 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5293 device_xname(sc->sc_dev),
5294 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5295
5296 /* Sync the descriptors we're using. */
5297 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5298 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5299
5300 /* Give the packet to the chip. */
5301 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5302 sent = true;
5303
5304 DPRINTF(WM_DEBUG_TX,
5305 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5306
5307 DPRINTF(WM_DEBUG_TX,
5308 ("%s: TX: finished transmitting packet, job %d\n",
5309 device_xname(sc->sc_dev), sc->sc_txsnext));
5310
5311 /* Advance the tx pointer. */
5312 sc->sc_txfree -= txs->txs_ndesc;
5313 sc->sc_txnext = nexttx;
5314
5315 sc->sc_txsfree--;
5316 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5317
5318 /* Pass the packet to any BPF listeners. */
5319 bpf_mtap(ifp, m0);
5320 }
5321
5322 if (m0 != NULL) {
5323 ifp->if_flags |= IFF_OACTIVE;
5324 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5325 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5326 m_freem(m0);
5327 }
5328
5329 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5330 /* No more slots; notify upper layer. */
5331 ifp->if_flags |= IFF_OACTIVE;
5332 }
5333
5334 if (sent) {
5335 /* Set a watchdog timer in case the chip flakes out. */
5336 ifp->if_timer = 5;
5337 }
5338 }
5339
5340 /* Interrupt */
5341
5342 /*
5343 * wm_txintr:
5344 *
5345 * Helper; handle transmit interrupts.
5346 */
5347 static void
5348 wm_txintr(struct wm_softc *sc)
5349 {
5350 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5351 struct wm_txsoft *txs;
5352 uint8_t status;
5353 int i;
5354
5355 if (sc->sc_stopping)
5356 return;
5357
5358 ifp->if_flags &= ~IFF_OACTIVE;
5359
5360 /*
5361 * Go through the Tx list and free mbufs for those
5362 * frames which have been transmitted.
5363 */
5364 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5365 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5366 txs = &sc->sc_txsoft[i];
5367
5368 DPRINTF(WM_DEBUG_TX,
5369 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5370
5371 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5372 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5373
5374 status =
5375 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5376 if ((status & WTX_ST_DD) == 0) {
5377 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5378 BUS_DMASYNC_PREREAD);
5379 break;
5380 }
5381
5382 DPRINTF(WM_DEBUG_TX,
5383 ("%s: TX: job %d done: descs %d..%d\n",
5384 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5385 txs->txs_lastdesc));
5386
5387 /*
5388 * XXX We should probably be using the statistics
5389 * XXX registers, but I don't know if they exist
5390 * XXX on chips before the i82544.
5391 */
5392
5393 #ifdef WM_EVENT_COUNTERS
5394 if (status & WTX_ST_TU)
5395 WM_EVCNT_INCR(&sc->sc_ev_tu);
5396 #endif /* WM_EVENT_COUNTERS */
5397
5398 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5399 ifp->if_oerrors++;
5400 if (status & WTX_ST_LC)
5401 log(LOG_WARNING, "%s: late collision\n",
5402 device_xname(sc->sc_dev));
5403 else if (status & WTX_ST_EC) {
5404 ifp->if_collisions += 16;
5405 log(LOG_WARNING, "%s: excessive collisions\n",
5406 device_xname(sc->sc_dev));
5407 }
5408 } else
5409 ifp->if_opackets++;
5410
5411 sc->sc_txfree += txs->txs_ndesc;
5412 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5413 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5414 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5415 m_freem(txs->txs_mbuf);
5416 txs->txs_mbuf = NULL;
5417 }
5418
5419 /* Update the dirty transmit buffer pointer. */
5420 sc->sc_txsdirty = i;
5421 DPRINTF(WM_DEBUG_TX,
5422 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5423
5424 /*
5425 * If there are no more pending transmissions, cancel the watchdog
5426 * timer.
5427 */
5428 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5429 ifp->if_timer = 0;
5430 }
5431
5432 /*
5433 * wm_rxintr:
5434 *
5435 * Helper; handle receive interrupts.
5436 */
5437 static void
5438 wm_rxintr(struct wm_softc *sc)
5439 {
5440 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5441 struct wm_rxsoft *rxs;
5442 struct mbuf *m;
5443 int i, len;
5444 uint8_t status, errors;
5445 uint16_t vlantag;
5446
5447 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5448 rxs = &sc->sc_rxsoft[i];
5449
5450 DPRINTF(WM_DEBUG_RX,
5451 ("%s: RX: checking descriptor %d\n",
5452 device_xname(sc->sc_dev), i));
5453
5454 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5455
5456 status = sc->sc_rxdescs[i].wrx_status;
5457 errors = sc->sc_rxdescs[i].wrx_errors;
5458 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5459 vlantag = sc->sc_rxdescs[i].wrx_special;
5460
5461 if ((status & WRX_ST_DD) == 0) {
5462 /* We have processed all of the receive descriptors. */
5463 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5464 break;
5465 }
5466
5467 if (__predict_false(sc->sc_rxdiscard)) {
5468 DPRINTF(WM_DEBUG_RX,
5469 ("%s: RX: discarding contents of descriptor %d\n",
5470 device_xname(sc->sc_dev), i));
5471 WM_INIT_RXDESC(sc, i);
5472 if (status & WRX_ST_EOP) {
5473 /* Reset our state. */
5474 DPRINTF(WM_DEBUG_RX,
5475 ("%s: RX: resetting rxdiscard -> 0\n",
5476 device_xname(sc->sc_dev)));
5477 sc->sc_rxdiscard = 0;
5478 }
5479 continue;
5480 }
5481
5482 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5483 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5484
5485 m = rxs->rxs_mbuf;
5486
5487 /*
5488 * Add a new receive buffer to the ring, unless of
5489 * course the length is zero. Treat the latter as a
5490 * failed mapping.
5491 */
5492 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5493 /*
5494 * Failed, throw away what we've done so
5495 * far, and discard the rest of the packet.
5496 */
5497 ifp->if_ierrors++;
5498 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5499 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5500 WM_INIT_RXDESC(sc, i);
5501 if ((status & WRX_ST_EOP) == 0)
5502 sc->sc_rxdiscard = 1;
5503 if (sc->sc_rxhead != NULL)
5504 m_freem(sc->sc_rxhead);
5505 WM_RXCHAIN_RESET(sc);
5506 DPRINTF(WM_DEBUG_RX,
5507 ("%s: RX: Rx buffer allocation failed, "
5508 "dropping packet%s\n", device_xname(sc->sc_dev),
5509 sc->sc_rxdiscard ? " (discard)" : ""));
5510 continue;
5511 }
5512
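		/*
		 * A frame larger than a single receive buffer spans
		 * several descriptors; the pieces accumulate on the
		 * sc_rxhead/sc_rxtail chain until EOP is seen.
		 */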
5513 m->m_len = len;
5514 sc->sc_rxlen += len;
5515 DPRINTF(WM_DEBUG_RX,
5516 ("%s: RX: buffer at %p len %d\n",
5517 device_xname(sc->sc_dev), m->m_data, len));
5518
5519 /* If this is not the end of the packet, keep looking. */
5520 if ((status & WRX_ST_EOP) == 0) {
5521 WM_RXCHAIN_LINK(sc, m);
5522 DPRINTF(WM_DEBUG_RX,
5523 ("%s: RX: not yet EOP, rxlen -> %d\n",
5524 device_xname(sc->sc_dev), sc->sc_rxlen));
5525 continue;
5526 }
5527
5528 /*
5529 		 * Okay, we have the entire packet now.  The chip is
5530 		 * configured to include the FCS except on the I350, I354
5531 		 * and I21[01] (not all chips can be configured to strip
5532 		 * it), so we need to trim it.  We may also need to adjust
5533 		 * the length of the previous mbuf in the chain if the
5534 		 * current mbuf is too short.
5535 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
5536 		 * is always set on the I350, so the FCS is already stripped there.
5537 */
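		/*
		 * For example (hypothetical numbers): if the final mbuf
		 * of a chain holds only 2 bytes, the 4-byte FCS straddles
		 * the last two mbufs, so 2 bytes are trimmed from the
		 * previous mbuf and the final mbuf is emptied.
		 */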
5538 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5539 && (sc->sc_type != WM_T_I210)
5540 && (sc->sc_type != WM_T_I211)) {
5541 if (m->m_len < ETHER_CRC_LEN) {
5542 sc->sc_rxtail->m_len
5543 -= (ETHER_CRC_LEN - m->m_len);
5544 m->m_len = 0;
5545 } else
5546 m->m_len -= ETHER_CRC_LEN;
5547 len = sc->sc_rxlen - ETHER_CRC_LEN;
5548 } else
5549 len = sc->sc_rxlen;
5550
5551 WM_RXCHAIN_LINK(sc, m);
5552
5553 *sc->sc_rxtailp = NULL;
5554 m = sc->sc_rxhead;
5555
5556 WM_RXCHAIN_RESET(sc);
5557
5558 DPRINTF(WM_DEBUG_RX,
5559 ("%s: RX: have entire packet, len -> %d\n",
5560 device_xname(sc->sc_dev), len));
5561
5562 /* If an error occurred, update stats and drop the packet. */
5563 if (errors &
5564 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5565 if (errors & WRX_ER_SE)
5566 log(LOG_WARNING, "%s: symbol error\n",
5567 device_xname(sc->sc_dev));
5568 else if (errors & WRX_ER_SEQ)
5569 log(LOG_WARNING, "%s: receive sequence error\n",
5570 device_xname(sc->sc_dev));
5571 else if (errors & WRX_ER_CE)
5572 log(LOG_WARNING, "%s: CRC error\n",
5573 device_xname(sc->sc_dev));
5574 m_freem(m);
5575 continue;
5576 }
5577
5578 /* No errors. Receive the packet. */
5579 m->m_pkthdr.rcvif = ifp;
5580 m->m_pkthdr.len = len;
5581
5582 /*
5583 * If VLANs are enabled, VLAN packets have been unwrapped
5584 * for us. Associate the tag with the packet.
5585 */
5586 		/* XXX should check for I350 and I354 */
5587 if ((status & WRX_ST_VP) != 0) {
5588 VLAN_INPUT_TAG(ifp, m,
5589 le16toh(vlantag),
5590 continue);
5591 }
5592
5593 /* Set up checksum info for this packet. */
5594 if ((status & WRX_ST_IXSM) == 0) {
5595 if (status & WRX_ST_IPCS) {
5596 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5597 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5598 if (errors & WRX_ER_IPE)
5599 m->m_pkthdr.csum_flags |=
5600 M_CSUM_IPv4_BAD;
5601 }
5602 if (status & WRX_ST_TCPCS) {
5603 /*
5604 * Note: we don't know if this was TCP or UDP,
5605 * so we just set both bits, and expect the
5606 * upper layers to deal.
5607 */
5608 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5609 m->m_pkthdr.csum_flags |=
5610 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5611 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5612 if (errors & WRX_ER_TCPE)
5613 m->m_pkthdr.csum_flags |=
5614 M_CSUM_TCP_UDP_BAD;
5615 }
5616 }
5617
5618 ifp->if_ipackets++;
5619
5620 WM_RX_UNLOCK(sc);
5621
5622 /* Pass this up to any BPF listeners. */
5623 bpf_mtap(ifp, m);
5624
5625 /* Pass it on. */
5626 (*ifp->if_input)(ifp, m);
5627
5628 WM_RX_LOCK(sc);
5629
5630 if (sc->sc_stopping)
5631 break;
5632 }
5633
5634 /* Update the receive pointer. */
5635 sc->sc_rxptr = i;
5636
5637 DPRINTF(WM_DEBUG_RX,
5638 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5639 }
5640
5641 /*
5642 * wm_linkintr_gmii:
5643 *
5644 * Helper; handle link interrupts for GMII.
5645 */
5646 static void
5647 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5648 {
5649
5650 KASSERT(WM_TX_LOCKED(sc));
5651
5652 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5653 __func__));
5654
5655 if (icr & ICR_LSC) {
5656 DPRINTF(WM_DEBUG_LINK,
5657 ("%s: LINK: LSC -> mii_pollstat\n",
5658 device_xname(sc->sc_dev)));
5659 mii_pollstat(&sc->sc_mii);
5660 if (sc->sc_type == WM_T_82543) {
5661 int miistatus, active;
5662
5663 /*
5664 * With 82543, we need to force speed and
5665 * duplex on the MAC equal to what the PHY
5666 * speed and duplex configuration is.
5667 */
5668 miistatus = sc->sc_mii.mii_media_status;
5669
5670 if (miistatus & IFM_ACTIVE) {
5671 active = sc->sc_mii.mii_media_active;
5672 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5673 switch (IFM_SUBTYPE(active)) {
5674 case IFM_10_T:
5675 sc->sc_ctrl |= CTRL_SPEED_10;
5676 break;
5677 case IFM_100_TX:
5678 sc->sc_ctrl |= CTRL_SPEED_100;
5679 break;
5680 case IFM_1000_T:
5681 sc->sc_ctrl |= CTRL_SPEED_1000;
5682 break;
5683 default:
5684 /*
5685 					 * Fiber?
5686 					 * Should not enter here.
5687 */
5688 printf("unknown media (%x)\n",
5689 active);
5690 break;
5691 }
5692 if (active & IFM_FDX)
5693 sc->sc_ctrl |= CTRL_FD;
5694 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5695 }
5696 } else if ((sc->sc_type == WM_T_ICH8)
5697 && (sc->sc_phytype == WMPHY_IGP_3)) {
5698 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5699 } else if (sc->sc_type == WM_T_PCH) {
5700 wm_k1_gig_workaround_hv(sc,
5701 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5702 }
5703
5704 if ((sc->sc_phytype == WMPHY_82578)
5705 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5706 == IFM_1000_T)) {
5707
5708 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5709 delay(200*1000); /* XXX too big */
5710
5711 /* Link stall fix for link up */
5712 wm_gmii_hv_writereg(sc->sc_dev, 1,
5713 HV_MUX_DATA_CTRL,
5714 HV_MUX_DATA_CTRL_GEN_TO_MAC
5715 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5716 wm_gmii_hv_writereg(sc->sc_dev, 1,
5717 HV_MUX_DATA_CTRL,
5718 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5719 }
5720 }
5721 } else if (icr & ICR_RXSEQ) {
5722 DPRINTF(WM_DEBUG_LINK,
5723 ("%s: LINK Receive sequence error\n",
5724 device_xname(sc->sc_dev)));
5725 }
5726 }
5727
5728 /*
5729 * wm_linkintr_tbi:
5730 *
5731 * Helper; handle link interrupts for TBI mode.
5732 */
5733 static void
5734 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5735 {
5736 uint32_t status;
5737
5738 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5739 __func__));
5740
5741 status = CSR_READ(sc, WMREG_STATUS);
5742 if (icr & ICR_LSC) {
5743 if (status & STATUS_LU) {
5744 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5745 device_xname(sc->sc_dev),
5746 (status & STATUS_FD) ? "FDX" : "HDX"));
5747 /*
5748 * NOTE: CTRL will update TFCE and RFCE automatically,
5749 * so we should update sc->sc_ctrl
5750 */
5751
5752 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5753 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5754 sc->sc_fcrtl &= ~FCRTL_XONE;
5755 if (status & STATUS_FD)
5756 sc->sc_tctl |=
5757 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5758 else
5759 sc->sc_tctl |=
5760 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5761 if (sc->sc_ctrl & CTRL_TFCE)
5762 sc->sc_fcrtl |= FCRTL_XONE;
5763 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5764 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5765 WMREG_OLD_FCRTL : WMREG_FCRTL,
5766 sc->sc_fcrtl);
5767 sc->sc_tbi_linkup = 1;
5768 } else {
5769 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5770 device_xname(sc->sc_dev)));
5771 sc->sc_tbi_linkup = 0;
5772 }
5773 wm_tbi_set_linkled(sc);
5774 } else if (icr & ICR_RXSEQ) {
5775 DPRINTF(WM_DEBUG_LINK,
5776 ("%s: LINK: Receive sequence error\n",
5777 device_xname(sc->sc_dev)));
5778 }
5779 }
5780
5781 /*
5782 * wm_linkintr:
5783 *
5784 * Helper; handle link interrupts.
5785 */
5786 static void
5787 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5788 {
5789
5790 if (sc->sc_flags & WM_F_HAS_MII)
5791 wm_linkintr_gmii(sc, icr);
5792 else
5793 wm_linkintr_tbi(sc, icr);
5794 }
5795
5796 /*
5797 * wm_intr:
5798 *
5799 * Interrupt service routine.
5800 */
5801 static int
5802 wm_intr(void *arg)
5803 {
5804 struct wm_softc *sc = arg;
5805 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5806 uint32_t icr;
5807 int handled = 0;
5808
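	/*
	 * Reading ICR acknowledges (clears) the interrupt causes it
	 * reports, so keep reading and servicing until none of the
	 * causes we enabled remain asserted.
	 */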
5809 while (1 /* CONSTCOND */) {
5810 icr = CSR_READ(sc, WMREG_ICR);
5811 if ((icr & sc->sc_icr) == 0)
5812 break;
5813 rnd_add_uint32(&sc->rnd_source, icr);
5814
5815 WM_RX_LOCK(sc);
5816
5817 if (sc->sc_stopping) {
5818 WM_RX_UNLOCK(sc);
5819 break;
5820 }
5821
5822 handled = 1;
5823
5824 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5825 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5826 DPRINTF(WM_DEBUG_RX,
5827 ("%s: RX: got Rx intr 0x%08x\n",
5828 device_xname(sc->sc_dev),
5829 icr & (ICR_RXDMT0|ICR_RXT0)));
5830 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5831 }
5832 #endif
5833 wm_rxintr(sc);
5834
5835 WM_RX_UNLOCK(sc);
5836 WM_TX_LOCK(sc);
5837
5838 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5839 if (icr & ICR_TXDW) {
5840 DPRINTF(WM_DEBUG_TX,
5841 ("%s: TX: got TXDW interrupt\n",
5842 device_xname(sc->sc_dev)));
5843 WM_EVCNT_INCR(&sc->sc_ev_txdw);
5844 }
5845 #endif
5846 wm_txintr(sc);
5847
5848 if (icr & (ICR_LSC|ICR_RXSEQ)) {
5849 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5850 wm_linkintr(sc, icr);
5851 }
5852
5853 WM_TX_UNLOCK(sc);
5854
5855 if (icr & ICR_RXO) {
5856 #if defined(WM_DEBUG)
5857 log(LOG_WARNING, "%s: Receive overrun\n",
5858 device_xname(sc->sc_dev));
5859 #endif /* defined(WM_DEBUG) */
5860 }
5861 }
5862
5863 if (handled) {
5864 /* Try to get more packets going. */
5865 ifp->if_start(ifp);
5866 }
5867
5868 return handled;
5869 }
5870
5871 /*
5872 * Media related.
5873 * GMII, SGMII, TBI (and SERDES)
5874 */
5875
5876 /* GMII related */
5877
5878 /*
5879 * wm_gmii_reset:
5880 *
5881 * Reset the PHY.
5882 */
5883 static void
5884 wm_gmii_reset(struct wm_softc *sc)
5885 {
5886 uint32_t reg;
5887 int rv;
5888
5889 /* get phy semaphore */
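	/*
	 * Which arbitration mechanism protects the PHY depends on the
	 * MAC generation: the SWSM software semaphore on 8257[1-4]/82583,
	 * the per-function SW/FW semaphore on 8257[56]/82580/I2xx/80003,
	 * and the extended configuration (SW/FW/HW) flag on ICH/PCH.
	 */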
5890 switch (sc->sc_type) {
5891 case WM_T_82571:
5892 case WM_T_82572:
5893 case WM_T_82573:
5894 case WM_T_82574:
5895 case WM_T_82583:
5896 /* XXX should get sw semaphore, too */
5897 rv = wm_get_swsm_semaphore(sc);
5898 break;
5899 case WM_T_82575:
5900 case WM_T_82576:
5901 case WM_T_82580:
5902 case WM_T_I350:
5903 case WM_T_I354:
5904 case WM_T_I210:
5905 case WM_T_I211:
5906 case WM_T_80003:
5907 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5908 break;
5909 case WM_T_ICH8:
5910 case WM_T_ICH9:
5911 case WM_T_ICH10:
5912 case WM_T_PCH:
5913 case WM_T_PCH2:
5914 case WM_T_PCH_LPT:
5915 rv = wm_get_swfwhw_semaphore(sc);
5916 break;
5917 default:
5918 		/* nothing to do */
5919 rv = 0;
5920 break;
5921 }
5922 if (rv != 0) {
5923 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5924 __func__);
5925 return;
5926 }
5927
5928 switch (sc->sc_type) {
5929 case WM_T_82542_2_0:
5930 case WM_T_82542_2_1:
5931 /* null */
5932 break;
5933 case WM_T_82543:
5934 /*
5935 * With 82543, we need to force speed and duplex on the MAC
5936 * equal to what the PHY speed and duplex configuration is.
5937 * In addition, we need to perform a hardware reset on the PHY
5938 * to take it out of reset.
5939 */
5940 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5941 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5942
5943 /* The PHY reset pin is active-low. */
5944 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5945 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5946 CTRL_EXT_SWDPIN(4));
5947 reg |= CTRL_EXT_SWDPIO(4);
5948
5949 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5950 CSR_WRITE_FLUSH(sc);
5951 delay(10*1000);
5952
5953 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5954 CSR_WRITE_FLUSH(sc);
5955 delay(150);
5956 #if 0
5957 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5958 #endif
5959 delay(20*1000); /* XXX extra delay to get PHY ID? */
5960 break;
5961 case WM_T_82544: /* reset 10000us */
5962 case WM_T_82540:
5963 case WM_T_82545:
5964 case WM_T_82545_3:
5965 case WM_T_82546:
5966 case WM_T_82546_3:
5967 case WM_T_82541:
5968 case WM_T_82541_2:
5969 case WM_T_82547:
5970 case WM_T_82547_2:
5971 case WM_T_82571: /* reset 100us */
5972 case WM_T_82572:
5973 case WM_T_82573:
5974 case WM_T_82574:
5975 case WM_T_82575:
5976 case WM_T_82576:
5977 case WM_T_82580:
5978 case WM_T_I350:
5979 case WM_T_I354:
5980 case WM_T_I210:
5981 case WM_T_I211:
5982 case WM_T_82583:
5983 case WM_T_80003:
5984 /* generic reset */
5985 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5986 CSR_WRITE_FLUSH(sc);
5987 delay(20000);
5988 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5989 CSR_WRITE_FLUSH(sc);
5990 delay(20000);
5991
5992 if ((sc->sc_type == WM_T_82541)
5993 || (sc->sc_type == WM_T_82541_2)
5994 || (sc->sc_type == WM_T_82547)
5995 || (sc->sc_type == WM_T_82547_2)) {
5996 /* workaround for igp are done in igp_reset() */
5997 /* XXX add code to set LED after phy reset */
5998 }
5999 break;
6000 case WM_T_ICH8:
6001 case WM_T_ICH9:
6002 case WM_T_ICH10:
6003 case WM_T_PCH:
6004 case WM_T_PCH2:
6005 case WM_T_PCH_LPT:
6006 /* generic reset */
6007 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6008 CSR_WRITE_FLUSH(sc);
6009 delay(100);
6010 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6011 CSR_WRITE_FLUSH(sc);
6012 delay(150);
6013 break;
6014 default:
6015 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6016 __func__);
6017 break;
6018 }
6019
6020 /* release PHY semaphore */
6021 switch (sc->sc_type) {
6022 case WM_T_82571:
6023 case WM_T_82572:
6024 case WM_T_82573:
6025 case WM_T_82574:
6026 case WM_T_82583:
6027 /* XXX should put sw semaphore, too */
6028 wm_put_swsm_semaphore(sc);
6029 break;
6030 case WM_T_82575:
6031 case WM_T_82576:
6032 case WM_T_82580:
6033 case WM_T_I350:
6034 case WM_T_I354:
6035 case WM_T_I210:
6036 case WM_T_I211:
6037 case WM_T_80003:
6038 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6039 break;
6040 case WM_T_ICH8:
6041 case WM_T_ICH9:
6042 case WM_T_ICH10:
6043 case WM_T_PCH:
6044 case WM_T_PCH2:
6045 case WM_T_PCH_LPT:
6046 wm_put_swfwhw_semaphore(sc);
6047 break;
6048 default:
6049 		/* nothing to do */
6051 break;
6052 }
6053
6054 /* get_cfg_done */
6055 wm_get_cfg_done(sc);
6056
6057 /* extra setup */
6058 switch (sc->sc_type) {
6059 case WM_T_82542_2_0:
6060 case WM_T_82542_2_1:
6061 case WM_T_82543:
6062 case WM_T_82544:
6063 case WM_T_82540:
6064 case WM_T_82545:
6065 case WM_T_82545_3:
6066 case WM_T_82546:
6067 case WM_T_82546_3:
6068 case WM_T_82541_2:
6069 case WM_T_82547_2:
6070 case WM_T_82571:
6071 case WM_T_82572:
6072 case WM_T_82573:
6073 case WM_T_82574:
6074 case WM_T_82575:
6075 case WM_T_82576:
6076 case WM_T_82580:
6077 case WM_T_I350:
6078 case WM_T_I354:
6079 case WM_T_I210:
6080 case WM_T_I211:
6081 case WM_T_82583:
6082 case WM_T_80003:
6083 /* null */
6084 break;
6085 case WM_T_82541:
6086 case WM_T_82547:
6087 		/* XXX Actively configure the LED after PHY reset */
6088 break;
6089 case WM_T_ICH8:
6090 case WM_T_ICH9:
6091 case WM_T_ICH10:
6092 case WM_T_PCH:
6093 case WM_T_PCH2:
6094 case WM_T_PCH_LPT:
6095 		/* Allow time for h/w to get to a quiescent state after reset */
6096 delay(10*1000);
6097
6098 if (sc->sc_type == WM_T_PCH)
6099 wm_hv_phy_workaround_ich8lan(sc);
6100
6101 if (sc->sc_type == WM_T_PCH2)
6102 wm_lv_phy_workaround_ich8lan(sc);
6103
6104 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6105 /*
6106 			 * Dummy read to clear the PHY wakeup bit after LCD
6107 			 * reset.
6108 */
6109 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6110 }
6111
6112 /*
6113 		 * XXX Configure the LCD with the extended configuration region
6114 * in NVM
6115 */
6116
6117 /* Configure the LCD with the OEM bits in NVM */
6118 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6119 || (sc->sc_type == WM_T_PCH_LPT)) {
6120 /*
6121 * Disable LPLU.
6122 * XXX It seems that 82567 has LPLU, too.
6123 */
6124 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6125 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6126 reg |= HV_OEM_BITS_ANEGNOW;
6127 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6128 }
6129 break;
6130 default:
6131 panic("%s: unknown type\n", __func__);
6132 break;
6133 }
6134 }
6135
6136 /*
6137 * wm_get_phy_id_82575:
6138 *
6139 * Return PHY ID. Return -1 if it failed.
6140 */
6141 static int
6142 wm_get_phy_id_82575(struct wm_softc *sc)
6143 {
6144 uint32_t reg;
6145 int phyid = -1;
6146
6147 /* XXX */
6148 if ((sc->sc_flags & WM_F_SGMII) == 0)
6149 return -1;
6150
6151 if (wm_sgmii_uses_mdio(sc)) {
6152 switch (sc->sc_type) {
6153 case WM_T_82575:
6154 case WM_T_82576:
6155 reg = CSR_READ(sc, WMREG_MDIC);
6156 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6157 break;
6158 case WM_T_82580:
6159 case WM_T_I350:
6160 case WM_T_I354:
6161 case WM_T_I210:
6162 case WM_T_I211:
6163 reg = CSR_READ(sc, WMREG_MDICNFG);
6164 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6165 break;
6166 default:
6167 return -1;
6168 }
6169 }
6170
6171 return phyid;
6172 }
6173
6175 /*
6176 * wm_gmii_mediainit:
6177 *
6178 * Initialize media for use on 1000BASE-T devices.
6179 */
6180 static void
6181 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6182 {
6183 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6184 struct mii_data *mii = &sc->sc_mii;
6185 uint32_t reg;
6186
6187 /* We have GMII. */
6188 sc->sc_flags |= WM_F_HAS_MII;
6189
6190 if (sc->sc_type == WM_T_80003)
6191 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6192 else
6193 sc->sc_tipg = TIPG_1000T_DFLT;
6194
6195 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6196 if ((sc->sc_type == WM_T_82580)
6197 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6198 || (sc->sc_type == WM_T_I211)) {
6199 reg = CSR_READ(sc, WMREG_PHPM);
6200 reg &= ~PHPM_GO_LINK_D;
6201 CSR_WRITE(sc, WMREG_PHPM, reg);
6202 }
6203
6204 /*
6205 * Let the chip set speed/duplex on its own based on
6206 * signals from the PHY.
6207 * XXXbouyer - I'm not sure this is right for the 80003,
6208 * the em driver only sets CTRL_SLU here - but it seems to work.
6209 */
6210 sc->sc_ctrl |= CTRL_SLU;
6211 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6212
6213 /* Initialize our media structures and probe the GMII. */
6214 mii->mii_ifp = ifp;
6215
6216 /*
6217 * Determine the PHY access method.
6218 *
6219 * For SGMII, use SGMII specific method.
6220 *
6221 * For some devices, we can determine the PHY access method
6222 * from sc_type.
6223 *
6224 * For ICH8 variants, it's difficult to determine the PHY access
6225 * method by sc_type, so use the PCI product ID for some devices.
6226  * For other ICH8 variants, try the igp method first.  If the PHY
6227  * can't be detected that way, fall back to the bm method.
6228 */
6229 switch (prodid) {
6230 case PCI_PRODUCT_INTEL_PCH_M_LM:
6231 case PCI_PRODUCT_INTEL_PCH_M_LC:
6232 /* 82577 */
6233 sc->sc_phytype = WMPHY_82577;
6234 mii->mii_readreg = wm_gmii_hv_readreg;
6235 mii->mii_writereg = wm_gmii_hv_writereg;
6236 break;
6237 case PCI_PRODUCT_INTEL_PCH_D_DM:
6238 case PCI_PRODUCT_INTEL_PCH_D_DC:
6239 /* 82578 */
6240 sc->sc_phytype = WMPHY_82578;
6241 mii->mii_readreg = wm_gmii_hv_readreg;
6242 mii->mii_writereg = wm_gmii_hv_writereg;
6243 break;
6244 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6245 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6246 /* 82579 */
6247 sc->sc_phytype = WMPHY_82579;
6248 mii->mii_readreg = wm_gmii_hv_readreg;
6249 mii->mii_writereg = wm_gmii_hv_writereg;
6250 break;
6251 case PCI_PRODUCT_INTEL_I217_LM:
6252 case PCI_PRODUCT_INTEL_I217_V:
6253 case PCI_PRODUCT_INTEL_I218_LM:
6254 case PCI_PRODUCT_INTEL_I218_V:
6255 /* I21[78] */
6256 mii->mii_readreg = wm_gmii_hv_readreg;
6257 mii->mii_writereg = wm_gmii_hv_writereg;
6258 break;
6259 case PCI_PRODUCT_INTEL_82801I_BM:
6260 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6261 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6262 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6263 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6264 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6265 /* 82567 */
6266 sc->sc_phytype = WMPHY_BM;
6267 mii->mii_readreg = wm_gmii_bm_readreg;
6268 mii->mii_writereg = wm_gmii_bm_writereg;
6269 break;
6270 default:
6271 if (((sc->sc_flags & WM_F_SGMII) != 0)
6272 && !wm_sgmii_uses_mdio(sc)){
6273 mii->mii_readreg = wm_sgmii_readreg;
6274 mii->mii_writereg = wm_sgmii_writereg;
6275 } else if (sc->sc_type >= WM_T_80003) {
6276 mii->mii_readreg = wm_gmii_i80003_readreg;
6277 mii->mii_writereg = wm_gmii_i80003_writereg;
6278 } else if (sc->sc_type >= WM_T_I210) {
6279 mii->mii_readreg = wm_gmii_i82544_readreg;
6280 mii->mii_writereg = wm_gmii_i82544_writereg;
6281 } else if (sc->sc_type >= WM_T_82580) {
6282 sc->sc_phytype = WMPHY_82580;
6283 mii->mii_readreg = wm_gmii_82580_readreg;
6284 mii->mii_writereg = wm_gmii_82580_writereg;
6285 } else if (sc->sc_type >= WM_T_82544) {
6286 mii->mii_readreg = wm_gmii_i82544_readreg;
6287 mii->mii_writereg = wm_gmii_i82544_writereg;
6288 } else {
6289 mii->mii_readreg = wm_gmii_i82543_readreg;
6290 mii->mii_writereg = wm_gmii_i82543_writereg;
6291 }
6292 break;
6293 }
6294 mii->mii_statchg = wm_gmii_statchg;
6295
6296 wm_gmii_reset(sc);
6297
6298 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6299 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6300 wm_gmii_mediastatus);
6301
6302 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6303 || (sc->sc_type == WM_T_82580)
6304 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6305 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6306 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6307 /* Attach only one port */
6308 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6309 MII_OFFSET_ANY, MIIF_DOPAUSE);
6310 } else {
6311 int i, id;
6312 uint32_t ctrl_ext;
6313
6314 id = wm_get_phy_id_82575(sc);
6315 if (id != -1) {
6316 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6317 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6318 }
6319 if ((id == -1)
6320 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6321 /* Power on sgmii phy if it is disabled */
6322 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6323 CSR_WRITE(sc, WMREG_CTRL_EXT,
6324 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6325 CSR_WRITE_FLUSH(sc);
6326 delay(300*1000); /* XXX too long */
6327
6328 				/* PHY addresses 1 through 7 */
6329 for (i = 1; i < 8; i++)
6330 mii_attach(sc->sc_dev, &sc->sc_mii,
6331 0xffffffff, i, MII_OFFSET_ANY,
6332 MIIF_DOPAUSE);
6333
6334 				/* Restore previous SFP cage power state */
6335 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6336 }
6337 }
6338 } else {
6339 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6340 MII_OFFSET_ANY, MIIF_DOPAUSE);
6341 }
6342
6343 /*
6344 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6345 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6346 */
6347 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6348 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6349 wm_set_mdio_slow_mode_hv(sc);
6350 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6351 MII_OFFSET_ANY, MIIF_DOPAUSE);
6352 }
6353
6354 /*
6355 * (For ICH8 variants)
6356 * If PHY detection failed, use BM's r/w function and retry.
6357 */
6358 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6359 /* if failed, retry with *_bm_* */
6360 mii->mii_readreg = wm_gmii_bm_readreg;
6361 mii->mii_writereg = wm_gmii_bm_writereg;
6362
6363 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6364 MII_OFFSET_ANY, MIIF_DOPAUSE);
6365 }
6366
6367 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6368 		/* No PHY was found */
6369 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6370 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6371 sc->sc_phytype = WMPHY_NONE;
6372 } else {
6373 /*
6374 * PHY Found!
6375 * Check PHY type.
6376 */
6377 uint32_t model;
6378 struct mii_softc *child;
6379
6380 child = LIST_FIRST(&mii->mii_phys);
6381 if (device_is_a(child->mii_dev, "igphy")) {
6382 struct igphy_softc *isc = (struct igphy_softc *)child;
6383
6384 model = isc->sc_mii.mii_mpd_model;
6385 if (model == MII_MODEL_yyINTEL_I82566)
6386 sc->sc_phytype = WMPHY_IGP_3;
6387 }
6388
6389 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6390 }
6391 }
6392
6393 /*
6394 * wm_gmii_mediastatus: [ifmedia interface function]
6395 *
6396 * Get the current interface media status on a 1000BASE-T device.
6397 */
6398 static void
6399 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6400 {
6401 struct wm_softc *sc = ifp->if_softc;
6402
6403 ether_mediastatus(ifp, ifmr);
6404 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6405 | sc->sc_flowflags;
6406 }
6407
6408 /*
6409 * wm_gmii_mediachange: [ifmedia interface function]
6410 *
6411 * Set hardware to newly-selected media on a 1000BASE-T device.
6412 */
6413 static int
6414 wm_gmii_mediachange(struct ifnet *ifp)
6415 {
6416 struct wm_softc *sc = ifp->if_softc;
6417 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6418 int rc;
6419
6420 if ((ifp->if_flags & IFF_UP) == 0)
6421 return 0;
6422
6423 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6424 sc->sc_ctrl |= CTRL_SLU;
6425 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6426 || (sc->sc_type > WM_T_82543)) {
6427 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6428 } else {
6429 sc->sc_ctrl &= ~CTRL_ASDE;
6430 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6431 if (ife->ifm_media & IFM_FDX)
6432 sc->sc_ctrl |= CTRL_FD;
6433 switch (IFM_SUBTYPE(ife->ifm_media)) {
6434 case IFM_10_T:
6435 sc->sc_ctrl |= CTRL_SPEED_10;
6436 break;
6437 case IFM_100_TX:
6438 sc->sc_ctrl |= CTRL_SPEED_100;
6439 break;
6440 case IFM_1000_T:
6441 sc->sc_ctrl |= CTRL_SPEED_1000;
6442 break;
6443 default:
6444 panic("wm_gmii_mediachange: bad media 0x%x",
6445 ife->ifm_media);
6446 }
6447 }
6448 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6449 if (sc->sc_type <= WM_T_82543)
6450 wm_gmii_reset(sc);
6451
6452 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6453 return 0;
6454 return rc;
6455 }
6456
6457 #define MDI_IO CTRL_SWDPIN(2)
6458 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6459 #define MDI_CLK CTRL_SWDPIN(3)
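/*
 * The 82543 has no MDIC register, so MII management frames are
 * bit-banged through the software-definable pins: SWDPIN 2 carries
 * the data (its direction controlled by SWDPIO 2) and SWDPIN 3 is
 * the management clock, toggled with roughly 10us half-periods.
 */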
6460
6461 static void
6462 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6463 {
6464 uint32_t i, v;
6465
6466 v = CSR_READ(sc, WMREG_CTRL);
6467 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6468 v |= MDI_DIR | CTRL_SWDPIO(3);
6469
6470 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6471 if (data & i)
6472 v |= MDI_IO;
6473 else
6474 v &= ~MDI_IO;
6475 CSR_WRITE(sc, WMREG_CTRL, v);
6476 CSR_WRITE_FLUSH(sc);
6477 delay(10);
6478 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6479 CSR_WRITE_FLUSH(sc);
6480 delay(10);
6481 CSR_WRITE(sc, WMREG_CTRL, v);
6482 CSR_WRITE_FLUSH(sc);
6483 delay(10);
6484 }
6485 }
6486
6487 static uint32_t
6488 wm_i82543_mii_recvbits(struct wm_softc *sc)
6489 {
6490 uint32_t v, i, data = 0;
6491
6492 v = CSR_READ(sc, WMREG_CTRL);
6493 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6494 v |= CTRL_SWDPIO(3);
6495
6496 CSR_WRITE(sc, WMREG_CTRL, v);
6497 CSR_WRITE_FLUSH(sc);
6498 delay(10);
6499 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6500 CSR_WRITE_FLUSH(sc);
6501 delay(10);
6502 CSR_WRITE(sc, WMREG_CTRL, v);
6503 CSR_WRITE_FLUSH(sc);
6504 delay(10);
6505
6506 for (i = 0; i < 16; i++) {
6507 data <<= 1;
6508 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6509 CSR_WRITE_FLUSH(sc);
6510 delay(10);
6511 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6512 data |= 1;
6513 CSR_WRITE(sc, WMREG_CTRL, v);
6514 CSR_WRITE_FLUSH(sc);
6515 delay(10);
6516 }
6517
6518 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6519 CSR_WRITE_FLUSH(sc);
6520 delay(10);
6521 CSR_WRITE(sc, WMREG_CTRL, v);
6522 CSR_WRITE_FLUSH(sc);
6523 delay(10);
6524
6525 return data;
6526 }
6527
6528 #undef MDI_IO
6529 #undef MDI_DIR
6530 #undef MDI_CLK
6531
6532 /*
6533 * wm_gmii_i82543_readreg: [mii interface function]
6534 *
6535 * Read a PHY register on the GMII (i82543 version).
6536 */
6537 static int
6538 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6539 {
6540 struct wm_softc *sc = device_private(self);
6541 int rv;
6542
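	/*
	 * An IEEE 802.3 clause 22 read frame: a 32-bit preamble of all
	 * ones, then start (01), read opcode (10), the 5-bit PHY address
	 * and the 5-bit register address; the PHY then drives 16 data
	 * bits back, which wm_i82543_mii_recvbits() samples.
	 */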
6543 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6544 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6545 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6546 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6547
6548 DPRINTF(WM_DEBUG_GMII,
6549 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6550 device_xname(sc->sc_dev), phy, reg, rv));
6551
6552 return rv;
6553 }
6554
6555 /*
6556 * wm_gmii_i82543_writereg: [mii interface function]
6557 *
6558 * Write a PHY register on the GMII (i82543 version).
6559 */
6560 static void
6561 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6562 {
6563 struct wm_softc *sc = device_private(self);
6564
6565 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6566 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6567 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6568 (MII_COMMAND_START << 30), 32);
6569 }
6570
6571 /*
6572 * wm_gmii_i82544_readreg: [mii interface function]
6573 *
6574 * Read a PHY register on the GMII.
6575 */
6576 static int
6577 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6578 {
6579 struct wm_softc *sc = device_private(self);
6580 uint32_t mdic = 0;
6581 int i, rv;
6582
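	/*
	 * The MDIC register performs the MDIO transaction in hardware:
	 * write the opcode, PHY address and register address (plus the
	 * data, for writes), then poll for MDIC_READY.  MDIC_E reports
	 * an error from the PHY.
	 */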
6583 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6584 MDIC_REGADD(reg));
6585
6586 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6587 mdic = CSR_READ(sc, WMREG_MDIC);
6588 if (mdic & MDIC_READY)
6589 break;
6590 delay(50);
6591 }
6592
6593 if ((mdic & MDIC_READY) == 0) {
6594 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6595 device_xname(sc->sc_dev), phy, reg);
6596 rv = 0;
6597 } else if (mdic & MDIC_E) {
6598 #if 0 /* This is normal if no PHY is present. */
6599 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6600 device_xname(sc->sc_dev), phy, reg);
6601 #endif
6602 rv = 0;
6603 } else {
6604 rv = MDIC_DATA(mdic);
6605 if (rv == 0xffff)
6606 rv = 0;
6607 }
6608
6609 return rv;
6610 }
6611
6612 /*
6613 * wm_gmii_i82544_writereg: [mii interface function]
6614 *
6615 * Write a PHY register on the GMII.
6616 */
6617 static void
6618 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6619 {
6620 struct wm_softc *sc = device_private(self);
6621 uint32_t mdic = 0;
6622 int i;
6623
6624 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6625 MDIC_REGADD(reg) | MDIC_DATA(val));
6626
6627 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6628 mdic = CSR_READ(sc, WMREG_MDIC);
6629 if (mdic & MDIC_READY)
6630 break;
6631 delay(50);
6632 }
6633
6634 if ((mdic & MDIC_READY) == 0)
6635 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6636 device_xname(sc->sc_dev), phy, reg);
6637 else if (mdic & MDIC_E)
6638 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6639 device_xname(sc->sc_dev), phy, reg);
6640 }
6641
6642 /*
6643 * wm_gmii_i80003_readreg: [mii interface function]
6644 *
6645  *	Read a PHY register on the Kumeran interface.
6646  *	This could be handled by the PHY layer if we didn't have to lock
6647  *	the resource ...
6648 */
6649 static int
6650 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6651 {
6652 struct wm_softc *sc = device_private(self);
6653 int sem;
6654 int rv;
6655
6656 if (phy != 1) /* only one PHY on kumeran bus */
6657 return 0;
6658
6659 sem = swfwphysem[sc->sc_funcid];
6660 if (wm_get_swfw_semaphore(sc, sem)) {
6661 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6662 __func__);
6663 return 0;
6664 }
6665
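	/*
	 * GG82563 registers are paged.  The page (the upper bits of
	 * 'reg') is written to a page-select register first; addresses
	 * at or above GG82563_MIN_ALT_REG apparently have to use the
	 * alternate page-select register.
	 */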
6666 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6667 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6668 reg >> GG82563_PAGE_SHIFT);
6669 } else {
6670 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6671 reg >> GG82563_PAGE_SHIFT);
6672 }
6673 	/* Wait 200us more to work around a ready-bit bug in the MDIC register */
6674 delay(200);
6675 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6676 delay(200);
6677
6678 wm_put_swfw_semaphore(sc, sem);
6679 return rv;
6680 }
6681
6682 /*
6683 * wm_gmii_i80003_writereg: [mii interface function]
6684 *
6685  *	Write a PHY register on the Kumeran interface.
6686  *	This could be handled by the PHY layer if we didn't have to lock
6687  *	the resource ...
6688 */
6689 static void
6690 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6691 {
6692 struct wm_softc *sc = device_private(self);
6693 int sem;
6694
6695 if (phy != 1) /* only one PHY on kumeran bus */
6696 return;
6697
6698 sem = swfwphysem[sc->sc_funcid];
6699 if (wm_get_swfw_semaphore(sc, sem)) {
6700 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6701 __func__);
6702 return;
6703 }
6704
6705 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6706 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6707 reg >> GG82563_PAGE_SHIFT);
6708 } else {
6709 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6710 reg >> GG82563_PAGE_SHIFT);
6711 }
6712 	/* Wait 200us more to work around a ready-bit bug in the MDIC register */
6713 delay(200);
6714 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6715 delay(200);
6716
6717 wm_put_swfw_semaphore(sc, sem);
6718 }
6719
6720 /*
6721 * wm_gmii_bm_readreg: [mii interface function]
6722 *
6723  *	Read a PHY register on the BM PHY (ICH8 82567 variants).
6724  *	This could be handled by the PHY layer if we didn't have to lock
6725  *	the resource ...
6726 */
6727 static int
6728 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6729 {
6730 struct wm_softc *sc = device_private(self);
6731 int sem;
6732 int rv;
6733
6734 sem = swfwphysem[sc->sc_funcid];
6735 if (wm_get_swfw_semaphore(sc, sem)) {
6736 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6737 __func__);
6738 return 0;
6739 }
6740
6741 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6742 if (phy == 1)
6743 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6744 reg);
6745 else
6746 wm_gmii_i82544_writereg(self, phy,
6747 GG82563_PHY_PAGE_SELECT,
6748 reg >> GG82563_PAGE_SHIFT);
6749 }
6750
6751 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6752 wm_put_swfw_semaphore(sc, sem);
6753 return rv;
6754 }
6755
6756 /*
6757 * wm_gmii_bm_writereg: [mii interface function]
6758 *
6759  *	Write a PHY register on the BM PHY (ICH8 82567 variants).
6760  *	This could be handled by the PHY layer if we didn't have to lock
6761  *	the resource ...
6762 */
6763 static void
6764 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6765 {
6766 struct wm_softc *sc = device_private(self);
6767 int sem;
6768
6769 sem = swfwphysem[sc->sc_funcid];
6770 if (wm_get_swfw_semaphore(sc, sem)) {
6771 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6772 __func__);
6773 return;
6774 }
6775
6776 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6777 if (phy == 1)
6778 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6779 reg);
6780 else
6781 wm_gmii_i82544_writereg(self, phy,
6782 GG82563_PHY_PAGE_SELECT,
6783 reg >> GG82563_PAGE_SHIFT);
6784 }
6785
6786 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6787 wm_put_swfw_semaphore(sc, sem);
6788 }
6789
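/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  Accessing
 *	page 800 requires temporarily enabling host wakeup-register
 *	access via the WUC enable bit on page 769, which is restored
 *	afterwards.
 */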
6790 static void
6791 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6792 {
6793 struct wm_softc *sc = device_private(self);
6794 uint16_t regnum = BM_PHY_REG_NUM(offset);
6795 uint16_t wuce;
6796
6797 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6798 if (sc->sc_type == WM_T_PCH) {
6799 		/* XXX the e1000 driver does nothing here... why? */
6800 }
6801
6802 /* Set page 769 */
6803 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6804 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6805
6806 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6807
6808 wuce &= ~BM_WUC_HOST_WU_BIT;
6809 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6810 wuce | BM_WUC_ENABLE_BIT);
6811
6812 /* Select page 800 */
6813 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6814 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6815
6816 /* Write page 800 */
6817 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6818
6819 if (rd)
6820 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6821 else
6822 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6823
6824 /* Set page 769 */
6825 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6826 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6827
6828 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6829 }
6830
6831 /*
6832 * wm_gmii_hv_readreg: [mii interface function]
6833 *
6834  *	Read a PHY register on the HV (PCH) PHY.
6835  *	This could be handled by the PHY layer if we didn't have to lock
6836  *	the resource ...
6837 */
6838 static int
6839 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6840 {
6841 struct wm_softc *sc = device_private(self);
6842 uint16_t page = BM_PHY_REG_PAGE(reg);
6843 uint16_t regnum = BM_PHY_REG_NUM(reg);
6844 uint16_t val;
6845 int rv;
6846
6847 if (wm_get_swfwhw_semaphore(sc)) {
6848 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6849 __func__);
6850 return 0;
6851 }
6852
6853 /* XXX Workaround failure in MDIO access while cable is disconnected */
6854 if (sc->sc_phytype == WMPHY_82577) {
6855 /* XXX must write */
6856 }
6857
6858 	/* Page 800 works differently from the rest, so it has its own function */
6859 if (page == BM_WUC_PAGE) {
6860 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6861 return val;
6862 }
6863
6864 /*
6865 	 * Pages below 768 work differently from the rest and would need
6866 	 * their own function; that isn't implemented yet, so just complain.
6867 */
6868 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6869 printf("gmii_hv_readreg!!!\n");
6870 return 0;
6871 }
6872
6873 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6874 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6875 page << BME1000_PAGE_SHIFT);
6876 }
6877
6878 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6879 wm_put_swfwhw_semaphore(sc);
6880 return rv;
6881 }
6882
6883 /*
6884 * wm_gmii_hv_writereg: [mii interface function]
6885 *
6886  *	Write a PHY register on the HV (PCH) PHY.
6887  *	This could be handled by the PHY layer if we didn't have to lock
6888  *	the resource ...
6889 */
6890 static void
6891 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6892 {
6893 struct wm_softc *sc = device_private(self);
6894 uint16_t page = BM_PHY_REG_PAGE(reg);
6895 uint16_t regnum = BM_PHY_REG_NUM(reg);
6896
6897 if (wm_get_swfwhw_semaphore(sc)) {
6898 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6899 __func__);
6900 return;
6901 }
6902
6903 /* XXX Workaround failure in MDIO access while cable is disconnected */
6904
6905 	/* Page 800 works differently from the rest, so it has its own function */
6906 if (page == BM_WUC_PAGE) {
6907 uint16_t tmp;
6908
6909 tmp = val;
6910 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6911 return;
6912 }
6913
6914 /*
6915 	 * Pages below 768 work differently from the rest and would need
6916 	 * their own function; that isn't implemented yet, so just complain.
6917 */
6918 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6919 printf("gmii_hv_writereg!!!\n");
6920 return;
6921 }
6922
6923 /*
6924 * XXX Workaround MDIO accesses being disabled after entering IEEE
6925 * Power Down (whenever bit 11 of the PHY control register is set)
6926 */
6927
6928 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6929 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6930 page << BME1000_PAGE_SHIFT);
6931 }
6932
6933 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6934 wm_put_swfwhw_semaphore(sc);
6935 }
6936
6937 /*
6938 * wm_gmii_82580_readreg: [mii interface function]
6939 *
6940 * Read a PHY register on the 82580 and I350.
6941 * This could be handled by the PHY layer if we didn't have to lock the
6942  *	resource ...
6943 */
6944 static int
6945 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6946 {
6947 struct wm_softc *sc = device_private(self);
6948 int sem;
6949 int rv;
6950
6951 sem = swfwphysem[sc->sc_funcid];
6952 if (wm_get_swfw_semaphore(sc, sem)) {
6953 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6954 __func__);
6955 return 0;
6956 }
6957
6958 rv = wm_gmii_i82544_readreg(self, phy, reg);
6959
6960 wm_put_swfw_semaphore(sc, sem);
6961 return rv;
6962 }
6963
6964 /*
6965 * wm_gmii_82580_writereg: [mii interface function]
6966 *
6967 * Write a PHY register on the 82580 and I350.
6968 * This could be handled by the PHY layer if we didn't have to lock the
6969  *	resource ...
6970 */
6971 static void
6972 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6973 {
6974 struct wm_softc *sc = device_private(self);
6975 int sem;
6976
6977 sem = swfwphysem[sc->sc_funcid];
6978 if (wm_get_swfw_semaphore(sc, sem)) {
6979 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6980 __func__);
6981 return;
6982 }
6983
6984 wm_gmii_i82544_writereg(self, phy, reg, val);
6985
6986 wm_put_swfw_semaphore(sc, sem);
6987 }
6988
6989 /*
6990 * wm_gmii_statchg: [mii interface function]
6991 *
6992 * Callback from MII layer when media changes.
6993 */
6994 static void
6995 wm_gmii_statchg(struct ifnet *ifp)
6996 {
6997 struct wm_softc *sc = ifp->if_softc;
6998 struct mii_data *mii = &sc->sc_mii;
6999
7000 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7001 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7002 sc->sc_fcrtl &= ~FCRTL_XONE;
7003
7004 /*
7005 * Get flow control negotiation result.
7006 */
7007 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7008 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7009 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7010 mii->mii_media_active &= ~IFM_ETH_FMASK;
7011 }
7012
7013 if (sc->sc_flowflags & IFM_FLOW) {
7014 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7015 sc->sc_ctrl |= CTRL_TFCE;
7016 sc->sc_fcrtl |= FCRTL_XONE;
7017 }
7018 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7019 sc->sc_ctrl |= CTRL_RFCE;
7020 }
7021
7022 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7023 DPRINTF(WM_DEBUG_LINK,
7024 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7025 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7026 } else {
7027 DPRINTF(WM_DEBUG_LINK,
7028 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7029 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7030 }
7031
7032 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7033 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7034 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7035 : WMREG_FCRTL, sc->sc_fcrtl);
7036 if (sc->sc_type == WM_T_80003) {
7037 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7038 case IFM_1000_T:
7039 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7040 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7041 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7042 break;
7043 default:
7044 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7045 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7046 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7047 break;
7048 }
7049 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7050 }
7051 }
7052
7053 /*
7054 * wm_kmrn_readreg:
7055 *
7056  *	Read a Kumeran register
7057 */
7058 static int
7059 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7060 {
7061 int rv;
7062
7063 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7064 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7065 aprint_error_dev(sc->sc_dev,
7066 "%s: failed to get semaphore\n", __func__);
7067 return 0;
7068 }
7069 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7070 if (wm_get_swfwhw_semaphore(sc)) {
7071 aprint_error_dev(sc->sc_dev,
7072 "%s: failed to get semaphore\n", __func__);
7073 return 0;
7074 }
7075 }
7076
7077 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7078 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7079 KUMCTRLSTA_REN);
7080 CSR_WRITE_FLUSH(sc);
7081 delay(2);
7082
7083 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7084
7085 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7086 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7087 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7088 wm_put_swfwhw_semaphore(sc);
7089
7090 return rv;
7091 }
7092
7093 /*
7094 * wm_kmrn_writereg:
7095 *
7096  *	Write a Kumeran register
7097 */
7098 static void
7099 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7100 {
7101
7102 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
7103 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7104 aprint_error_dev(sc->sc_dev,
7105 "%s: failed to get semaphore\n", __func__);
7106 return;
7107 }
7108 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7109 if (wm_get_swfwhw_semaphore(sc)) {
7110 aprint_error_dev(sc->sc_dev,
7111 "%s: failed to get semaphore\n", __func__);
7112 return;
7113 }
7114 }
7115
7116 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7117 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7118 (val & KUMCTRLSTA_MASK));
7119
7120 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7121 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7122 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7123 wm_put_swfwhw_semaphore(sc);
7124 }
7125
7126 /* SGMII related */
7127
7128 /*
7129 * wm_sgmii_uses_mdio
7130 *
7131 * Check whether the transaction is to the internal PHY or the external
7132 * MDIO interface. Return true if it's MDIO.
7133 */
7134 static bool
7135 wm_sgmii_uses_mdio(struct wm_softc *sc)
7136 {
7137 uint32_t reg;
7138 bool ismdio = false;
7139
7140 switch (sc->sc_type) {
7141 case WM_T_82575:
7142 case WM_T_82576:
7143 reg = CSR_READ(sc, WMREG_MDIC);
7144 ismdio = ((reg & MDIC_DEST) != 0);
7145 break;
7146 case WM_T_82580:
7147 case WM_T_I350:
7148 case WM_T_I354:
7149 case WM_T_I210:
7150 case WM_T_I211:
7151 reg = CSR_READ(sc, WMREG_MDICNFG);
7152 ismdio = ((reg & MDICNFG_DEST) != 0);
7153 break;
7154 default:
7155 break;
7156 }
7157
7158 return ismdio;
7159 }
7160
7161 /*
7162 * wm_sgmii_readreg: [mii interface function]
7163 *
7164 * Read a PHY register on the SGMII
7165 * This could be handled by the PHY layer if we didn't have to lock the
7166  *	resource ...
7167 */
7168 static int
7169 wm_sgmii_readreg(device_t self, int phy, int reg)
7170 {
7171 struct wm_softc *sc = device_private(self);
7172 uint32_t i2ccmd;
7173 int i, rv;
7174
7175 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7176 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7177 __func__);
7178 return 0;
7179 }
7180
7181 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7182 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7183 | I2CCMD_OPCODE_READ;
7184 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7185
7186 /* Poll the ready bit */
7187 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7188 delay(50);
7189 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7190 if (i2ccmd & I2CCMD_READY)
7191 break;
7192 }
7193 if ((i2ccmd & I2CCMD_READY) == 0)
7194 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7195 if ((i2ccmd & I2CCMD_ERROR) != 0)
7196 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7197
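	/*
	 * The 16-bit value in I2CCMD comes back byte-swapped from the
	 * I2C bus, so swap it into host order.
	 */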
7198 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7199
7200 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7201 return rv;
7202 }
7203
7204 /*
7205 * wm_sgmii_writereg: [mii interface function]
7206 *
7207 * Write a PHY register on the SGMII.
7208 * This could be handled by the PHY layer if we didn't have to lock the
7209  *	resource ...
7210 */
7211 static void
7212 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7213 {
7214 struct wm_softc *sc = device_private(self);
7215 uint32_t i2ccmd;
7216 int i;
7217
7218 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7219 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7220 __func__);
7221 return;
7222 }
7223
7224 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7225 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7226 | I2CCMD_OPCODE_WRITE;
7227 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7228
7229 /* Poll the ready bit */
7230 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7231 delay(50);
7232 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7233 if (i2ccmd & I2CCMD_READY)
7234 break;
7235 }
7236 if ((i2ccmd & I2CCMD_READY) == 0)
7237 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7238 if ((i2ccmd & I2CCMD_ERROR) != 0)
7239 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7240
7241 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7242 }
7243
7244 /* TBI related */
7245
7246 /* XXX Currently TBI only */
7247 static int
7248 wm_check_for_link(struct wm_softc *sc)
7249 {
7250 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7251 uint32_t rxcw;
7252 uint32_t ctrl;
7253 uint32_t status;
7254 uint32_t sig;
7255
7256 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7257 sc->sc_tbi_linkup = 1;
7258 return 0;
7259 }
7260
7261 rxcw = CSR_READ(sc, WMREG_RXCW);
7262 ctrl = CSR_READ(sc, WMREG_CTRL);
7263 status = CSR_READ(sc, WMREG_STATUS);
7264
7265 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7266
7267 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7268 device_xname(sc->sc_dev), __func__,
7269 ((ctrl & CTRL_SWDPIN(1)) == sig),
7270 ((status & STATUS_LU) != 0),
7271 ((rxcw & RXCW_C) != 0)
7272 ));
7273
7274 /*
7275 * SWDPIN LU RXCW
7276 * 0 0 0
7277 * 0 0 1 (should not happen)
7278 * 0 1 0 (should not happen)
7279 * 0 1 1 (should not happen)
7280 * 1 0 0 Disable autonego and force linkup
7281 * 1 0 1 got /C/ but not linkup yet
7282 * 1 1 0 (linkup)
7283 * 1 1 1 If IFM_AUTO, back to autonego
7284 *
7285 */
7286 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7287 && ((status & STATUS_LU) == 0)
7288 && ((rxcw & RXCW_C) == 0)) {
7289 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7290 __func__));
7291 sc->sc_tbi_linkup = 0;
7292 /* Disable auto-negotiation in the TXCW register */
7293 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7294
7295 /*
7296 * Force link-up and also force full-duplex.
7297 *
7298 		 * NOTE: CTRL will update TFCE and RFCE automatically,
7299 * so we should update sc->sc_ctrl
7300 */
7301 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7302 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7303 } else if (((status & STATUS_LU) != 0)
7304 && ((rxcw & RXCW_C) != 0)
7305 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7306 sc->sc_tbi_linkup = 1;
7307 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7308 __func__));
7309 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7310 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7311 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7312 && ((rxcw & RXCW_C) != 0)) {
7313 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7314 } else {
7315 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7316 status));
7317 }
7318
7319 return 0;
7320 }
7321
7322 /*
7323 * wm_tbi_mediainit:
7324 *
7325 * Initialize media for use on 1000BASE-X devices.
7326 */
7327 static void
7328 wm_tbi_mediainit(struct wm_softc *sc)
7329 {
7330 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7331 const char *sep = "";
7332
7333 if (sc->sc_type < WM_T_82543)
7334 sc->sc_tipg = TIPG_WM_DFLT;
7335 else
7336 sc->sc_tipg = TIPG_LG_DFLT;
7337
7338 sc->sc_tbi_anegticks = 5;
7339
7340 /* Initialize our media structures */
7341 sc->sc_mii.mii_ifp = ifp;
7342
7343 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7344 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7345 wm_tbi_mediastatus);
7346
7347 /*
7348 * SWD Pins:
7349 *
7350 * 0 = Link LED (output)
7351 * 1 = Loss Of Signal (input)
7352 */
7353 sc->sc_ctrl |= CTRL_SWDPIO(0);
7354 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7355 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7356 sc->sc_ctrl &= ~CTRL_LRST;
7357
7358 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7359
7360 #define ADD(ss, mm, dd) \
7361 do { \
7362 aprint_normal("%s%s", sep, ss); \
7363 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7364 sep = ", "; \
7365 } while (/*CONSTCOND*/0)
7366
7367 aprint_normal_dev(sc->sc_dev, "");
7368
7369 /* Only 82545 is LX */
7370 if (sc->sc_type == WM_T_82545) {
7371 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7372 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7373 } else {
7374 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7375 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7376 }
7377 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7378 aprint_normal("\n");
7379
7380 #undef ADD
7381
7382 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7383 }
7384
7385 /*
7386 * wm_tbi_mediastatus: [ifmedia interface function]
7387 *
7388 * Get the current interface media status on a 1000BASE-X device.
7389 */
7390 static void
7391 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7392 {
7393 struct wm_softc *sc = ifp->if_softc;
7394 uint32_t ctrl, status;
7395
7396 ifmr->ifm_status = IFM_AVALID;
7397 ifmr->ifm_active = IFM_ETHER;
7398
7399 status = CSR_READ(sc, WMREG_STATUS);
7400 if ((status & STATUS_LU) == 0) {
7401 ifmr->ifm_active |= IFM_NONE;
7402 return;
7403 }
7404
7405 ifmr->ifm_status |= IFM_ACTIVE;
7406 /* Only 82545 is LX */
7407 if (sc->sc_type == WM_T_82545)
7408 ifmr->ifm_active |= IFM_1000_LX;
7409 else
7410 ifmr->ifm_active |= IFM_1000_SX;
7411 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7412 ifmr->ifm_active |= IFM_FDX;
7413 else
7414 ifmr->ifm_active |= IFM_HDX;
7415 ctrl = CSR_READ(sc, WMREG_CTRL);
7416 if (ctrl & CTRL_RFCE)
7417 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7418 if (ctrl & CTRL_TFCE)
7419 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7420 }
7421
7422 /*
7423 * wm_tbi_mediachange: [ifmedia interface function]
7424 *
7425 * Set hardware to newly-selected media on a 1000BASE-X device.
7426 */
7427 static int
7428 wm_tbi_mediachange(struct ifnet *ifp)
7429 {
7430 struct wm_softc *sc = ifp->if_softc;
7431 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7432 uint32_t status;
7433 int i;
7434
7435 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7436 return 0;
7437
7438 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7439 || (sc->sc_type >= WM_T_82575))
7440 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7441
7442 /* XXX power_up_serdes_link_82575() */
7443
7444 sc->sc_ctrl &= ~CTRL_LRST;
7445 sc->sc_txcw = TXCW_ANE;
7446 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7447 sc->sc_txcw |= TXCW_FD | TXCW_HD;
7448 else if (ife->ifm_media & IFM_FDX)
7449 sc->sc_txcw |= TXCW_FD;
7450 else
7451 sc->sc_txcw |= TXCW_HD;
7452
7453 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7454 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7455
7456 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7457 device_xname(sc->sc_dev), sc->sc_txcw));
7458 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7459 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7460 CSR_WRITE_FLUSH(sc);
7461 delay(1000);
7462
7463 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7464 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7465
7466 /*
7467 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set when
7468 	 * the optics detect a signal; on the 82544 and older the sense is inverted.
7469 */
7470 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7471 /* Have signal; wait for the link to come up. */
7472 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7473 delay(10000);
7474 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7475 break;
7476 }
7477
7478 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7479 device_xname(sc->sc_dev),i));
7480
7481 status = CSR_READ(sc, WMREG_STATUS);
7482 DPRINTF(WM_DEBUG_LINK,
7483 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7484 device_xname(sc->sc_dev),status, STATUS_LU));
7485 if (status & STATUS_LU) {
7486 /* Link is up. */
7487 DPRINTF(WM_DEBUG_LINK,
7488 ("%s: LINK: set media -> link up %s\n",
7489 device_xname(sc->sc_dev),
7490 (status & STATUS_FD) ? "FDX" : "HDX"));
7491
7492 /*
7493 * NOTE: CTRL will update TFCE and RFCE automatically,
7494 * so we should update sc->sc_ctrl
7495 */
7496 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7497 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7498 sc->sc_fcrtl &= ~FCRTL_XONE;
7499 if (status & STATUS_FD)
7500 sc->sc_tctl |=
7501 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7502 else
7503 sc->sc_tctl |=
7504 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7505 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7506 sc->sc_fcrtl |= FCRTL_XONE;
7507 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7508 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7509 WMREG_OLD_FCRTL : WMREG_FCRTL,
7510 sc->sc_fcrtl);
7511 sc->sc_tbi_linkup = 1;
7512 } else {
7513 if (i == WM_LINKUP_TIMEOUT)
7514 wm_check_for_link(sc);
7515 /* Link is down. */
7516 DPRINTF(WM_DEBUG_LINK,
7517 ("%s: LINK: set media -> link down\n",
7518 device_xname(sc->sc_dev)));
7519 sc->sc_tbi_linkup = 0;
7520 }
7521 } else {
7522 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7523 device_xname(sc->sc_dev)));
7524 sc->sc_tbi_linkup = 0;
7525 }
7526
7527 wm_tbi_set_linkled(sc);
7528
7529 return 0;
7530 }
7531
7532 /*
7533 * wm_tbi_set_linkled:
7534 *
7535 * Update the link LED on 1000BASE-X devices.
7536 */
7537 static void
7538 wm_tbi_set_linkled(struct wm_softc *sc)
7539 {
7540
7541 if (sc->sc_tbi_linkup)
7542 sc->sc_ctrl |= CTRL_SWDPIN(0);
7543 else
7544 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7545
7546 /* 82540 or newer devices are active low */
7547 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7548
7549 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7550 }
7551
7552 /*
7553 * wm_tbi_check_link:
7554 *
7555 * Check the link on 1000BASE-X devices.
7556 */
7557 static void
7558 wm_tbi_check_link(struct wm_softc *sc)
7559 {
7560 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7561 uint32_t status;
7562
7563 KASSERT(WM_TX_LOCKED(sc));
7564
7565 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7566 sc->sc_tbi_linkup = 1;
7567 return;
7568 }
7569
7570 status = CSR_READ(sc, WMREG_STATUS);
7571
7572 /* XXX is this needed? */
7573 (void)CSR_READ(sc, WMREG_RXCW);
7574 (void)CSR_READ(sc, WMREG_CTRL);
7575
7576 /* set link status */
7577 if ((status & STATUS_LU) == 0) {
7578 DPRINTF(WM_DEBUG_LINK,
7579 ("%s: LINK: checklink -> down\n",
7580 device_xname(sc->sc_dev)));
7581 sc->sc_tbi_linkup = 0;
7582 } else if (sc->sc_tbi_linkup == 0) {
7583 DPRINTF(WM_DEBUG_LINK,
7584 ("%s: LINK: checklink -> up %s\n",
7585 device_xname(sc->sc_dev),
7586 (status & STATUS_FD) ? "FDX" : "HDX"));
7587 sc->sc_tbi_linkup = 1;
7588 }
7589
7590 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7591 && ((status & STATUS_LU) == 0)) {
7592 sc->sc_tbi_linkup = 0;
7593 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7594 /* If the timer expired, retry autonegotiation */
7595 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7596 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7597 sc->sc_tbi_ticks = 0;
7598 /*
7599 * Reset the link, and let autonegotiation do
7600 * its thing
7601 */
7602 sc->sc_ctrl |= CTRL_LRST;
7603 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7604 CSR_WRITE_FLUSH(sc);
7605 delay(1000);
7606 sc->sc_ctrl &= ~CTRL_LRST;
7607 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7608 CSR_WRITE_FLUSH(sc);
7609 delay(1000);
7610 CSR_WRITE(sc, WMREG_TXCW,
7611 sc->sc_txcw & ~TXCW_ANE);
7612 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7613 }
7614 }
7615 }
7616
7617 wm_tbi_set_linkled(sc);
7618 }
7619
7620 /* SFP related */
7621
7622 static int
7623 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7624 {
7625 uint32_t i2ccmd;
7626 int i;
7627
7628 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7629 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7630
7631 /* Poll the ready bit */
7632 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7633 delay(50);
7634 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7635 if (i2ccmd & I2CCMD_READY)
7636 break;
7637 }
7638 if ((i2ccmd & I2CCMD_READY) == 0)
7639 return -1;
7640 if ((i2ccmd & I2CCMD_ERROR) != 0)
7641 return -1;
7642
7643 *data = i2ccmd & 0x00ff;
7644
7645 return 0;
7646 }
7647
7648 static uint32_t
7649 wm_sfp_get_media_type(struct wm_softc *sc)
7650 {
7651 uint32_t ctrl_ext;
7652 uint8_t val = 0;
7653 int timeout = 3;
7654 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
7655 int rv = -1;
7656
7657 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7658 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7659 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7660 CSR_WRITE_FLUSH(sc);
7661
7662 /* Read SFP module data */
7663 while (timeout) {
7664 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7665 if (rv == 0)
7666 break;
7667 delay(100*1000); /* XXX too big */
7668 timeout--;
7669 }
7670 if (rv != 0)
7671 goto out;
7672 switch (val) {
7673 case SFF_SFP_ID_SFF:
7674 aprint_normal_dev(sc->sc_dev,
7675 "Module/Connector soldered to board\n");
7676 break;
7677 case SFF_SFP_ID_SFP:
7678 aprint_normal_dev(sc->sc_dev, "SFP\n");
7679 break;
7680 case SFF_SFP_ID_UNKNOWN:
7681 goto out;
7682 default:
7683 break;
7684 }
7685
7686 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7687 if (rv != 0) {
7688 goto out;
7689 }
7690
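	/*
	 * Map the module's Ethernet compliance flags to a media type:
	 * 1000BASE-SX/LX modules attach through SERDES, while 1000BASE-T
	 * and 100BASE-FX modules are reached over SGMII.
	 */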
	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_SERDES;
	}
7700
7701 out:
7702 /* Restore I2C interface setting */
7703 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7704
7705 return mediatype;
7706 }

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
7711
7712 /* Both spi and uwire */
7713
7714 /*
7715 * wm_eeprom_sendbits:
7716 *
7717 * Send a series of bits to the EEPROM.
7718 */
7719 static void
7720 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7721 {
7722 uint32_t reg;
7723 int x;
7724
7725 reg = CSR_READ(sc, WMREG_EECD);
7726
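	/*
	 * Clock the bits out MSB-first: present each bit on DI, then
	 * pulse SK high and low with a short delay around every edge.
	 */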
7727 for (x = nbits; x > 0; x--) {
7728 if (bits & (1U << (x - 1)))
7729 reg |= EECD_DI;
7730 else
7731 reg &= ~EECD_DI;
7732 CSR_WRITE(sc, WMREG_EECD, reg);
7733 CSR_WRITE_FLUSH(sc);
7734 delay(2);
7735 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7736 CSR_WRITE_FLUSH(sc);
7737 delay(2);
7738 CSR_WRITE(sc, WMREG_EECD, reg);
7739 CSR_WRITE_FLUSH(sc);
7740 delay(2);
7741 }
7742 }
7743
7744 /*
7745 * wm_eeprom_recvbits:
7746 *
7747 * Receive a series of bits from the EEPROM.
7748 */
7749 static void
7750 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7751 {
7752 uint32_t reg, val;
7753 int x;
7754
7755 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7756
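	/*
	 * Clock the bits in MSB-first: raise SK, sample DO while the
	 * clock is high, then drop SK again.
	 */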
7757 val = 0;
7758 for (x = nbits; x > 0; x--) {
7759 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7760 CSR_WRITE_FLUSH(sc);
7761 delay(2);
7762 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7763 val |= (1U << (x - 1));
7764 CSR_WRITE(sc, WMREG_EECD, reg);
7765 CSR_WRITE_FLUSH(sc);
7766 delay(2);
7767 }
7768 *valp = val;
7769 }
7770
7771 /* Microwire */
7772
7773 /*
7774 * wm_nvm_read_uwire:
7775 *
7776 * Read a word from the EEPROM using the MicroWire protocol.
7777 */
7778 static int
7779 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7780 {
7781 uint32_t reg, val;
7782 int i;
7783
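	/*
	 * Each word is fetched with its own Microwire transaction:
	 * assert CS, shift in the 3-bit READ opcode and the word
	 * address, then clock 16 data bits out of DO.
	 */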
7784 for (i = 0; i < wordcnt; i++) {
7785 /* Clear SK and DI. */
7786 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7787 CSR_WRITE(sc, WMREG_EECD, reg);
7788
		/*
		 * XXX: workaround for a bug in qemu-0.12.x and prior
		 * and Xen.
		 *
		 * We use this workaround only for the 82540 because
		 * qemu's e1000 acts as an 82540.
		 */
7796 if (sc->sc_type == WM_T_82540) {
7797 reg |= EECD_SK;
7798 CSR_WRITE(sc, WMREG_EECD, reg);
7799 reg &= ~EECD_SK;
7800 CSR_WRITE(sc, WMREG_EECD, reg);
7801 CSR_WRITE_FLUSH(sc);
7802 delay(2);
7803 }
7804 /* XXX: end of workaround */
7805
7806 /* Set CHIP SELECT. */
7807 reg |= EECD_CS;
7808 CSR_WRITE(sc, WMREG_EECD, reg);
7809 CSR_WRITE_FLUSH(sc);
7810 delay(2);
7811
7812 /* Shift in the READ command. */
7813 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7814
7815 /* Shift in address. */
7816 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
7817
7818 /* Shift out the data. */
7819 wm_eeprom_recvbits(sc, &val, 16);
7820 data[i] = val & 0xffff;
7821
7822 /* Clear CHIP SELECT. */
7823 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7824 CSR_WRITE(sc, WMREG_EECD, reg);
7825 CSR_WRITE_FLUSH(sc);
7826 delay(2);
7827 }
7828
7829 return 0;
7830 }
7831
7832 /* SPI */
7833
7834 /*
7835 * Set SPI and FLASH related information from the EECD register.
7836 * For 82541 and 82547, the word size is taken from EEPROM.
7837 */
7838 static int
7839 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
7840 {
7841 int size;
7842 uint32_t reg;
7843 uint16_t data;
7844
7845 reg = CSR_READ(sc, WMREG_EECD);
7846 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7847
7848 /* Read the size of NVM from EECD by default */
7849 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7850 switch (sc->sc_type) {
7851 case WM_T_82541:
7852 case WM_T_82541_2:
7853 case WM_T_82547:
7854 case WM_T_82547_2:
7855 /* Set dummy value to access EEPROM */
7856 sc->sc_nvm_wordsize = 64;
7857 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
7858 reg = data;
7859 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
7860 if (size == 0)
7861 size = 6; /* 64 word size */
7862 else
7863 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
7864 break;
7865 case WM_T_80003:
7866 case WM_T_82571:
7867 case WM_T_82572:
7868 case WM_T_82573: /* SPI case */
7869 case WM_T_82574: /* SPI case */
7870 case WM_T_82583: /* SPI case */
7871 size += NVM_WORD_SIZE_BASE_SHIFT;
7872 if (size > 14)
7873 size = 14;
7874 break;
7875 case WM_T_82575:
7876 case WM_T_82576:
7877 case WM_T_82580:
7878 case WM_T_I350:
7879 case WM_T_I354:
7880 case WM_T_I210:
7881 case WM_T_I211:
7882 size += NVM_WORD_SIZE_BASE_SHIFT;
7883 if (size > 15)
7884 size = 15;
7885 break;
7886 default:
7887 aprint_error_dev(sc->sc_dev,
7888 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
7891 }
7892
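	/* The size value is an exponent; e.g. size == 6 means 64 words. */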
7893 sc->sc_nvm_wordsize = 1 << size;
7894
7895 return 0;
7896 }
7897
7898 /*
7899 * wm_nvm_ready_spi:
7900 *
7901 * Wait for a SPI EEPROM to be ready for commands.
7902 */
7903 static int
7904 wm_nvm_ready_spi(struct wm_softc *sc)
7905 {
7906 uint32_t val;
7907 int usec;
7908
7909 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7910 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7911 wm_eeprom_recvbits(sc, &val, 8);
7912 if ((val & SPI_SR_RDY) == 0)
7913 break;
7914 }
7915 if (usec >= SPI_MAX_RETRIES) {
7916 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7917 return 1;
7918 }
7919 return 0;
7920 }
7921
7922 /*
7923 * wm_nvm_read_spi:
7924 *
 * Read a word from the EEPROM using the SPI protocol.
7926 */
7927 static int
7928 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7929 {
7930 uint32_t reg, val;
7931 int i;
7932 uint8_t opc;
7933
7934 /* Clear SK and CS. */
7935 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7936 CSR_WRITE(sc, WMREG_EECD, reg);
7937 CSR_WRITE_FLUSH(sc);
7938 delay(2);
7939
7940 if (wm_nvm_ready_spi(sc))
7941 return 1;
7942
7943 /* Toggle CS to flush commands. */
7944 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7945 CSR_WRITE_FLUSH(sc);
7946 delay(2);
7947 CSR_WRITE(sc, WMREG_EECD, reg);
7948 CSR_WRITE_FLUSH(sc);
7949 delay(2);
7950
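	/*
	 * Parts with 8-bit addressing carry the ninth address bit in the
	 * A8 bit of the READ opcode (byte address >= 256, i.e. word 128).
	 */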
7951 opc = SPI_OPC_READ;
7952 if (sc->sc_nvm_addrbits == 8 && word >= 128)
7953 opc |= SPI_OPC_A8;
7954
7955 wm_eeprom_sendbits(sc, opc, 8);
7956 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
7957
7958 for (i = 0; i < wordcnt; i++) {
7959 wm_eeprom_recvbits(sc, &val, 16);
7960 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7961 }
7962
7963 /* Raise CS and clear SK. */
7964 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7965 CSR_WRITE(sc, WMREG_EECD, reg);
7966 CSR_WRITE_FLUSH(sc);
7967 delay(2);
7968
7969 return 0;
7970 }
7971
7972 /* Using with EERD */
7973
7974 static int
7975 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7976 {
7977 uint32_t attempts = 100000;
7978 uint32_t i, reg = 0;
7979 int32_t done = -1;
7980
7981 for (i = 0; i < attempts; i++) {
7982 reg = CSR_READ(sc, rw);
7983
7984 if (reg & EERD_DONE) {
7985 done = 0;
7986 break;
7987 }
7988 delay(5);
7989 }
7990
7991 return done;
7992 }
7993
7994 static int
7995 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
7996 uint16_t *data)
7997 {
7998 int i, eerd = 0;
7999 int error = 0;
8000
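	/*
	 * The EERD register provides a simple read interface: write the
	 * word address together with the START bit, poll until DONE is
	 * set, then pick the result out of the data field.
	 */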
8001 for (i = 0; i < wordcnt; i++) {
8002 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8003
8004 CSR_WRITE(sc, WMREG_EERD, eerd);
8005 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8006 if (error != 0)
8007 break;
8008
8009 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8010 }
8011
8012 return error;
8013 }
8014
8015 /* Flash */
8016
8017 static int
8018 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8019 {
8020 uint32_t eecd;
8021 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8022 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8023 uint8_t sig_byte = 0;
8024
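	/*
	 * The bank signature lives in the upper byte of word
	 * ICH_NVM_SIG_WORD, hence the byte offset of (word * 2) + 1.
	 */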
8025 switch (sc->sc_type) {
8026 case WM_T_ICH8:
8027 case WM_T_ICH9:
8028 eecd = CSR_READ(sc, WMREG_EECD);
8029 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8030 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8031 return 0;
8032 }
8033 /* FALLTHROUGH */
8034 default:
8035 /* Default to 0 */
8036 *bank = 0;
8037
8038 /* Check bank 0 */
8039 wm_read_ich8_byte(sc, act_offset, &sig_byte);
8040 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8041 *bank = 0;
8042 return 0;
8043 }
8044
8045 /* Check bank 1 */
8046 wm_read_ich8_byte(sc, act_offset + bank1_offset,
8047 &sig_byte);
8048 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8049 *bank = 1;
8050 return 0;
8051 }
8052 }
8053
8054 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8055 device_xname(sc->sc_dev)));
8056 return -1;
8057 }
8058
8059 /******************************************************************************
8060 * This function does initial flash setup so that a new read/write/erase cycle
8061 * can be started.
8062 *
8063 * sc - The pointer to the hw structure
8064 ****************************************************************************/
8065 static int32_t
8066 wm_ich8_cycle_init(struct wm_softc *sc)
8067 {
8068 uint16_t hsfsts;
8069 int32_t error = 1;
8070 int32_t i = 0;
8071
8072 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8073
	/* Check that the Flash Descriptor Valid bit is set in HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
8078
8079 /* Clear FCERR in Hw status by writing 1 */
8080 /* Clear DAEL in Hw status by writing a 1 */
8081 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8082
8083 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8084
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be set by hardware after reset so that it can be used to
	 * tell whether a cycle is in progress or has completed.  We should
	 * also have a software semaphore mechanism guarding FDONE (or the
	 * cycle-in-progress bit) so that accesses to those bits by two
	 * threads are serialized and two threads can't start a cycle at
	 * the same time.
	 */
8095
8096 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8097 /*
8098 * There is no cycle running at present, so we can start a
8099 * cycle
8100 */
8101
8102 /* Begin by setting Flash Cycle Done. */
8103 hsfsts |= HSFSTS_DONE;
8104 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8105 error = 0;
8106 } else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
8111 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8112 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8113 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8114 error = 0;
8115 break;
8116 }
8117 delay(1);
8118 }
8119 if (error == 0) {
			/*
			 * The previous cycle completed within the timeout,
			 * so set the Flash Cycle Done bit ourselves.
			 */
8124 hsfsts |= HSFSTS_DONE;
8125 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8126 }
8127 }
8128 return error;
8129 }
8130
8131 /******************************************************************************
8132 * This function starts a flash cycle and waits for its completion
8133 *
8134 * sc - The pointer to the hw structure
8135 ****************************************************************************/
8136 static int32_t
8137 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8138 {
8139 uint16_t hsflctl;
8140 uint16_t hsfsts;
8141 int32_t error = 1;
8142 uint32_t i = 0;
8143
8144 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8145 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8146 hsflctl |= HSFCTL_GO;
8147 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8148
	/* Wait until the FDONE bit is set */
	do {
		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
		if (hsfsts & HSFSTS_DONE)
			break;
		delay(1);
		i++;
	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
		error = 0;
8159
8160 return error;
8161 }
8162
8163 /******************************************************************************
8164 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8165 *
8166 * sc - The pointer to the hw structure
8167 * index - The index of the byte or word to read.
8168 * size - Size of data to read, 1=byte 2=word
8169 * data - Pointer to the word to store the value read.
8170 *****************************************************************************/
8171 static int32_t
8172 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8173 uint32_t size, uint16_t *data)
8174 {
8175 uint16_t hsfsts;
8176 uint16_t hsflctl;
8177 uint32_t flash_linear_address;
8178 uint32_t flash_data = 0;
8179 int32_t error = 1;
8180 int32_t count = 0;
8181
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;
8185
8186 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8187 sc->sc_ich8_flash_base;
8188
8189 do {
8190 delay(1);
8191 /* Steps */
8192 error = wm_ich8_cycle_init(sc);
8193 if (error)
8194 break;
8195
8196 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8197 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8198 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8199 & HSFCTL_BCOUNT_MASK;
8200 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8201 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8202
8203 /*
8204 * Write the last 24 bits of index into Flash Linear address
8205 * field in Flash Address
8206 */
8207 /* TODO: TBD maybe check the index against the size of flash */
8208
8209 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8210
8211 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8212
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read
		 * the result out of Flash Data0, least significant byte
		 * first.
		 */
8219 if (error == 0) {
8220 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8221 if (size == 1)
8222 *data = (uint8_t)(flash_data & 0x000000FF);
8223 else if (size == 2)
8224 *data = (uint16_t)(flash_data & 0x0000FFFF);
8225 break;
8226 } else {
8227 /*
8228 * If we've gotten here, then things are probably
8229 * completely hosed, but if the error condition is
8230 * detected, it won't hurt to give it another try...
8231 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8232 */
8233 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8234 if (hsfsts & HSFSTS_ERR) {
8235 /* Repeat for some time before giving up. */
8236 continue;
8237 } else if ((hsfsts & HSFSTS_DONE) == 0)
8238 break;
8239 }
8240 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8241
8242 return error;
8243 }
8244
8245 /******************************************************************************
8246 * Reads a single byte from the NVM using the ICH8 flash access registers.
8247 *
8248 * sc - pointer to wm_hw structure
8249 * index - The index of the byte to read.
8250 * data - Pointer to a byte to store the value read.
8251 *****************************************************************************/
8252 static int32_t
8253 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8254 {
8255 int32_t status;
8256 uint16_t word = 0;
8257
8258 status = wm_read_ich8_data(sc, index, 1, &word);
8259 if (status == 0)
8260 *data = (uint8_t)word;
8261 else
8262 *data = 0;
8263
8264 return status;
8265 }
8266
8267 /******************************************************************************
8268 * Reads a word from the NVM using the ICH8 flash access registers.
8269 *
8270 * sc - pointer to wm_hw structure
8271 * index - The starting byte index of the word to read.
8272 * data - Pointer to a word to store the value read.
8273 *****************************************************************************/
8274 static int32_t
8275 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8276 {
8277 int32_t status;
8278
8279 status = wm_read_ich8_data(sc, index, 2, data);
8280 return status;
8281 }
8282
8283 /******************************************************************************
8284 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8285 * register.
8286 *
8287 * sc - Struct containing variables accessed by shared code
8288 * offset - offset of word in the EEPROM to read
8289 * data - word read from the EEPROM
8290 * words - number of words to read
8291 *****************************************************************************/
8292 static int
8293 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8294 {
8295 int32_t error = 0;
8296 uint32_t flash_bank = 0;
8297 uint32_t act_offset = 0;
8298 uint32_t bank_offset = 0;
8299 uint16_t word = 0;
8300 uint16_t i = 0;
8301
8302 /*
8303 * We need to know which is the valid flash bank. In the event
8304 * that we didn't allocate eeprom_shadow_ram, we may not be
8305 * managing flash_bank. So it cannot be trusted and needs
8306 * to be updated with each read.
8307 */
8308 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8309 if (error) {
8310 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8311 device_xname(sc->sc_dev)));
8312 flash_bank = 0;
8313 }
8314
	/*
	 * Compute the byte offset of the selected bank; the bank size is
	 * counted in words, hence the multiplication by 2.
	 */
8319 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8320
8321 error = wm_get_swfwhw_semaphore(sc);
8322 if (error) {
8323 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8324 __func__);
8325 return error;
8326 }
8327
8328 for (i = 0; i < words; i++) {
8329 /* The NVM part needs a byte offset, hence * 2 */
8330 act_offset = bank_offset + ((offset + i) * 2);
8331 error = wm_read_ich8_word(sc, act_offset, &word);
8332 if (error) {
8333 aprint_error_dev(sc->sc_dev,
8334 "%s: failed to read NVM\n", __func__);
8335 break;
8336 }
8337 data[i] = word;
8338 }
8339
8340 wm_put_swfwhw_semaphore(sc);
8341 return error;
8342 }
8343
8344 /* Lock, detecting NVM type, validate checksum and read */
8345
8346 /*
8347 * wm_nvm_acquire:
8348 *
8349 * Perform the EEPROM handshake required on some chips.
8350 */
8351 static int
8352 wm_nvm_acquire(struct wm_softc *sc)
8353 {
8354 uint32_t reg;
8355 int x;
8356 int ret = 0;
8357
	/* Acquiring always succeeds for flash-type NVM */
8359 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8360 return 0;
8361
8362 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8363 ret = wm_get_swfwhw_semaphore(sc);
8364 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8365 /* This will also do wm_get_swsm_semaphore() if needed */
8366 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8367 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8368 ret = wm_get_swsm_semaphore(sc);
8369 }
8370
8371 if (ret) {
8372 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8373 __func__);
8374 return 1;
8375 }
8376
8377 if (sc->sc_flags & WM_F_LOCK_EECD) {
8378 reg = CSR_READ(sc, WMREG_EECD);
8379
8380 /* Request EEPROM access. */
8381 reg |= EECD_EE_REQ;
8382 CSR_WRITE(sc, WMREG_EECD, reg);
8383
8384 /* ..and wait for it to be granted. */
8385 for (x = 0; x < 1000; x++) {
8386 reg = CSR_READ(sc, WMREG_EECD);
8387 if (reg & EECD_EE_GNT)
8388 break;
8389 delay(5);
8390 }
8391 if ((reg & EECD_EE_GNT) == 0) {
8392 aprint_error_dev(sc->sc_dev,
8393 "could not acquire EEPROM GNT\n");
8394 reg &= ~EECD_EE_REQ;
8395 CSR_WRITE(sc, WMREG_EECD, reg);
8396 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8397 wm_put_swfwhw_semaphore(sc);
8398 if (sc->sc_flags & WM_F_LOCK_SWFW)
8399 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8400 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8401 wm_put_swsm_semaphore(sc);
8402 return 1;
8403 }
8404 }
8405
8406 return 0;
8407 }
8408
8409 /*
8410 * wm_nvm_release:
8411 *
8412 * Release the EEPROM mutex.
8413 */
8414 static void
8415 wm_nvm_release(struct wm_softc *sc)
8416 {
8417 uint32_t reg;
8418
	/* Nothing to release for flash-type NVM */
8420 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8421 return;
8422
8423 if (sc->sc_flags & WM_F_LOCK_EECD) {
8424 reg = CSR_READ(sc, WMREG_EECD);
8425 reg &= ~EECD_EE_REQ;
8426 CSR_WRITE(sc, WMREG_EECD, reg);
8427 }
8428
8429 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8430 wm_put_swfwhw_semaphore(sc);
8431 if (sc->sc_flags & WM_F_LOCK_SWFW)
8432 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8433 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8434 wm_put_swsm_semaphore(sc);
8435 }
8436
8437 static int
8438 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8439 {
8440 uint32_t eecd = 0;
8441
8442 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8443 || sc->sc_type == WM_T_82583) {
8444 eecd = CSR_READ(sc, WMREG_EECD);
8445
8446 /* Isolate bits 15 & 16 */
8447 eecd = ((eecd >> 15) & 0x03);
8448
8449 /* If both bits are set, device is Flash type */
8450 if (eecd == 0x03)
8451 return 0;
8452 }
8453 return 1;
8454 }
8455
/*
 * wm_nvm_validate_checksum:
 *
 * The NVM is valid when the sum of the first 64 (16 bit) words equals
 * NVM_CHECKSUM.
 */
8461 static int
8462 wm_nvm_validate_checksum(struct wm_softc *sc)
8463 {
8464 uint16_t checksum;
8465 uint16_t eeprom_data;
8466 #ifdef WM_DEBUG
8467 uint16_t csum_wordaddr, valid_checksum;
8468 #endif
8469 int i;
8470
8471 checksum = 0;
8472
8473 /* Don't check for I211 */
8474 if (sc->sc_type == WM_T_I211)
8475 return 0;
8476
8477 #ifdef WM_DEBUG
8478 if (sc->sc_type == WM_T_PCH_LPT) {
8479 csum_wordaddr = NVM_OFF_COMPAT;
8480 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8481 } else {
8482 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8483 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8484 }
8485
8486 /* Dump EEPROM image for debug */
8487 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8488 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8489 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8490 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8491 if ((eeprom_data & valid_checksum) == 0) {
8492 DPRINTF(WM_DEBUG_NVM,
8493 ("%s: NVM need to be updated (%04x != %04x)\n",
8494 device_xname(sc->sc_dev), eeprom_data,
8495 valid_checksum));
8496 }
8497 }
8498
8499 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8500 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8501 for (i = 0; i < NVM_SIZE; i++) {
8502 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8503 printf("XXXX ");
8504 else
8505 printf("%04hx ", eeprom_data);
8506 if (i % 8 == 7)
8507 printf("\n");
8508 }
8509 }
8510
8511 #endif /* WM_DEBUG */
8512
8513 for (i = 0; i < NVM_SIZE; i++) {
8514 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8515 return 1;
8516 checksum += eeprom_data;
8517 }
8518
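	/* A mismatch is only reported (under WM_DEBUG); it's not fatal. */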
8519 if (checksum != (uint16_t) NVM_CHECKSUM) {
8520 #ifdef WM_DEBUG
8521 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8522 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8523 #endif
8524 }
8525
8526 return 0;
8527 }
8528
8529 /*
8530 * wm_nvm_read:
8531 *
8532 * Read data from the serial EEPROM.
8533 */
8534 static int
8535 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8536 {
8537 int rv;
8538
8539 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8540 return 1;
8541
8542 if (wm_nvm_acquire(sc))
8543 return 1;
8544
8545 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8546 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8547 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8548 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8549 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8550 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8551 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8552 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8553 else
8554 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8555
8556 wm_nvm_release(sc);
8557 return rv;
8558 }
8559
8560 /*
8561 * Hardware semaphores.
 * Very complex...
8563 */
8564
8565 static int
8566 wm_get_swsm_semaphore(struct wm_softc *sc)
8567 {
8568 int32_t timeout;
8569 uint32_t swsm;
8570
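	/*
	 * The semaphore is taken in two stages: first the software
	 * semaphore (SMBI), then the software/firmware semaphore
	 * (SWESMBI).
	 */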
8571 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8572 /* Get the SW semaphore. */
8573 timeout = sc->sc_nvm_wordsize + 1;
8574 while (timeout) {
8575 swsm = CSR_READ(sc, WMREG_SWSM);
8576
8577 if ((swsm & SWSM_SMBI) == 0)
8578 break;
8579
8580 delay(50);
8581 timeout--;
8582 }
8583
8584 if (timeout == 0) {
8585 aprint_error_dev(sc->sc_dev,
8586 "could not acquire SWSM SMBI\n");
8587 return 1;
8588 }
8589 }
8590
8591 /* Get the FW semaphore. */
8592 timeout = sc->sc_nvm_wordsize + 1;
8593 while (timeout) {
8594 swsm = CSR_READ(sc, WMREG_SWSM);
8595 swsm |= SWSM_SWESMBI;
8596 CSR_WRITE(sc, WMREG_SWSM, swsm);
8597 /* If we managed to set the bit we got the semaphore. */
8598 swsm = CSR_READ(sc, WMREG_SWSM);
8599 if (swsm & SWSM_SWESMBI)
8600 break;
8601
8602 delay(50);
8603 timeout--;
8604 }
8605
8606 if (timeout == 0) {
8607 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8608 /* Release semaphores */
8609 wm_put_swsm_semaphore(sc);
8610 return 1;
8611 }
8612 return 0;
8613 }
8614
8615 static void
8616 wm_put_swsm_semaphore(struct wm_softc *sc)
8617 {
8618 uint32_t swsm;
8619
8620 swsm = CSR_READ(sc, WMREG_SWSM);
8621 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8622 CSR_WRITE(sc, WMREG_SWSM, swsm);
8623 }
8624
8625 static int
8626 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8627 {
8628 uint32_t swfw_sync;
8629 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8630 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
8632
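	/*
	 * Each shared resource owns a software/firmware bit pair in
	 * SW_FW_SYNC; the driver may claim the software bit only while
	 * both bits for that resource are clear.
	 */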
8633 for (timeout = 0; timeout < 200; timeout++) {
8634 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8635 if (wm_get_swsm_semaphore(sc)) {
8636 aprint_error_dev(sc->sc_dev,
8637 "%s: failed to get semaphore\n",
8638 __func__);
8639 return 1;
8640 }
8641 }
8642 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8643 if ((swfw_sync & (swmask | fwmask)) == 0) {
8644 swfw_sync |= swmask;
8645 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8646 if (sc->sc_flags & WM_F_LOCK_SWSM)
8647 wm_put_swsm_semaphore(sc);
8648 return 0;
8649 }
8650 if (sc->sc_flags & WM_F_LOCK_SWSM)
8651 wm_put_swsm_semaphore(sc);
8652 delay(5000);
8653 }
8654 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8655 device_xname(sc->sc_dev), mask, swfw_sync);
8656 return 1;
8657 }
8658
8659 static void
8660 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8661 {
8662 uint32_t swfw_sync;
8663
8664 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8665 while (wm_get_swsm_semaphore(sc) != 0)
8666 continue;
8667 }
8668 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8669 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8670 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8671 if (sc->sc_flags & WM_F_LOCK_SWSM)
8672 wm_put_swsm_semaphore(sc);
8673 }
8674
8675 static int
8676 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8677 {
8678 uint32_t ext_ctrl;
	int timeout;
8680
8681 for (timeout = 0; timeout < 200; timeout++) {
8682 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8683 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8684 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8685
8686 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8687 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8688 return 0;
8689 delay(5000);
8690 }
8691 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8692 device_xname(sc->sc_dev), ext_ctrl);
8693 return 1;
8694 }
8695
8696 static void
8697 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8698 {
8699 uint32_t ext_ctrl;
8700 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8701 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8702 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8703 }
8704
8705 static int
8706 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8707 {
8708 int i = 0;
8709 uint32_t reg;
8710
8711 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8712 do {
8713 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8714 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8715 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8716 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8717 break;
8718 delay(2*1000);
8719 i++;
8720 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8721
8722 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8723 wm_put_hw_semaphore_82573(sc);
8724 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8725 device_xname(sc->sc_dev));
8726 return -1;
8727 }
8728
8729 return 0;
8730 }
8731
8732 static void
8733 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8734 {
8735 uint32_t reg;
8736
8737 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8738 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8739 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8740 }
8741
8742 /*
8743 * Management mode and power management related subroutines.
8744 * BMC, AMT, suspend/resume and EEE.
8745 */
8746
8747 static int
8748 wm_check_mng_mode(struct wm_softc *sc)
8749 {
8750 int rv;
8751
8752 switch (sc->sc_type) {
8753 case WM_T_ICH8:
8754 case WM_T_ICH9:
8755 case WM_T_ICH10:
8756 case WM_T_PCH:
8757 case WM_T_PCH2:
8758 case WM_T_PCH_LPT:
8759 rv = wm_check_mng_mode_ich8lan(sc);
8760 break;
8761 case WM_T_82574:
8762 case WM_T_82583:
8763 rv = wm_check_mng_mode_82574(sc);
8764 break;
8765 case WM_T_82571:
8766 case WM_T_82572:
8767 case WM_T_82573:
8768 case WM_T_80003:
8769 rv = wm_check_mng_mode_generic(sc);
8770 break;
8771 default:
		/* Nothing to do */
8773 rv = 0;
8774 break;
8775 }
8776
8777 return rv;
8778 }
8779
8780 static int
8781 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8782 {
8783 uint32_t fwsm;
8784
8785 fwsm = CSR_READ(sc, WMREG_FWSM);
8786
8787 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8788 return 1;
8789
8790 return 0;
8791 }
8792
8793 static int
8794 wm_check_mng_mode_82574(struct wm_softc *sc)
8795 {
8796 uint16_t data;
8797
8798 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8799
8800 if ((data & NVM_CFG2_MNGM_MASK) != 0)
8801 return 1;
8802
8803 return 0;
8804 }
8805
8806 static int
8807 wm_check_mng_mode_generic(struct wm_softc *sc)
8808 {
8809 uint32_t fwsm;
8810
8811 fwsm = CSR_READ(sc, WMREG_FWSM);
8812
8813 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8814 return 1;
8815
8816 return 0;
8817 }
8818
8819 static int
8820 wm_enable_mng_pass_thru(struct wm_softc *sc)
8821 {
8822 uint32_t manc, fwsm, factps;
8823
8824 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8825 return 0;
8826
8827 manc = CSR_READ(sc, WMREG_MANC);
8828
8829 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8830 device_xname(sc->sc_dev), manc));
8831 if ((manc & MANC_RECV_TCO_EN) == 0)
8832 return 0;
8833
8834 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8835 fwsm = CSR_READ(sc, WMREG_FWSM);
8836 factps = CSR_READ(sc, WMREG_FACTPS);
8837 if (((factps & FACTPS_MNGCG) == 0)
8838 && ((fwsm & FWSM_MODE_MASK)
8839 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8840 return 1;
8841 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8842 uint16_t data;
8843
8844 factps = CSR_READ(sc, WMREG_FACTPS);
8845 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8846 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8847 device_xname(sc->sc_dev), factps, data));
8848 if (((factps & FACTPS_MNGCG) == 0)
8849 && ((data & NVM_CFG2_MNGM_MASK)
8850 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
8851 return 1;
8852 } else if (((manc & MANC_SMBUS_EN) != 0)
8853 && ((manc & MANC_ASF_EN) == 0))
8854 return 1;
8855
8856 return 0;
8857 }
8858
8859 static int
8860 wm_check_reset_block(struct wm_softc *sc)
8861 {
8862 uint32_t reg;
8863
8864 switch (sc->sc_type) {
8865 case WM_T_ICH8:
8866 case WM_T_ICH9:
8867 case WM_T_ICH10:
8868 case WM_T_PCH:
8869 case WM_T_PCH2:
8870 case WM_T_PCH_LPT:
8871 reg = CSR_READ(sc, WMREG_FWSM);
8872 if ((reg & FWSM_RSPCIPHY) != 0)
8873 return 0;
8874 else
8875 return -1;
8877 case WM_T_82571:
8878 case WM_T_82572:
8879 case WM_T_82573:
8880 case WM_T_82574:
8881 case WM_T_82583:
8882 case WM_T_80003:
8883 reg = CSR_READ(sc, WMREG_MANC);
8884 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8885 return -1;
8886 else
8887 return 0;
8889 default:
8890 /* no problem */
8891 break;
8892 }
8893
8894 return 0;
8895 }
8896
8897 static void
8898 wm_get_hw_control(struct wm_softc *sc)
8899 {
8900 uint32_t reg;
8901
8902 switch (sc->sc_type) {
8903 case WM_T_82573:
8904 reg = CSR_READ(sc, WMREG_SWSM);
8905 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8906 break;
8907 case WM_T_82571:
8908 case WM_T_82572:
8909 case WM_T_82574:
8910 case WM_T_82583:
8911 case WM_T_80003:
8912 case WM_T_ICH8:
8913 case WM_T_ICH9:
8914 case WM_T_ICH10:
8915 case WM_T_PCH:
8916 case WM_T_PCH2:
8917 case WM_T_PCH_LPT:
8918 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8919 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8920 break;
8921 default:
8922 break;
8923 }
8924 }
8925
8926 static void
8927 wm_release_hw_control(struct wm_softc *sc)
8928 {
8929 uint32_t reg;
8930
8931 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8932 return;
8933
8934 if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
8938 } else {
8939 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8940 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8941 }
8942 }
8943
8944 static void
8945 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8946 {
8947 uint32_t reg;
8948
8949 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8950
8951 if (on != 0)
8952 reg |= EXTCNFCTR_GATE_PHY_CFG;
8953 else
8954 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8955
8956 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8957 }
8958
8959 static void
8960 wm_smbustopci(struct wm_softc *sc)
8961 {
8962 uint32_t fwsm;
8963
8964 fwsm = CSR_READ(sc, WMREG_FWSM);
8965 if (((fwsm & FWSM_FW_VALID) == 0)
8966 && ((wm_check_reset_block(sc) == 0))) {
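		/*
		 * Toggle the LANPHYPC pin: drive the value low with the
		 * override asserted to switch the PHY from SMBus to PCIe
		 * mode, then release the override and give the PHY time
		 * to settle.
		 */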
8967 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8968 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8969 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8970 CSR_WRITE_FLUSH(sc);
8971 delay(10);
8972 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8973 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8974 CSR_WRITE_FLUSH(sc);
8975 delay(50*1000);
8976
8977 /*
8978 * Gate automatic PHY configuration by hardware on non-managed
8979 * 82579
8980 */
8981 if (sc->sc_type == WM_T_PCH2)
8982 wm_gate_hw_phy_config_ich8lan(sc, 1);
8983 }
8984 }
8985
8986 static void
8987 wm_init_manageability(struct wm_softc *sc)
8988 {
8989
8990 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8991 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8992 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8993
8994 /* Disable hardware interception of ARP */
8995 manc &= ~MANC_ARP_EN;
8996
8997 /* Enable receiving management packets to the host */
8998 if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}
9004
9005 CSR_WRITE(sc, WMREG_MANC, manc);
9006 }
9007 }
9008
9009 static void
9010 wm_release_manageability(struct wm_softc *sc)
9011 {
9012
9013 if (sc->sc_flags & WM_F_HAS_MANAGE) {
9014 uint32_t manc = CSR_READ(sc, WMREG_MANC);
9015
9016 manc |= MANC_ARP_EN;
9017 if (sc->sc_type >= WM_T_82571)
9018 manc &= ~MANC_EN_MNG2HOST;
9019
9020 CSR_WRITE(sc, WMREG_MANC, manc);
9021 }
9022 }
9023
9024 static void
9025 wm_get_wakeup(struct wm_softc *sc)
9026 {
9027
9028 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9029 switch (sc->sc_type) {
9030 case WM_T_82573:
9031 case WM_T_82583:
9032 sc->sc_flags |= WM_F_HAS_AMT;
9033 /* FALLTHROUGH */
9034 case WM_T_80003:
9035 case WM_T_82541:
9036 case WM_T_82547:
9037 case WM_T_82571:
9038 case WM_T_82572:
9039 case WM_T_82574:
9040 case WM_T_82575:
9041 case WM_T_82576:
9042 case WM_T_82580:
9043 case WM_T_I350:
9044 case WM_T_I354:
9045 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9046 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9047 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9048 break;
9049 case WM_T_ICH8:
9050 case WM_T_ICH9:
9051 case WM_T_ICH10:
9052 case WM_T_PCH:
9053 case WM_T_PCH2:
9054 case WM_T_PCH_LPT:
9055 sc->sc_flags |= WM_F_HAS_AMT;
9056 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9057 break;
9058 default:
9059 break;
9060 }
9061
9062 /* 1: HAS_MANAGE */
9063 if (wm_enable_mng_pass_thru(sc) != 0)
9064 sc->sc_flags |= WM_F_HAS_MANAGE;
9065
9066 #ifdef WM_DEBUG
9067 printf("\n");
9068 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9069 printf("HAS_AMT,");
9070 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9071 printf("ARC_SUBSYS_VALID,");
9072 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9073 printf("ASF_FIRMWARE_PRES,");
9074 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9075 printf("HAS_MANAGE,");
9076 printf("\n");
9077 #endif
	/*
	 * Note that the WOL flags are set later, after the EEPROM handling
	 * is done.
	 */
9082 }
9083
9084 #ifdef WM_WOL
9085 /* WOL in the newer chipset interfaces (pchlan) */
9086 static void
9087 wm_enable_phy_wakeup(struct wm_softc *sc)
9088 {
9089 #if 0
9090 uint16_t preg;
9091
9092 /* Copy MAC RARs to PHY RARs */
9093
9094 /* Copy MAC MTA to PHY MTA */
9095
9096 /* Configure PHY Rx Control register */
9097
9098 /* Enable PHY wakeup in MAC register */
9099
9100 /* Configure and enable PHY wakeup in PHY registers */
9101
9102 /* Activate PHY wakeup */
9103
9104 /* XXX */
9105 #endif
9106 }
9107
9108 /* Power down workaround on D3 */
9109 static void
9110 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9111 {
9112 uint32_t reg;
9113 int i;
9114
9115 for (i = 0; i < 2; i++) {
9116 /* Disable link */
9117 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9118 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9119 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9120
9121 /*
9122 * Call gig speed drop workaround on Gig disable before
9123 * accessing any PHY registers
9124 */
9125 if (sc->sc_type == WM_T_ICH8)
9126 wm_gig_downshift_workaround_ich8lan(sc);
9127
9128 /* Write VR power-down enable */
9129 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9130 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9131 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9132 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9133
9134 /* Read it back and test */
9135 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9136 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9137 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9138 break;
9139
9140 /* Issue PHY reset and repeat at most one more time */
9141 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9142 }
9143 }
9144
9145 static void
9146 wm_enable_wakeup(struct wm_softc *sc)
9147 {
9148 uint32_t reg, pmreg;
9149 pcireg_t pmode;
9150
9151 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9152 &pmreg, NULL) == 0)
9153 return;
9154
9155 /* Advertise the wakeup capability */
9156 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9157 | CTRL_SWDPIN(3));
9158 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9159
9160 /* ICH workaround */
9161 switch (sc->sc_type) {
9162 case WM_T_ICH8:
9163 case WM_T_ICH9:
9164 case WM_T_ICH10:
9165 case WM_T_PCH:
9166 case WM_T_PCH2:
9167 case WM_T_PCH_LPT:
9168 /* Disable gig during WOL */
9169 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9170 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9171 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9172 if (sc->sc_type == WM_T_PCH)
9173 wm_gmii_reset(sc);
9174
9175 /* Power down workaround */
9176 if (sc->sc_phytype == WMPHY_82577) {
9177 struct mii_softc *child;
9178
9179 /* Assume that the PHY is copper */
9180 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9181 if (child->mii_mpd_rev <= 2)
9182 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9183 (768 << 5) | 25, 0x0444); /* magic num */
9184 }
9185 break;
9186 default:
9187 break;
9188 }
9189
9190 /* Keep the laser running on fiber adapters */
9191 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
9192 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
9193 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9194 reg |= CTRL_EXT_SWDPIN(3);
9195 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9196 }
9197
9198 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9199 #if 0 /* for the multicast packet */
9200 reg |= WUFC_MC;
9201 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9202 #endif
9203
9204 if (sc->sc_type == WM_T_PCH) {
9205 wm_enable_phy_wakeup(sc);
9206 } else {
9207 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9208 CSR_WRITE(sc, WMREG_WUFC, reg);
9209 }
9210
9211 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9212 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9213 || (sc->sc_type == WM_T_PCH2))
9214 && (sc->sc_phytype == WMPHY_IGP_3))
9215 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9216
9217 /* Request PME */
9218 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9219 #if 0
9220 /* Disable WOL */
9221 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9222 #else
9223 /* For WOL */
9224 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9225 #endif
9226 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9227 }
9228 #endif /* WM_WOL */
9229
9230 /* EEE */
9231
9232 static void
9233 wm_set_eee_i350(struct wm_softc *sc)
9234 {
9235 uint32_t ipcnfg, eeer;
9236
9237 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9238 eeer = CSR_READ(sc, WMREG_EEER);
9239
9240 if ((sc->sc_flags & WM_F_EEE) != 0) {
9241 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9242 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9243 | EEER_LPI_FC);
9244 } else {
9245 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9246 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9247 | EEER_LPI_FC);
9248 }
9249
9250 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9251 CSR_WRITE(sc, WMREG_EEER, eeer);
9252 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9253 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9254 }
9255
9256 /*
9257 * Workarounds (mainly PHY related).
9258 * Basically, PHY's workarounds are in the PHY drivers.
9259 */
9260
9261 /* Work-around for 82566 Kumeran PCS lock loss */
9262 static void
9263 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9264 {
9265 int miistatus, active, i;
9266 int reg;
9267
9268 miistatus = sc->sc_mii.mii_media_status;
9269
	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;
9273
9274 active = sc->sc_mii.mii_media_active;
9275
9276 /* Nothing to do if the link is other than 1Gbps */
9277 if (IFM_SUBTYPE(active) != IFM_1000_T)
9278 return;
9279
	for (i = 0; i < 10; i++) {
		/* Read twice: once to clear, again to get the new status */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out; /* GOOD! */
9286
9287 /* Reset the PHY */
9288 wm_gmii_reset(sc);
9289 delay(5*1000);
9290 }
9291
9292 /* Disable GigE link negotiation */
9293 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9294 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9295 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9296
9297 /*
9298 * Call gig speed drop workaround on Gig disable before accessing
9299 * any PHY registers.
9300 */
9301 wm_gig_downshift_workaround_ich8lan(sc);
9302
9303 out:
9304 return;
9305 }
9306
9307 /* WOL from S5 stops working */
9308 static void
9309 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9310 {
9311 uint16_t kmrn_reg;
9312
9313 /* Only for igp3 */
9314 if (sc->sc_phytype == WMPHY_IGP_3) {
9315 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9316 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9317 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9318 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9319 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9320 }
9321 }
9322
9323 /*
9324 * Workaround for pch's PHYs
9325 * XXX should be moved to new PHY driver?
9326 */
9327 static void
9328 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9329 {
9330 if (sc->sc_phytype == WMPHY_82577)
9331 wm_set_mdio_slow_mode_hv(sc);
9332
9333 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9334
9335 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9336
9337 /* 82578 */
9338 if (sc->sc_phytype == WMPHY_82578) {
9339 /* PCH rev. < 3 */
9340 if (sc->sc_rev < 3) {
9341 /* XXX 6 bit shift? Why? Is it page2? */
9342 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9343 0x66c0);
9344 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9345 0xffff);
9346 }
9347
9348 /* XXX phy rev. < 2 */
9349 }
9350
9351 /* Select page 0 */
9352
9353 /* XXX acquire semaphore */
9354 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9355 /* XXX release semaphore */
9356
9357 /*
9358 * Configure the K1 Si workaround during phy reset assuming there is
9359 * link so that it disables K1 if link is in 1Gbps.
9360 */
9361 wm_k1_gig_workaround_hv(sc, 1);
9362 }
9363
9364 static void
9365 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9366 {
9367
9368 wm_set_mdio_slow_mode_hv(sc);
9369 }
9370
9371 static void
9372 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9373 {
9374 int k1_enable = sc->sc_nvm_k1_enabled;
9375
9376 /* XXX acquire semaphore */
9377
9378 if (link) {
9379 k1_enable = 0;
9380
9381 /* Link stall fix for link up */
9382 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9383 } else {
9384 /* Link stall fix for link down */
9385 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9386 }
9387
9388 wm_configure_k1_ich8lan(sc, k1_enable);
9389
9390 /* XXX release semaphore */
9391 }
9392
9393 static void
9394 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9395 {
9396 uint32_t reg;
9397
9398 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9399 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9400 reg | HV_KMRN_MDIO_SLOW);
9401 }
9402
9403 static void
9404 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9405 {
9406 uint32_t ctrl, ctrl_ext, tmp;
9407 uint16_t kmrn_reg;
9408
9409 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9410
9411 if (k1_enable)
9412 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9413 else
9414 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9415
9416 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9417
9418 delay(20);
9419
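	/*
	 * Briefly force the MAC speed setting (via SPD_BYPS) while the
	 * K1 change takes effect, then restore the original CTRL and
	 * CTRL_EXT values.
	 */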
9420 ctrl = CSR_READ(sc, WMREG_CTRL);
9421 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9422
9423 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9424 tmp |= CTRL_FRCSPD;
9425
9426 CSR_WRITE(sc, WMREG_CTRL, tmp);
9427 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9428 CSR_WRITE_FLUSH(sc);
9429 delay(20);
9430
9431 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9432 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9433 CSR_WRITE_FLUSH(sc);
9434 delay(20);
9435 }
9436
9437 /* special case - for 82575 - need to do manual init ... */
9438 static void
9439 wm_reset_init_script_82575(struct wm_softc *sc)
9440 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
9445
9446 /* SerDes configuration via SERDESCTRL */
9447 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9448 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9449 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9450 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9451
9452 /* CCM configuration via CCMCTL register */
9453 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9454 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9455
9456 /* PCIe lanes configuration */
9457 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9458 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9459 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9460 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9461
9462 /* PCIe PLL Configuration */
9463 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9464 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9465 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9466 }
9467