/*	$NetBSD: if_wm.c,v 1.283 2014/07/28 06:36:09 ozaki-r Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.283 2014/07/28 06:36:09 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK	0x01
#define WM_DEBUG_TX	0x02
#define WM_DEBUG_RX	0x04
#define WM_DEBUG_GMII	0x08
#define WM_DEBUG_MANAGE	0x10
#define WM_DEBUG_NVM	0x20
int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size. Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet. Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define WM_NTXSEGS		256
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
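
/*
 * Illustrative note (added commentary, not from the original sources):
 * because the ring and queue sizes are powers of two, the _NEXT macros
 * wrap with a mask instead of a modulo.  For example, with
 * WM_NTXDESC(sc) == 256, WM_NEXTTX(sc, 255) == 0.
 */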

#define WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size. We have one Rx buffer for normal
 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
 * packet. We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip. We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors. Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
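
/*
 * Added note (editor's commentary): this table is presumably indexed by
 * the function ID (sc_funcid) to select the per-PHY software/firmware
 * semaphore handed to wm_get_swfw_semaphore() on multi-port chips.
 */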

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall; /* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
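
/*
 * Usage sketch (added commentary, mirroring the *_locked entry points
 * declared below): callers take the lock, re-check sc_stopping, then
 * call the _locked variant, e.g.:
 *
 *	WM_TX_LOCK(sc);
 *	if (!sc->sc_stopping)
 *		wm_start_locked(ifp);
 *	WM_TX_UNLOCK(sc);
 *
 * When the lock pointers are NULL (presumably the non-WM_MPSAFE case)
 * these macros collapse to no-ops and the _LOCKED checks always pass.
 */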

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define WM_RXCHAIN_RESET(sc) \
do { \
	(sc)->sc_rxtailp = &(sc)->sc_rxhead; \
	*(sc)->sc_rxtailp = NULL; \
	(sc)->sc_rxlen = 0; \
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(sc, m) \
do { \
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
	(sc)->sc_rxtailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)
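
/*
 * Added note (editor's commentary): sc_rxtailp always points at the
 * m_next slot of the last mbuf in the chain, so WM_RXCHAIN_LINK()
 * appends a buffer in O(1); WM_RXCHAIN_RESET() re-arms the chain once
 * a complete packet has been handed up (see wm_rxintr()).
 */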

#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc) \
	(void) CSR_READ((sc), WMREG_STATUS)
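
/*
 * Added note (editor's commentary): CSR_WRITE_FLUSH() reads a harmless
 * register (STATUS) to force any posted PCI writes ahead of it to reach
 * the chip before we proceed -- the usual read-back idiom for ordering
 * posted writes.
 */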

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
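
/*
 * Added note (editor's commentary): the _LO/_HI pairs split a
 * descriptor-ring bus address into the two 32-bit halves programmed
 * into the chip's base-address registers; on platforms with a 32-bit
 * bus_addr_t the high half is simply 0.
 */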

#define WM_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
 \
	__x = (x); \
	__n = (n); \
 \
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > WM_NTXDESC(sc)) { \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
		    (WM_NTXDESC(sc) - __x), (ops)); \
		__n -= (WM_NTXDESC(sc) - __x); \
		__x = 0; \
	} \
 \
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
} while (/*CONSTCOND*/0)
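
/*
 * Example (added commentary): with a 256-descriptor ring,
 * WM_CDTXSYNC(sc, 250, 10, ops) issues two bus_dmamap_sync() calls,
 * one covering descriptors 250-255 and one covering descriptors 0-3.
 */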

#define WM_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
} while (/*CONSTCOND*/0)

#define WM_INIT_RXDESC(sc, x) \
do { \
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
	struct mbuf *__m = __rxs->rxs_mbuf; \
 \
	/* \
	 * Note: We scoot the packet forward 2 bytes in the buffer \
	 * so that the payload after the Ethernet header is aligned \
	 * to a 4-byte boundary. \
	 * \
	 * XXX BRAINDAMAGE ALERT! \
	 * The stupid chip uses the same size for every buffer, which \
	 * is set in the Receive Control register. We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)! For this \
	 * reason, we can't "scoot" packets longer than the standard \
	 * Ethernet MTU. On strict-alignment platforms, if the total \
	 * size exceeds (2K - 2) we set align_tweak to 0 and let \
	 * the upper layer copy the headers. \
	 */ \
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
 \
	wm_set_dma_addr(&__rxd->wrx_addr, \
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0; \
	__rxd->wrx_cksum = 0; \
	__rxd->wrx_status = 0; \
	__rxd->wrx_errors = 0; \
	__rxd->wrx_special = 0; \
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
 \
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
} while (/*CONSTCOND*/0)
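
/*
 * Added note (editor's commentary): with sc_align_tweak == 2, the
 * 14-byte Ethernet header ends at buffer offset 16, so the IP header
 * that follows lands on a 4-byte boundary, as the comment above
 * describes.
 */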

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int wm_match(device_t, cfdata_t, void *);
static void wm_attach(device_t, device_t, void *);
static int wm_detach(device_t, int);
static bool wm_suspend(device_t, const pmf_qual_t *);
static bool wm_resume(device_t, const pmf_qual_t *);
static void wm_watchdog(struct ifnet *);
static void wm_tick(void *);
static int wm_ifflags_cb(struct ethercom *);
static int wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int wm_check_alt_mac_addr(struct wm_softc *);
static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
static void wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void wm_set_vlan(struct wm_softc *);
static void wm_set_pcie_completion_timeout(struct wm_softc *);
static void wm_get_auto_rd_done(struct wm_softc *);
static void wm_lan_init_done(struct wm_softc *);
static void wm_get_cfg_done(struct wm_softc *);
static void wm_reset(struct wm_softc *);
static int wm_add_rxbuf(struct wm_softc *, int);
static void wm_rxdrain(struct wm_softc *);
static int wm_init(struct ifnet *);
static int wm_init_locked(struct ifnet *);
static void wm_stop(struct ifnet *, int);
static void wm_stop_locked(struct ifnet *, int);
static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint8_t *);
static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void wm_82547_txfifo_stall(void *);
static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void wm_start(struct ifnet *);
static void wm_start_locked(struct ifnet *);
static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint32_t *, bool *);
static void wm_nq_start(struct ifnet *);
static void wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void wm_txintr(struct wm_softc *);
static void wm_rxintr(struct wm_softc *);
static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void wm_linkintr(struct wm_softc *, uint32_t);
static int wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI (and SERDES)
 */
/* GMII related */
static void wm_gmii_reset(struct wm_softc *);
static int wm_get_phy_id_82575(struct wm_softc *);
static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int wm_gmii_mediachange(struct ifnet *);
static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
static int wm_gmii_i82543_readreg(device_t, int, int);
static void wm_gmii_i82543_writereg(device_t, int, int, int);
static int wm_gmii_i82544_readreg(device_t, int, int);
static void wm_gmii_i82544_writereg(device_t, int, int, int);
static int wm_gmii_i80003_readreg(device_t, int, int);
static void wm_gmii_i80003_writereg(device_t, int, int, int);
static int wm_gmii_bm_readreg(device_t, int, int);
static void wm_gmii_bm_writereg(device_t, int, int, int);
static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int wm_gmii_hv_readreg(device_t, int, int);
static void wm_gmii_hv_writereg(device_t, int, int, int);
static int wm_gmii_82580_readreg(device_t, int, int);
static void wm_gmii_82580_writereg(device_t, int, int, int);
static void wm_gmii_statchg(struct ifnet *);
static int wm_kmrn_readreg(struct wm_softc *, int);
static void wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool wm_sgmii_uses_mdio(struct wm_softc *);
static int wm_sgmii_readreg(device_t, int, int);
static void wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int wm_check_for_link(struct wm_softc *);
static void wm_tbi_mediainit(struct wm_softc *);
static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int wm_tbi_mediachange(struct ifnet *);
static void wm_tbi_set_linkled(struct wm_softc *);
static void wm_tbi_check_link(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Both spi and uwire */
static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
/* Microwire */
static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static void wm_set_spiaddrbits(struct wm_softc *);
static int wm_nvm_ready_spi(struct wm_softc *);
static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* For use with EERD */
static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
	unsigned int *);
static int32_t wm_ich8_cycle_init(struct wm_softc *);
static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int wm_nvm_acquire(struct wm_softc *);
static void wm_nvm_release(struct wm_softc *);
static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int wm_nvm_validate_checksum(struct wm_softc *);
static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int wm_get_swsm_semaphore(struct wm_softc *);
static void wm_put_swsm_semaphore(struct wm_softc *);
static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int wm_get_swfwhw_semaphore(struct wm_softc *);
static void wm_put_swfwhw_semaphore(struct wm_softc *);
static int wm_get_hw_semaphore_82573(struct wm_softc *);
static void wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int wm_check_mng_mode(struct wm_softc *);
static int wm_check_mng_mode_ich8lan(struct wm_softc *);
static int wm_check_mng_mode_82574(struct wm_softc *);
static int wm_check_mng_mode_generic(struct wm_softc *);
static int wm_enable_mng_pass_thru(struct wm_softc *);
static int wm_check_reset_block(struct wm_softc *);
static void wm_get_hw_control(struct wm_softc *);
static void wm_release_hw_control(struct wm_softc *);
static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void wm_smbustopci(struct wm_softc *);
static void wm_init_manageability(struct wm_softc *);
static void wm_release_manageability(struct wm_softc *);
static void wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void wm_enable_phy_wakeup(struct wm_softc *);
static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds are in the PHY drivers.
 */
static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void wm_configure_k1_ich8lan(struct wm_softc *, int);
static void wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	wm_chip_type wmp_type;
	int wmp_flags;
#define WMP_F_1000X	0x01
#define WMP_F_1000T	0x02
#define WMP_F_SERDES	0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_1000T, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580, WMP_F_1000X },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350, WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350, WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210, WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_1000T },
	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
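
/*
 * Added note (editor's commentary): wm_io_read()/wm_io_write() use the
 * chip's indirect I/O window: the register offset is written through
 * the first dword of the I/O BAR (offset 0) and the data is then read
 * or written through the second (offset 4).  The offsets are taken
 * from the code above; the register names are not asserted here.
 */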

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's not a problem, because those newer chips
			 * don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
1450 sc->sc_flags |= WM_F_CSA;
1451 sc->sc_bus_speed = 66;
1452 aprint_verbose_dev(sc->sc_dev,
1453 "Communication Streaming Architecture\n");
1454 if (sc->sc_type == WM_T_82547) {
1455 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1456 callout_setfunc(&sc->sc_txfifo_ch,
1457 wm_82547_txfifo_stall, sc);
1458 aprint_verbose_dev(sc->sc_dev,
1459 "using 82547 Tx FIFO stall work-around\n");
1460 }
1461 } else if (sc->sc_type >= WM_T_82571) {
1462 sc->sc_flags |= WM_F_PCIE;
1463 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1464 && (sc->sc_type != WM_T_ICH10)
1465 && (sc->sc_type != WM_T_PCH)
1466 && (sc->sc_type != WM_T_PCH2)
1467 && (sc->sc_type != WM_T_PCH_LPT)) {
1468 /* ICH* and PCH* have no PCIe capability registers */
1469 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1470 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1471 NULL) == 0)
1472 aprint_error_dev(sc->sc_dev,
1473 "unable to find PCIe capability\n");
1474 }
1475 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1476 } else {
1477 reg = CSR_READ(sc, WMREG_STATUS);
1478 if (reg & STATUS_BUS64)
1479 sc->sc_flags |= WM_F_BUS64;
1480 if ((reg & STATUS_PCIX_MODE) != 0) {
1481 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1482
1483 sc->sc_flags |= WM_F_PCIX;
1484 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1485 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1486 aprint_error_dev(sc->sc_dev,
1487 "unable to find PCIX capability\n");
1488 else if (sc->sc_type != WM_T_82545_3 &&
1489 sc->sc_type != WM_T_82546_3) {
1490 /*
1491 * Work around a problem caused by the BIOS
1492 * setting the max memory read byte count
1493 * incorrectly.
1494 */
1495 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1496 sc->sc_pcixe_capoff + PCIX_CMD);
1497 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1498 sc->sc_pcixe_capoff + PCIX_STATUS);
1499
1500 bytecnt =
1501 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1502 PCIX_CMD_BYTECNT_SHIFT;
1503 maxb =
1504 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1505 PCIX_STATUS_MAXB_SHIFT;
1506 if (bytecnt > maxb) {
1507 aprint_verbose_dev(sc->sc_dev,
1508 "resetting PCI-X MMRBC: %d -> %d\n",
1509 512 << bytecnt, 512 << maxb);
1510 pcix_cmd = (pcix_cmd &
1511 ~PCIX_CMD_BYTECNT_MASK) |
1512 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1513 pci_conf_write(pa->pa_pc, pa->pa_tag,
1514 sc->sc_pcixe_capoff + PCIX_CMD,
1515 pcix_cmd);
1516 }
1517 }
1518 }
1519 /*
1520 * The quad port adapter is special; it has a PCIX-PCIX
1521 * bridge on the board, and can run the secondary bus at
1522 * a higher speed.
1523 */
1524 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1525 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1526 : 66;
1527 } else if (sc->sc_flags & WM_F_PCIX) {
1528 switch (reg & STATUS_PCIXSPD_MASK) {
1529 case STATUS_PCIXSPD_50_66:
1530 sc->sc_bus_speed = 66;
1531 break;
1532 case STATUS_PCIXSPD_66_100:
1533 sc->sc_bus_speed = 100;
1534 break;
1535 case STATUS_PCIXSPD_100_133:
1536 sc->sc_bus_speed = 133;
1537 break;
1538 default:
1539 aprint_error_dev(sc->sc_dev,
1540 "unknown PCIXSPD %d; assuming 66MHz\n",
1541 reg & STATUS_PCIXSPD_MASK);
1542 sc->sc_bus_speed = 66;
1543 break;
1544 }
1545 } else
1546 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1547 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1548 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1549 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1550 }
1551
1552 /*
1553 * Allocate the control data structures, and create and load the
1554 * DMA map for it.
1555 *
1556 * NOTE: All Tx descriptors must be in the same 4G segment of
1557 * memory. So must Rx descriptors. We simplify by allocating
1558 * both sets within the same 4G segment.
1559 */
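/*
 * The 4G constraint is enforced below by passing a 0x100000000
 * boundary to bus_dmamem_alloc(), so the returned segment can
 * never straddle a 4G line.
 */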
1560 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1561 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1562 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1563 sizeof(struct wm_control_data_82542) :
1564 sizeof(struct wm_control_data_82544);
1565 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1566 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1567 &sc->sc_cd_rseg, 0)) != 0) {
1568 aprint_error_dev(sc->sc_dev,
1569 "unable to allocate control data, error = %d\n",
1570 error);
1571 goto fail_0;
1572 }
1573
1574 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1575 sc->sc_cd_rseg, sc->sc_cd_size,
1576 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1577 aprint_error_dev(sc->sc_dev,
1578 "unable to map control data, error = %d\n", error);
1579 goto fail_1;
1580 }
1581
1582 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1583 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1584 aprint_error_dev(sc->sc_dev,
1585 "unable to create control data DMA map, error = %d\n",
1586 error);
1587 goto fail_2;
1588 }
1589
1590 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1591 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1592 aprint_error_dev(sc->sc_dev,
1593 "unable to load control data DMA map, error = %d\n",
1594 error);
1595 goto fail_3;
1596 }
1597
1598 /* Create the transmit buffer DMA maps. */
1599 WM_TXQUEUELEN(sc) =
1600 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1601 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1602 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1603 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1604 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1605 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1606 aprint_error_dev(sc->sc_dev,
1607 "unable to create Tx DMA map %d, error = %d\n",
1608 i, error);
1609 goto fail_4;
1610 }
1611 }
1612
1613 /* Create the receive buffer DMA maps. */
1614 for (i = 0; i < WM_NRXDESC; i++) {
1615 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1616 MCLBYTES, 0, 0,
1617 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1618 aprint_error_dev(sc->sc_dev,
1619 "unable to create Rx DMA map %d error = %d\n",
1620 i, error);
1621 goto fail_5;
1622 }
1623 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1624 }
1625
1626 /* clear interesting stat counters */
1627 CSR_READ(sc, WMREG_COLC);
1628 CSR_READ(sc, WMREG_RXERRC);
1629
1630 /* Switch PHY control from SMBus to PCIe */
1631 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1632 || (sc->sc_type == WM_T_PCH_LPT))
1633 wm_smbustopci(sc);
1634
1635 /* Reset the chip to a known state. */
1636 wm_reset(sc);
1637
1638 /* Get some information about the EEPROM. */
1639 switch (sc->sc_type) {
1640 case WM_T_82542_2_0:
1641 case WM_T_82542_2_1:
1642 case WM_T_82543:
1643 case WM_T_82544:
1644 /* Microwire */
1645 sc->sc_ee_addrbits = 6;
1646 break;
1647 case WM_T_82540:
1648 case WM_T_82545:
1649 case WM_T_82545_3:
1650 case WM_T_82546:
1651 case WM_T_82546_3:
1652 /* Microwire */
1653 reg = CSR_READ(sc, WMREG_EECD);
1654 if (reg & EECD_EE_SIZE)
1655 sc->sc_ee_addrbits = 8;
1656 else
1657 sc->sc_ee_addrbits = 6;
1658 sc->sc_flags |= WM_F_LOCK_EECD;
1659 break;
1660 case WM_T_82541:
1661 case WM_T_82541_2:
1662 case WM_T_82547:
1663 case WM_T_82547_2:
1664 reg = CSR_READ(sc, WMREG_EECD);
1665 if (reg & EECD_EE_TYPE) {
1666 /* SPI */
1667 wm_set_spiaddrbits(sc);
1668 } else
1669 /* Microwire */
1670 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1671 sc->sc_flags |= WM_F_LOCK_EECD;
1672 break;
1673 case WM_T_82571:
1674 case WM_T_82572:
1675 /* SPI */
1676 wm_set_spiaddrbits(sc);
1677 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1678 break;
1679 case WM_T_82573:
1680 sc->sc_flags |= WM_F_LOCK_SWSM;
1681 /* FALLTHROUGH */
1682 case WM_T_82574:
1683 case WM_T_82583:
1684 if (wm_nvm_is_onboard_eeprom(sc) == 0)
1685 sc->sc_flags |= WM_F_EEPROM_FLASH;
1686 else {
1687 /* SPI */
1688 wm_set_spiaddrbits(sc);
1689 }
1690 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1691 break;
1692 case WM_T_82575:
1693 case WM_T_82576:
1694 case WM_T_82580:
1695 case WM_T_82580ER:
1696 case WM_T_I350:
1697 case WM_T_I354:
1698 case WM_T_80003:
1699 /* SPI */
1700 wm_set_spiaddrbits(sc);
1701 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1702 | WM_F_LOCK_SWSM;
1703 break;
1704 case WM_T_ICH8:
1705 case WM_T_ICH9:
1706 case WM_T_ICH10:
1707 case WM_T_PCH:
1708 case WM_T_PCH2:
1709 case WM_T_PCH_LPT:
1710 /* FLASH */
1711 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1712 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1713 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1714 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1715 aprint_error_dev(sc->sc_dev,
1716 "can't map FLASH registers\n");
1717 return;
1718 }
1719 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1720 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1721 ICH_FLASH_SECTOR_SIZE;
1722 sc->sc_ich8_flash_bank_size =
1723 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1724 sc->sc_ich8_flash_bank_size -=
1725 (reg & ICH_GFPREG_BASE_MASK);
1726 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1727 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
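/*
 * Worked example (register value illustrative): if GFPREG reads
 * 0x00200001, the NVM base is flash sector 1, so flash_base =
 * 1 * ICH_FLASH_SECTOR_SIZE (4096) = 4096 bytes, and the region
 * spans 0x21 - 0x01 = 0x20 sectors = 128KB.  Split over the two
 * NVM banks and counted in 16-bit words, each bank holds
 * 128KB / 2 / 2 = 32768 words.
 */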
1728 break;
1729 case WM_T_I210:
1730 case WM_T_I211:
1731 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1732 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1733 break;
1734 default:
1735 break;
1736 }
1737
1738 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1739 switch (sc->sc_type) {
1740 case WM_T_82571:
1741 case WM_T_82572:
1742 reg = CSR_READ(sc, WMREG_SWSM2);
1743 if ((reg & SWSM2_LOCK) != 0) {
1744 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1745 force_clear_smbi = true;
1746 } else
1747 force_clear_smbi = false;
1748 break;
1749 default:
1750 force_clear_smbi = true;
1751 break;
1752 }
1753 if (force_clear_smbi) {
1754 reg = CSR_READ(sc, WMREG_SWSM);
1755 if ((reg & ~SWSM_SMBI) != 0)
1756 aprint_error_dev(sc->sc_dev,
1757 "Please update the Bootagent\n");
1758 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1759 }
1760
1761 /*
1762 * Defer printing the EEPROM type until after verifying the checksum.
1763 * This allows the EEPROM type to be printed correctly in the case
1764 * that no EEPROM is attached.
1765 */
1766 /*
1767 * Validate the EEPROM checksum. If the checksum fails, flag
1768 * this for later, so we can fail future reads from the EEPROM.
1769 */
1770 if (wm_nvm_validate_checksum(sc)) {
1771 /*
1772 * Read it again: some PCI-e parts fail the
1773 * first check due to the link being in a sleep state.
1774 */
1775 if (wm_nvm_validate_checksum(sc))
1776 sc->sc_flags |= WM_F_EEPROM_INVALID;
1777 }
1778
1779 /* Set device properties (macflags) */
1780 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1781
1782 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1783 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1784 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1785 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1786 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1787 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1788 } else {
1789 if (sc->sc_flags & WM_F_EEPROM_SPI)
1790 eetype = "SPI";
1791 else
1792 eetype = "MicroWire";
1793 aprint_verbose_dev(sc->sc_dev,
1794 "%u word (%d address bits) %s EEPROM\n",
1795 1U << sc->sc_ee_addrbits,
1796 sc->sc_ee_addrbits, eetype);
1797 }
1798
1799 switch (sc->sc_type) {
1800 case WM_T_82571:
1801 case WM_T_82572:
1802 case WM_T_82573:
1803 case WM_T_82574:
1804 case WM_T_82583:
1805 case WM_T_80003:
1806 case WM_T_ICH8:
1807 case WM_T_ICH9:
1808 case WM_T_ICH10:
1809 case WM_T_PCH:
1810 case WM_T_PCH2:
1811 case WM_T_PCH_LPT:
1812 if (wm_check_mng_mode(sc) != 0)
1813 wm_get_hw_control(sc);
1814 break;
1815 default:
1816 break;
1817 }
1818 wm_get_wakeup(sc);
1819 /*
1820 * Read the Ethernet address from the EEPROM, if it was not
1821 * already found in the device properties.
1822 */
1823 ea = prop_dictionary_get(dict, "mac-address");
1824 if (ea != NULL) {
1825 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1826 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1827 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1828 } else {
1829 if (wm_read_mac_addr(sc, enaddr) != 0) {
1830 aprint_error_dev(sc->sc_dev,
1831 "unable to read Ethernet address\n");
1832 return;
1833 }
1834 }
1835
1836 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1837 ether_sprintf(enaddr));
1838
1839 /*
1840 * Read the config info from the EEPROM, and set up various
1841 * bits in the control registers based on their contents.
1842 */
1843 pn = prop_dictionary_get(dict, "i82543-cfg1");
1844 if (pn != NULL) {
1845 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1846 cfg1 = (uint16_t) prop_number_integer_value(pn);
1847 } else {
1848 if (wm_nvm_read(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1849 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1850 return;
1851 }
1852 }
1853
1854 pn = prop_dictionary_get(dict, "i82543-cfg2");
1855 if (pn != NULL) {
1856 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1857 cfg2 = (uint16_t) prop_number_integer_value(pn);
1858 } else {
1859 if (wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1860 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1861 return;
1862 }
1863 }
1864
1865 /* check for WM_F_WOL */
1866 switch (sc->sc_type) {
1867 case WM_T_82542_2_0:
1868 case WM_T_82542_2_1:
1869 case WM_T_82543:
1870 /* dummy? */
1871 eeprom_data = 0;
1872 apme_mask = EEPROM_CFG3_APME;
1873 break;
1874 case WM_T_82544:
1875 apme_mask = EEPROM_CFG2_82544_APM_EN;
1876 eeprom_data = cfg2;
1877 break;
1878 case WM_T_82546:
1879 case WM_T_82546_3:
1880 case WM_T_82571:
1881 case WM_T_82572:
1882 case WM_T_82573:
1883 case WM_T_82574:
1884 case WM_T_82583:
1885 case WM_T_80003:
1886 default:
1887 apme_mask = EEPROM_CFG3_APME;
1888 wm_nvm_read(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1889 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1890 break;
1891 case WM_T_82575:
1892 case WM_T_82576:
1893 case WM_T_82580:
1894 case WM_T_82580ER:
1895 case WM_T_I350:
1896 case WM_T_I354: /* XXX ok? */
1897 case WM_T_ICH8:
1898 case WM_T_ICH9:
1899 case WM_T_ICH10:
1900 case WM_T_PCH:
1901 case WM_T_PCH2:
1902 case WM_T_PCH_LPT:
1903 /* XXX The funcid should be checked on some devices */
1904 apme_mask = WUC_APME;
1905 eeprom_data = CSR_READ(sc, WMREG_WUC);
1906 break;
1907 }
1908
1909 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1910 if ((eeprom_data & apme_mask) != 0)
1911 sc->sc_flags |= WM_F_WOL;
1912 #ifdef WM_DEBUG
1913 if ((sc->sc_flags & WM_F_WOL) != 0)
1914 printf("WOL\n");
1915 #endif
1916
1917 /*
1918 * XXX need special handling for some multiple port cards
1919 * to disable a particular port.
1920 */
1921
1922 if (sc->sc_type >= WM_T_82544) {
1923 pn = prop_dictionary_get(dict, "i82543-swdpin");
1924 if (pn != NULL) {
1925 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1926 swdpin = (uint16_t) prop_number_integer_value(pn);
1927 } else {
1928 if (wm_nvm_read(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1929 aprint_error_dev(sc->sc_dev,
1930 "unable to read SWDPIN\n");
1931 return;
1932 }
1933 }
1934 }
1935
1936 if (cfg1 & EEPROM_CFG1_ILOS)
1937 sc->sc_ctrl |= CTRL_ILOS;
1938 if (sc->sc_type >= WM_T_82544) {
1939 sc->sc_ctrl |=
1940 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1941 CTRL_SWDPIO_SHIFT;
1942 sc->sc_ctrl |=
1943 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1944 CTRL_SWDPINS_SHIFT;
1945 } else {
1946 sc->sc_ctrl |=
1947 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1948 CTRL_SWDPIO_SHIFT;
1949 }
1950
1951 #if 0
1952 if (sc->sc_type >= WM_T_82544) {
1953 if (cfg1 & EEPROM_CFG1_IPS0)
1954 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1955 if (cfg1 & EEPROM_CFG1_IPS1)
1956 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1957 sc->sc_ctrl_ext |=
1958 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1959 CTRL_EXT_SWDPIO_SHIFT;
1960 sc->sc_ctrl_ext |=
1961 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1962 CTRL_EXT_SWDPINS_SHIFT;
1963 } else {
1964 sc->sc_ctrl_ext |=
1965 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1966 CTRL_EXT_SWDPIO_SHIFT;
1967 }
1968 #endif
1969
1970 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1971 #if 0
1972 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1973 #endif
1974
1975 /*
1976 * Set up some register offsets that are different between
1977 * the i82542 and the i82543 and later chips.
1978 */
1979 if (sc->sc_type < WM_T_82543) {
1980 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1981 sc->sc_tdt_reg = WMREG_OLD_TDT;
1982 } else {
1983 sc->sc_rdt_reg = WMREG_RDT;
1984 sc->sc_tdt_reg = WMREG_TDT;
1985 }
1986
1987 if (sc->sc_type == WM_T_PCH) {
1988 uint16_t val;
1989
1990 /* Save the NVM K1 bit setting */
1991 wm_nvm_read(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1992
1993 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1994 sc->sc_nvm_k1_enabled = 1;
1995 else
1996 sc->sc_nvm_k1_enabled = 0;
1997 }
1998
1999 /*
2000 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
2001 * media structures accordingly.
2002 */
2003 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2004 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2005 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2006 || sc->sc_type == WM_T_82573
2007 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2008 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2009 wm_gmii_mediainit(sc, wmp->wmp_product);
2010 } else if (sc->sc_type < WM_T_82543 ||
2011 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2012 if (wmp->wmp_flags & WMP_F_1000T)
2013 aprint_error_dev(sc->sc_dev,
2014 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2015 wm_tbi_mediainit(sc);
2016 } else {
2017 switch (sc->sc_type) {
2018 case WM_T_82575:
2019 case WM_T_82576:
2020 case WM_T_82580:
2021 case WM_T_82580ER:
2022 case WM_T_I350:
2023 case WM_T_I354:
2024 case WM_T_I210:
2025 case WM_T_I211:
2026 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2027 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
2028 case CTRL_EXT_LINK_MODE_1000KX:
2029 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2030 CSR_WRITE(sc, WMREG_CTRL_EXT,
2031 reg | CTRL_EXT_I2C_ENA);
2032 panic("not supported yet\n");
2033 break;
2034 case CTRL_EXT_LINK_MODE_SGMII:
2035 if (wm_sgmii_uses_mdio(sc)) {
2036 aprint_verbose_dev(sc->sc_dev,
2037 "SGMII(MDIO)\n");
2038 sc->sc_flags |= WM_F_SGMII;
2039 wm_gmii_mediainit(sc,
2040 wmp->wmp_product);
2041 break;
2042 }
2043 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2044 /*FALLTHROUGH*/
2045 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2046 aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2047 CSR_WRITE(sc, WMREG_CTRL_EXT,
2048 reg | CTRL_EXT_I2C_ENA);
2049 panic("not supported yet\n");
2050 break;
2051 case CTRL_EXT_LINK_MODE_GMII:
2052 default:
2053 CSR_WRITE(sc, WMREG_CTRL_EXT,
2054 reg & ~CTRL_EXT_I2C_ENA);
2055 wm_gmii_mediainit(sc, wmp->wmp_product);
2056 break;
2057 }
2058 break;
2059 default:
2060 if (wmp->wmp_flags & WMP_F_1000X)
2061 aprint_error_dev(sc->sc_dev,
2062 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2063 wm_gmii_mediainit(sc, wmp->wmp_product);
2064 }
2065 }
2066
2067 ifp = &sc->sc_ethercom.ec_if;
2068 xname = device_xname(sc->sc_dev);
2069 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2070 ifp->if_softc = sc;
2071 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2072 ifp->if_ioctl = wm_ioctl;
2073 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2074 ifp->if_start = wm_nq_start;
2075 else
2076 ifp->if_start = wm_start;
2077 ifp->if_watchdog = wm_watchdog;
2078 ifp->if_init = wm_init;
2079 ifp->if_stop = wm_stop;
2080 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2081 IFQ_SET_READY(&ifp->if_snd);
2082
2083 /* Check for jumbo frame */
2084 switch (sc->sc_type) {
2085 case WM_T_82573:
2086 /* XXX limited to 9234 if ASPM is disabled */
2087 wm_nvm_read(sc, EEPROM_INIT_3GIO_3, 1, &io3);
2088 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
2089 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2090 break;
2091 case WM_T_82571:
2092 case WM_T_82572:
2093 case WM_T_82574:
2094 case WM_T_82575:
2095 case WM_T_82576:
2096 case WM_T_82580:
2097 case WM_T_82580ER:
2098 case WM_T_I350:
2099 case WM_T_I354: /* XXXX ok? */
2100 case WM_T_I210:
2101 case WM_T_I211:
2102 case WM_T_80003:
2103 case WM_T_ICH9:
2104 case WM_T_ICH10:
2105 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2106 case WM_T_PCH_LPT:
2107 /* XXX limited to 9234 */
2108 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2109 break;
2110 case WM_T_PCH:
2111 /* XXX limited to 4096 */
2112 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2113 break;
2114 case WM_T_82542_2_0:
2115 case WM_T_82542_2_1:
2116 case WM_T_82583:
2117 case WM_T_ICH8:
2118 /* No support for jumbo frame */
2119 break;
2120 default:
2121 /* ETHER_MAX_LEN_JUMBO */
2122 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2123 break;
2124 }
2125
2126 /* If we're an i82543 or greater, we can support VLANs. */
2127 if (sc->sc_type >= WM_T_82543)
2128 sc->sc_ethercom.ec_capabilities |=
2129 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2130
2131 /*
2132 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2133 * on i82543 and later.
2134 */
2135 if (sc->sc_type >= WM_T_82543) {
2136 ifp->if_capabilities |=
2137 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2138 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2139 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2140 IFCAP_CSUM_TCPv6_Tx |
2141 IFCAP_CSUM_UDPv6_Tx;
2142 }
2143
2144 /*
2145 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2146 *
2147 * 82541GI (8086:1076) ... no
2148 * 82572EI (8086:10b9) ... yes
2149 */
2150 if (sc->sc_type >= WM_T_82571) {
2151 ifp->if_capabilities |=
2152 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2153 }
2154
2155 /*
2156 * If we're an i82544 or greater (except i82547), we can do
2157 * TCP segmentation offload.
2158 */
2159 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2160 ifp->if_capabilities |= IFCAP_TSOv4;
2161 }
2162
2163 if (sc->sc_type >= WM_T_82571) {
2164 ifp->if_capabilities |= IFCAP_TSOv6;
2165 }
2166
2167 #ifdef WM_MPSAFE
2168 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2169 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2170 #else
2171 sc->sc_tx_lock = NULL;
2172 sc->sc_rx_lock = NULL;
2173 #endif
2174
2175 /* Attach the interface. */
2176 if_attach(ifp);
2177 ether_ifattach(ifp, enaddr);
2178 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2179 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2180
2181 #ifdef WM_EVENT_COUNTERS
2182 /* Attach event counters. */
2183 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2184 NULL, xname, "txsstall");
2185 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2186 NULL, xname, "txdstall");
2187 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2188 NULL, xname, "txfifo_stall");
2189 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2190 NULL, xname, "txdw");
2191 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2192 NULL, xname, "txqe");
2193 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2194 NULL, xname, "rxintr");
2195 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2196 NULL, xname, "linkintr");
2197
2198 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2199 NULL, xname, "rxipsum");
2200 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2201 NULL, xname, "rxtusum");
2202 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2203 NULL, xname, "txipsum");
2204 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2205 NULL, xname, "txtusum");
2206 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2207 NULL, xname, "txtusum6");
2208
2209 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2210 NULL, xname, "txtso");
2211 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2212 NULL, xname, "txtso6");
2213 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2214 NULL, xname, "txtsopain");
2215
2216 for (i = 0; i < WM_NTXSEGS; i++) {
2217 snprintf(wm_txseg_evcnt_names[i],
2218 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2219 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2220 NULL, xname, wm_txseg_evcnt_names[i]);
2221 }
2222
2223 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2224 NULL, xname, "txdrop");
2225
2226 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2227 NULL, xname, "tu");
2228
2229 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2230 NULL, xname, "tx_xoff");
2231 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2232 NULL, xname, "tx_xon");
2233 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2234 NULL, xname, "rx_xoff");
2235 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2236 NULL, xname, "rx_xon");
2237 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2238 NULL, xname, "rx_macctl");
2239 #endif /* WM_EVENT_COUNTERS */
2240
2241 if (pmf_device_register(self, wm_suspend, wm_resume))
2242 pmf_class_network_register(self, ifp);
2243 else
2244 aprint_error_dev(self, "couldn't establish power handler\n");
2245
2246 return;
2247
2248 /*
2249 * Free any resources we've allocated during the failed attach
2250 * attempt. Do this in reverse order and fall through.
2251 */
2252 fail_5:
2253 for (i = 0; i < WM_NRXDESC; i++) {
2254 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2255 bus_dmamap_destroy(sc->sc_dmat,
2256 sc->sc_rxsoft[i].rxs_dmamap);
2257 }
2258 fail_4:
2259 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2260 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2261 bus_dmamap_destroy(sc->sc_dmat,
2262 sc->sc_txsoft[i].txs_dmamap);
2263 }
2264 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2265 fail_3:
2266 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2267 fail_2:
2268 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2269 sc->sc_cd_size);
2270 fail_1:
2271 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2272 fail_0:
2273 return;
2274 }
2275
2276 /* The detach function (ca_detach) */
2277 static int
2278 wm_detach(device_t self, int flags __unused)
2279 {
2280 struct wm_softc *sc = device_private(self);
2281 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2282 int i;
2283 #ifndef WM_MPSAFE
2284 int s;
2285
2286 s = splnet();
2287 #endif
2288 /* Stop the interface. Callouts are stopped in it. */
2289 wm_stop(ifp, 1);
2290
2291 #ifndef WM_MPSAFE
2292 splx(s);
2293 #endif
2294
2295 pmf_device_deregister(self);
2296
2297 /* Tell the firmware about the release */
2298 WM_BOTH_LOCK(sc);
2299 wm_release_manageability(sc);
2300 wm_release_hw_control(sc);
2301 WM_BOTH_UNLOCK(sc);
2302
2303 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2304
2305 /* Delete all remaining media. */
2306 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2307
2308 ether_ifdetach(ifp);
2309 if_detach(ifp);
2310
2311
2312 /* Unload RX dmamaps and free mbufs */
2313 WM_RX_LOCK(sc);
2314 wm_rxdrain(sc);
2315 WM_RX_UNLOCK(sc);
2316 /* Must unlock here */
2317
2318 /* Free dmamap. It's the same as the end of the wm_attach() function */
2319 for (i = 0; i < WM_NRXDESC; i++) {
2320 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2321 bus_dmamap_destroy(sc->sc_dmat,
2322 sc->sc_rxsoft[i].rxs_dmamap);
2323 }
2324 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2325 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2326 bus_dmamap_destroy(sc->sc_dmat,
2327 sc->sc_txsoft[i].txs_dmamap);
2328 }
2329 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2330 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2331 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2332 sc->sc_cd_size);
2333 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2334
2335 /* Disestablish the interrupt handler */
2336 if (sc->sc_ih != NULL) {
2337 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2338 sc->sc_ih = NULL;
2339 }
2340
2341 /* Unmap the registers */
2342 if (sc->sc_ss) {
2343 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2344 sc->sc_ss = 0;
2345 }
2346
2347 if (sc->sc_ios) {
2348 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2349 sc->sc_ios = 0;
2350 }
2351
2352 if (sc->sc_tx_lock)
2353 mutex_obj_free(sc->sc_tx_lock);
2354 if (sc->sc_rx_lock)
2355 mutex_obj_free(sc->sc_rx_lock);
2356
2357 return 0;
2358 }
2359
2360 static bool
2361 wm_suspend(device_t self, const pmf_qual_t *qual)
2362 {
2363 struct wm_softc *sc = device_private(self);
2364
2365 wm_release_manageability(sc);
2366 wm_release_hw_control(sc);
2367 #ifdef WM_WOL
2368 wm_enable_wakeup(sc);
2369 #endif
2370
2371 return true;
2372 }
2373
2374 static bool
2375 wm_resume(device_t self, const pmf_qual_t *qual)
2376 {
2377 struct wm_softc *sc = device_private(self);
2378
2379 wm_init_manageability(sc);
2380
2381 return true;
2382 }
2383
2384 /*
2385 * wm_watchdog: [ifnet interface function]
2386 *
2387 * Watchdog timer handler.
2388 */
2389 static void
2390 wm_watchdog(struct ifnet *ifp)
2391 {
2392 struct wm_softc *sc = ifp->if_softc;
2393
2394 /*
2395 * Since we're using delayed interrupts, sweep up
2396 * before we report an error.
2397 */
2398 WM_TX_LOCK(sc);
2399 wm_txintr(sc);
2400 WM_TX_UNLOCK(sc);
2401
2402 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2403 #ifdef WM_DEBUG
2404 int i, j;
2405 struct wm_txsoft *txs;
2406 #endif
2407 log(LOG_ERR,
2408 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2409 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2410 sc->sc_txnext);
2411 ifp->if_oerrors++;
2412 #ifdef WM_DEBUG
2413 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2414 i = WM_NEXTTXS(sc, i)) {
2415 txs = &sc->sc_txsoft[i];
2416 printf("txs %d tx %d -> %d\n",
2417 i, txs->txs_firstdesc, txs->txs_lastdesc);
2418 for (j = txs->txs_firstdesc; ;
2419 j = WM_NEXTTX(sc, j)) {
2420 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2421 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2422 printf("\t %#08x%08x\n",
2423 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2424 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2425 if (j == txs->txs_lastdesc)
2426 break;
2427 }
2428 }
2429 #endif
2430 /* Reset the interface. */
2431 (void) wm_init(ifp);
2432 }
2433
2434 /* Try to get more packets going. */
2435 ifp->if_start(ifp);
2436 }
2437
2438 /*
2439 * wm_tick:
2440 *
2441 * One second timer, used to check link status, sweep up
2442 * completed transmit jobs, etc.
2443 */
2444 static void
2445 wm_tick(void *arg)
2446 {
2447 struct wm_softc *sc = arg;
2448 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2449 #ifndef WM_MPSAFE
2450 int s;
2451
2452 s = splnet();
2453 #endif
2454
2455 WM_TX_LOCK(sc);
2456
2457 if (sc->sc_stopping)
2458 goto out;
2459
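/*
 * The MAC statistics registers read below are clear-on-read, so
 * each CSR_READ() returns the count accumulated since the previous
 * tick and can be added straight into the event and interface
 * counters.
 */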
2460 if (sc->sc_type >= WM_T_82542_2_1) {
2461 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2462 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2463 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2464 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2465 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2466 }
2467
2468 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2469 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2470 + CSR_READ(sc, WMREG_CRCERRS)
2471 + CSR_READ(sc, WMREG_ALGNERRC)
2472 + CSR_READ(sc, WMREG_SYMERRC)
2473 + CSR_READ(sc, WMREG_RXERRC)
2474 + CSR_READ(sc, WMREG_SEC)
2475 + CSR_READ(sc, WMREG_CEXTERR)
2476 + CSR_READ(sc, WMREG_RLEC);
2477 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2478
2479 if (sc->sc_flags & WM_F_HAS_MII)
2480 mii_tick(&sc->sc_mii);
2481 else
2482 wm_tbi_check_link(sc);
2483
2484 out:
2485 WM_TX_UNLOCK(sc);
2486 #ifndef WM_MPSAFE
2487 splx(s);
2488 #endif
2489
2490 if (!sc->sc_stopping)
2491 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2492 }
2493
2494 static int
2495 wm_ifflags_cb(struct ethercom *ec)
2496 {
2497 struct ifnet *ifp = &ec->ec_if;
2498 struct wm_softc *sc = ifp->if_softc;
2499 int change = ifp->if_flags ^ sc->sc_if_flags;
2500 int rc = 0;
2501
2502 WM_BOTH_LOCK(sc);
2503
2504 if (change != 0)
2505 sc->sc_if_flags = ifp->if_flags;
2506
2507 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2508 rc = ENETRESET;
2509 goto out;
2510 }
2511
2512 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2513 wm_set_filter(sc);
2514
2515 wm_set_vlan(sc);
2516
2517 out:
2518 WM_BOTH_UNLOCK(sc);
2519
2520 return rc;
2521 }
2522
2523 /*
2524 * wm_ioctl: [ifnet interface function]
2525 *
2526 * Handle control requests from the operator.
2527 */
2528 static int
2529 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2530 {
2531 struct wm_softc *sc = ifp->if_softc;
2532 struct ifreq *ifr = (struct ifreq *) data;
2533 struct ifaddr *ifa = (struct ifaddr *)data;
2534 struct sockaddr_dl *sdl;
2535 int s, error;
2536
2537 #ifndef WM_MPSAFE
2538 s = splnet();
2539 #endif
2540 WM_BOTH_LOCK(sc);
2541
2542 switch (cmd) {
2543 case SIOCSIFMEDIA:
2544 case SIOCGIFMEDIA:
2545 /* Flow control requires full-duplex mode. */
2546 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2547 (ifr->ifr_media & IFM_FDX) == 0)
2548 ifr->ifr_media &= ~IFM_ETH_FMASK;
2549 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2550 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2551 /* We can do both TXPAUSE and RXPAUSE. */
2552 ifr->ifr_media |=
2553 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2554 }
2555 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2556 }
2557 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2558 break;
2559 case SIOCINITIFADDR:
2560 if (ifa->ifa_addr->sa_family == AF_LINK) {
2561 sdl = satosdl(ifp->if_dl->ifa_addr);
2562 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2563 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2564 /* unicast address is first multicast entry */
2565 wm_set_filter(sc);
2566 error = 0;
2567 break;
2568 }
2569 /*FALLTHROUGH*/
2570 default:
2571 WM_BOTH_UNLOCK(sc);
2572 #ifdef WM_MPSAFE
2573 s = splnet();
2574 #endif
2575 /* It may call wm_start, so unlock here */
2576 error = ether_ioctl(ifp, cmd, data);
2577 #ifdef WM_MPSAFE
2578 splx(s);
2579 #endif
2580 WM_BOTH_LOCK(sc);
2581
2582 if (error != ENETRESET)
2583 break;
2584
2585 error = 0;
2586
2587 if (cmd == SIOCSIFCAP) {
2588 WM_BOTH_UNLOCK(sc);
2589 error = (*ifp->if_init)(ifp);
2590 WM_BOTH_LOCK(sc);
2591 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2592 ;
2593 else if (ifp->if_flags & IFF_RUNNING) {
2594 /*
2595 * Multicast list has changed; set the hardware filter
2596 * accordingly.
2597 */
2598 wm_set_filter(sc);
2599 }
2600 break;
2601 }
2602
2603 WM_BOTH_UNLOCK(sc);
2604
2605 /* Try to get more packets going. */
2606 ifp->if_start(ifp);
2607
2608 #ifndef WM_MPSAFE
2609 splx(s);
2610 #endif
2611 return error;
2612 }
2613
2614 /* MAC address related */
2615
2616 static int
2617 wm_check_alt_mac_addr(struct wm_softc *sc)
2618 {
2619 uint16_t myea[ETHER_ADDR_LEN / 2];
2620 uint16_t offset = EEPROM_OFF_MACADDR;
2621
2622 /* Try to read alternative MAC address pointer */
2623 if (wm_nvm_read(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2624 return -1;
2625
2626 /* Check pointer */
2627 if (offset == 0xffff)
2628 return -1;
2629
2630 /*
2631 * Check whether the alternative MAC address is valid or not.
2632 * Some cards have a non-0xffff pointer but don't actually use
2633 * an alternative MAC address.
2634 *
2635 * Check that the multicast (I/G) bit of the first octet is clear.
2636 */
2637 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2638 if (((myea[0] & 0xff) & 0x01) == 0)
2639 return 0; /* found! */
2640
2641 /* not found */
2642 return -1;
2643 }
2644
2645 static int
2646 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2647 {
2648 uint16_t myea[ETHER_ADDR_LEN / 2];
2649 uint16_t offset = EEPROM_OFF_MACADDR;
2650 int do_invert = 0;
2651
2652 switch (sc->sc_type) {
2653 case WM_T_82580:
2654 case WM_T_82580ER:
2655 case WM_T_I350:
2656 case WM_T_I354:
2657 switch (sc->sc_funcid) {
2658 case 0:
2659 /* default value (== EEPROM_OFF_MACADDR) */
2660 break;
2661 case 1:
2662 offset = EEPROM_OFF_LAN1;
2663 break;
2664 case 2:
2665 offset = EEPROM_OFF_LAN2;
2666 break;
2667 case 3:
2668 offset = EEPROM_OFF_LAN3;
2669 break;
2670 default:
2671 goto bad;
2672 /* NOTREACHED */
2673 break;
2674 }
2675 break;
2676 case WM_T_82571:
2677 case WM_T_82575:
2678 case WM_T_82576:
2679 case WM_T_80003:
2680 case WM_T_I210:
2681 case WM_T_I211:
2682 if (wm_check_alt_mac_addr(sc) != 0) {
2683 /* reset the offset to LAN0 */
2684 offset = EEPROM_OFF_MACADDR;
2685 if ((sc->sc_funcid & 0x01) == 1)
2686 do_invert = 1;
2687 goto do_read;
2688 }
2689 switch (sc->sc_funcid) {
2690 case 0:
2691 /*
2692 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
2693 * itself.
2694 */
2695 break;
2696 case 1:
2697 offset += EEPROM_OFF_MACADDR_LAN1;
2698 break;
2699 case 2:
2700 offset += EEPROM_OFF_MACADDR_LAN2;
2701 break;
2702 case 3:
2703 offset += EEPROM_OFF_MACADDR_LAN3;
2704 break;
2705 default:
2706 goto bad;
2707 /* NOTREACHED */
2708 break;
2709 }
2710 break;
2711 default:
2712 if ((sc->sc_funcid & 0x01) == 1)
2713 do_invert = 1;
2714 break;
2715 }
2716
2717 do_read:
2718 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2719 myea) != 0) {
2720 goto bad;
2721 }
2722
2723 enaddr[0] = myea[0] & 0xff;
2724 enaddr[1] = myea[0] >> 8;
2725 enaddr[2] = myea[1] & 0xff;
2726 enaddr[3] = myea[1] >> 8;
2727 enaddr[4] = myea[2] & 0xff;
2728 enaddr[5] = myea[2] >> 8;
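/*
 * Example: the EEPROM stores the address as three little-endian
 * 16-bit words, so myea[] = { 0x1100, 0x3322, 0x5544 } unpacks
 * to the Ethernet address 00:11:22:33:44:55.
 */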
2729
2730 /*
2731 * Toggle the LSB of the MAC address on the second port
2732 * of some dual port cards.
2733 */
2734 if (do_invert != 0)
2735 enaddr[5] ^= 1;
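/*
 * Example: if the shared EEPROM words decode to 00:11:22:33:44:55
 * for port 0, the second port ends up with 00:11:22:33:44:54.
 */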
2736
2737 return 0;
2738
2739 bad:
2740 return -1;
2741 }
2742
2743 /*
2744 * wm_set_ral:
2745 *
2746 * Set an entry in the receive address list.
2747 */
2748 static void
2749 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2750 {
2751 uint32_t ral_lo, ral_hi;
2752
2753 if (enaddr != NULL) {
2754 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2755 (enaddr[3] << 24);
2756 ral_hi = enaddr[4] | (enaddr[5] << 8);
2757 ral_hi |= RAL_AV;
2758 } else {
2759 ral_lo = 0;
2760 ral_hi = 0;
2761 }
2762
2763 if (sc->sc_type >= WM_T_82544) {
2764 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2765 ral_lo);
2766 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2767 ral_hi);
2768 } else {
2769 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2770 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2771 }
2772 }
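/*
 * Example: for the station address 00:11:22:33:44:55, wm_set_ral()
 * packs ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV,
 * matching the little-endian layout the receive address registers
 * expect.
 */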
2773
2774 /*
2775 * wm_mchash:
2776 *
2777 * Compute the hash of the multicast address for the 4096-bit
2778 * multicast filter.
2779 */
2780 static uint32_t
2781 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2782 {
2783 static const int lo_shift[4] = { 4, 3, 2, 0 };
2784 static const int hi_shift[4] = { 4, 5, 6, 8 };
2785 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2786 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2787 uint32_t hash;
2788
2789 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2790 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2791 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2792 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2793 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2794 return (hash & 0x3ff);
2795 }
2796 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2797 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2798
2799 return (hash & 0xfff);
2800 }
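/*
 * Worked example (non-ICH part with sc_mchash_type == 0): for the
 * multicast address 01:00:5e:00:00:fb, enaddr[4] = 0x00 and
 * enaddr[5] = 0xfb, so hash = (0x00 >> 4) | (0xfb << 4) = 0xfb0.
 * wm_set_filter() then uses hash >> 5 = 0x7d as the MTA word index
 * and hash & 0x1f = 0x10 as the bit within that word.
 */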
2801
2802 /*
2803 * wm_set_filter:
2804 *
2805 * Set up the receive filter.
2806 */
2807 static void
2808 wm_set_filter(struct wm_softc *sc)
2809 {
2810 struct ethercom *ec = &sc->sc_ethercom;
2811 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2812 struct ether_multi *enm;
2813 struct ether_multistep step;
2814 bus_addr_t mta_reg;
2815 uint32_t hash, reg, bit;
2816 int i, size;
2817
2818 if (sc->sc_type >= WM_T_82544)
2819 mta_reg = WMREG_CORDOVA_MTA;
2820 else
2821 mta_reg = WMREG_MTA;
2822
2823 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2824
2825 if (ifp->if_flags & IFF_BROADCAST)
2826 sc->sc_rctl |= RCTL_BAM;
2827 if (ifp->if_flags & IFF_PROMISC) {
2828 sc->sc_rctl |= RCTL_UPE;
2829 goto allmulti;
2830 }
2831
2832 /*
2833 * Set the station address in the first RAL slot, and
2834 * clear the remaining slots.
2835 */
2836 if (sc->sc_type == WM_T_ICH8)
2837 size = WM_RAL_TABSIZE_ICH8 - 1;
2838 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2839 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2840 || (sc->sc_type == WM_T_PCH_LPT))
2841 size = WM_RAL_TABSIZE_ICH8;
2842 else if (sc->sc_type == WM_T_82575)
2843 size = WM_RAL_TABSIZE_82575;
2844 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2845 size = WM_RAL_TABSIZE_82576;
2846 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2847 size = WM_RAL_TABSIZE_I350;
2848 else
2849 size = WM_RAL_TABSIZE;
2850 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2851 for (i = 1; i < size; i++)
2852 wm_set_ral(sc, NULL, i);
2853
2854 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2855 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2856 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2857 size = WM_ICH8_MC_TABSIZE;
2858 else
2859 size = WM_MC_TABSIZE;
2860 /* Clear out the multicast table. */
2861 for (i = 0; i < size; i++)
2862 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2863
2864 ETHER_FIRST_MULTI(step, ec, enm);
2865 while (enm != NULL) {
2866 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2867 /*
2868 * We must listen to a range of multicast addresses.
2869 * For now, just accept all multicasts, rather than
2870 * trying to set only those filter bits needed to match
2871 * the range. (At this time, the only use of address
2872 * ranges is for IP multicast routing, for which the
2873 * range is big enough to require all bits set.)
2874 */
2875 goto allmulti;
2876 }
2877
2878 hash = wm_mchash(sc, enm->enm_addrlo);
2879
2880 reg = (hash >> 5);
2881 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2882 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2883 || (sc->sc_type == WM_T_PCH2)
2884 || (sc->sc_type == WM_T_PCH_LPT))
2885 reg &= 0x1f;
2886 else
2887 reg &= 0x7f;
2888 bit = hash & 0x1f;
2889
2890 hash = CSR_READ(sc, mta_reg + (reg << 2));
2891 hash |= 1U << bit;
2892
2893 /* XXX Hardware bug?? */
2894 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2895 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2896 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2897 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2898 } else
2899 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2900
2901 ETHER_NEXT_MULTI(step, enm);
2902 }
2903
2904 ifp->if_flags &= ~IFF_ALLMULTI;
2905 goto setit;
2906
2907 allmulti:
2908 ifp->if_flags |= IFF_ALLMULTI;
2909 sc->sc_rctl |= RCTL_MPE;
2910
2911 setit:
2912 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2913 }
2914
2915 /* Reset and init related */
2916
2917 static void
2918 wm_set_vlan(struct wm_softc *sc)
2919 {
2920 /* Deal with VLAN enables. */
2921 if (VLAN_ATTACHED(&sc->sc_ethercom))
2922 sc->sc_ctrl |= CTRL_VME;
2923 else
2924 sc->sc_ctrl &= ~CTRL_VME;
2925
2926 /* Write the control registers. */
2927 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2928 }
2929
2930 static void
2931 wm_set_pcie_completion_timeout(struct wm_softc *sc)
2932 {
2933 uint32_t gcr;
2934 pcireg_t ctrl2;
2935
2936 gcr = CSR_READ(sc, WMREG_GCR);
2937
2938 /* Only take action if timeout value is defaulted to 0 */
2939 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
2940 goto out;
2941
2942 if ((gcr & GCR_CAP_VER2) == 0) {
2943 gcr |= GCR_CMPL_TMOUT_10MS;
2944 goto out;
2945 }
2946
2947 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
2948 sc->sc_pcixe_capoff + PCIE_DCSR2);
2949 ctrl2 |= WM_PCIE_DCSR2_16MS;
2950 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
2951 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
2952
2953 out:
2954 /* Disable completion timeout resend */
2955 gcr &= ~GCR_CMPL_TMOUT_RESEND;
2956
2957 CSR_WRITE(sc, WMREG_GCR, gcr);
2958 }
2959
2960 void
2961 wm_get_auto_rd_done(struct wm_softc *sc)
2962 {
2963 int i;
2964
2965 /* wait for eeprom to reload */
2966 switch (sc->sc_type) {
2967 case WM_T_82571:
2968 case WM_T_82572:
2969 case WM_T_82573:
2970 case WM_T_82574:
2971 case WM_T_82583:
2972 case WM_T_82575:
2973 case WM_T_82576:
2974 case WM_T_82580:
2975 case WM_T_82580ER:
2976 case WM_T_I350:
2977 case WM_T_I354:
2978 case WM_T_I210:
2979 case WM_T_I211:
2980 case WM_T_80003:
2981 case WM_T_ICH8:
2982 case WM_T_ICH9:
2983 for (i = 0; i < 10; i++) {
2984 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
2985 break;
2986 delay(1000);
2987 }
2988 if (i == 10) {
2989 log(LOG_ERR, "%s: auto read from eeprom failed to "
2990 "complete\n", device_xname(sc->sc_dev));
2991 }
2992 break;
2993 default:
2994 break;
2995 }
2996 }
2997
2998 void
2999 wm_lan_init_done(struct wm_softc *sc)
3000 {
3001 uint32_t reg = 0;
3002 int i;
3003
3004 /* wait for eeprom to reload */
3005 switch (sc->sc_type) {
3006 case WM_T_ICH10:
3007 case WM_T_PCH:
3008 case WM_T_PCH2:
3009 case WM_T_PCH_LPT:
3010 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3011 reg = CSR_READ(sc, WMREG_STATUS);
3012 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3013 break;
3014 delay(100);
3015 }
3016 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3017 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3018 "complete\n", device_xname(sc->sc_dev), __func__);
3019 }
3020 break;
3021 default:
3022 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3023 __func__);
3024 break;
3025 }
3026
3027 reg &= ~STATUS_LAN_INIT_DONE;
3028 CSR_WRITE(sc, WMREG_STATUS, reg);
3029 }
3030
3031 void
3032 wm_get_cfg_done(struct wm_softc *sc)
3033 {
3034 int mask;
3035 uint32_t reg;
3036 int i;
3037
3038 /* wait for eeprom to reload */
3039 switch (sc->sc_type) {
3040 case WM_T_82542_2_0:
3041 case WM_T_82542_2_1:
3042 /* null */
3043 break;
3044 case WM_T_82543:
3045 case WM_T_82544:
3046 case WM_T_82540:
3047 case WM_T_82545:
3048 case WM_T_82545_3:
3049 case WM_T_82546:
3050 case WM_T_82546_3:
3051 case WM_T_82541:
3052 case WM_T_82541_2:
3053 case WM_T_82547:
3054 case WM_T_82547_2:
3055 case WM_T_82573:
3056 case WM_T_82574:
3057 case WM_T_82583:
3058 /* generic */
3059 delay(10*1000);
3060 break;
3061 case WM_T_80003:
3062 case WM_T_82571:
3063 case WM_T_82572:
3064 case WM_T_82575:
3065 case WM_T_82576:
3066 case WM_T_82580:
3067 case WM_T_82580ER:
3068 case WM_T_I350:
3069 case WM_T_I354:
3070 case WM_T_I210:
3071 case WM_T_I211:
3072 if (sc->sc_type == WM_T_82571) {
3073 /* Only 82571 shares port 0 */
3074 mask = EEMNGCTL_CFGDONE_0;
3075 } else
3076 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3077 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3078 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3079 break;
3080 delay(1000);
3081 }
3082 if (i >= WM_PHY_CFG_TIMEOUT) {
3083 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3084 device_xname(sc->sc_dev), __func__));
3085 }
3086 break;
3087 case WM_T_ICH8:
3088 case WM_T_ICH9:
3089 case WM_T_ICH10:
3090 case WM_T_PCH:
3091 case WM_T_PCH2:
3092 case WM_T_PCH_LPT:
3093 delay(10*1000);
3094 if (sc->sc_type >= WM_T_ICH10)
3095 wm_lan_init_done(sc);
3096 else
3097 wm_get_auto_rd_done(sc);
3098
3099 reg = CSR_READ(sc, WMREG_STATUS);
3100 if ((reg & STATUS_PHYRA) != 0)
3101 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3102 break;
3103 default:
3104 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3105 __func__);
3106 break;
3107 }
3108 }
3109
3110 /*
3111 * wm_reset:
3112 *
3113 * Reset the chip to a known state.
3114 */
3115 static void
3116 wm_reset(struct wm_softc *sc)
3117 {
3118 int phy_reset = 0;
3119 int error = 0;
3120 uint32_t reg, mask;
3121
3122 /*
3123 * Allocate on-chip memory according to the MTU size.
3124 * The Packet Buffer Allocation register must be written
3125 * before the chip is reset.
3126 */
3127 switch (sc->sc_type) {
3128 case WM_T_82547:
3129 case WM_T_82547_2:
3130 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3131 PBA_22K : PBA_30K;
3132 sc->sc_txfifo_head = 0;
3133 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3134 sc->sc_txfifo_size =
3135 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3136 sc->sc_txfifo_stall = 0;
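/*
 * Illustrative split, assuming the PBA_* macros count kilobytes
 * as their names suggest: the 82547 shares a 40K packet buffer,
 * so an Rx allocation of PBA_30K leaves a 10K Tx FIFO starting
 * at sc_txfifo_addr, and the jumbo setting of PBA_22K leaves
 * 18K.  The head/stall state feeds the Tx FIFO stall
 * work-around.
 */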
3137 break;
3138 case WM_T_82571:
3139 case WM_T_82572:
3140 case WM_T_82575: /* XXX need special handling for jumbo frames */
3141 case WM_T_I350:
3142 case WM_T_I354:
3143 case WM_T_80003:
3144 sc->sc_pba = PBA_32K;
3145 break;
3146 case WM_T_82580:
3147 case WM_T_82580ER:
3148 sc->sc_pba = PBA_35K;
3149 break;
3150 case WM_T_I210:
3151 case WM_T_I211:
3152 sc->sc_pba = PBA_34K;
3153 break;
3154 case WM_T_82576:
3155 sc->sc_pba = PBA_64K;
3156 break;
3157 case WM_T_82573:
3158 sc->sc_pba = PBA_12K;
3159 break;
3160 case WM_T_82574:
3161 case WM_T_82583:
3162 sc->sc_pba = PBA_20K;
3163 break;
3164 case WM_T_ICH8:
3165 sc->sc_pba = PBA_8K;
3166 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3167 break;
3168 case WM_T_ICH9:
3169 case WM_T_ICH10:
3170 sc->sc_pba = PBA_10K;
3171 break;
3172 case WM_T_PCH:
3173 case WM_T_PCH2:
3174 case WM_T_PCH_LPT:
3175 sc->sc_pba = PBA_26K;
3176 break;
3177 default:
3178 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3179 PBA_40K : PBA_48K;
3180 break;
3181 }
3182 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3183
3184 /* Prevent the PCI-E bus from sticking */
3185 if (sc->sc_flags & WM_F_PCIE) {
3186 int timeout = 800;
3187
3188 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3189 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3190
3191 while (timeout--) {
3192 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3193 == 0)
3194 break;
3195 delay(100);
3196 }
3197 }
3198
3199 /* Set the completion timeout for interface */
3200 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3201 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
3202 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3203 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3204 wm_set_pcie_completion_timeout(sc);
3205
3206 /* Clear interrupt */
3207 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3208
3209 /* Stop the transmit and receive processes. */
3210 CSR_WRITE(sc, WMREG_RCTL, 0);
3211 sc->sc_rctl &= ~RCTL_EN;
3212 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3213 CSR_WRITE_FLUSH(sc);
3214
3215 /* XXX set_tbi_sbp_82543() */
3216
3217 delay(10*1000);
3218
3219 /* Must acquire the MDIO ownership before MAC reset */
3220 switch (sc->sc_type) {
3221 case WM_T_82573:
3222 case WM_T_82574:
3223 case WM_T_82583:
3224 error = wm_get_hw_semaphore_82573(sc);
3225 break;
3226 default:
3227 break;
3228 }
3229
3230 /*
3231 * 82541 Errata 29? & 82547 Errata 28?
3232 * See also the description about PHY_RST bit in CTRL register
3233 * in 8254x_GBe_SDM.pdf.
3234 */
3235 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3236 CSR_WRITE(sc, WMREG_CTRL,
3237 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3238 CSR_WRITE_FLUSH(sc);
3239 delay(5000);
3240 }
3241
3242 switch (sc->sc_type) {
3243 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3244 case WM_T_82541:
3245 case WM_T_82541_2:
3246 case WM_T_82547:
3247 case WM_T_82547_2:
3248 /*
3249 * On some chipsets, a reset through a memory-mapped write
3250 * cycle can cause the chip to reset before completing the
3251 * write cycle. This causes major headaches that can be
3252 * avoided by issuing the reset via indirect register writes
3253 * through I/O space.
3254 *
3255 * So, if we successfully mapped the I/O BAR at attach time,
3256 * use that. Otherwise, try our luck with a memory-mapped
3257 * reset.
3258 */
3259 if (sc->sc_flags & WM_F_IOH_VALID)
3260 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3261 else
3262 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3263 break;
3264 case WM_T_82545_3:
3265 case WM_T_82546_3:
3266 /* Use the shadow control register on these chips. */
3267 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3268 break;
3269 case WM_T_80003:
3270 mask = swfwphysem[sc->sc_funcid];
3271 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3272 wm_get_swfw_semaphore(sc, mask);
3273 CSR_WRITE(sc, WMREG_CTRL, reg);
3274 wm_put_swfw_semaphore(sc, mask);
3275 break;
3276 case WM_T_ICH8:
3277 case WM_T_ICH9:
3278 case WM_T_ICH10:
3279 case WM_T_PCH:
3280 case WM_T_PCH2:
3281 case WM_T_PCH_LPT:
3282 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3283 if (wm_check_reset_block(sc) == 0) {
3284 /*
3285 * Gate automatic PHY configuration by hardware on
3286 * non-managed 82579
3287 */
3288 if ((sc->sc_type == WM_T_PCH2)
3289 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3290 != 0))
3291 wm_gate_hw_phy_config_ich8lan(sc, 1);
3292
3293
3294 reg |= CTRL_PHY_RESET;
3295 phy_reset = 1;
3296 }
3297 wm_get_swfwhw_semaphore(sc);
3298 CSR_WRITE(sc, WMREG_CTRL, reg);
3299 /* Don't insert a completion barrier during reset */
3300 delay(20*1000);
3301 wm_put_swfwhw_semaphore(sc);
3302 break;
3303 case WM_T_82542_2_0:
3304 case WM_T_82542_2_1:
3305 case WM_T_82543:
3306 case WM_T_82540:
3307 case WM_T_82545:
3308 case WM_T_82546:
3309 case WM_T_82571:
3310 case WM_T_82572:
3311 case WM_T_82573:
3312 case WM_T_82574:
3313 case WM_T_82575:
3314 case WM_T_82576:
3315 case WM_T_82580:
3316 case WM_T_82580ER:
3317 case WM_T_82583:
3318 case WM_T_I350:
3319 case WM_T_I354:
3320 case WM_T_I210:
3321 case WM_T_I211:
3322 default:
3323 /* Everything else can safely use the documented method. */
3324 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3325 break;
3326 }
3327
3328 /* Must release the MDIO ownership after MAC reset */
3329 switch (sc->sc_type) {
3330 case WM_T_82573:
3331 case WM_T_82574:
3332 case WM_T_82583:
3333 if (error == 0)
3334 wm_put_hw_semaphore_82573(sc);
3335 break;
3336 default:
3337 break;
3338 }
3339
3340 if (phy_reset != 0)
3341 wm_get_cfg_done(sc);
3342
3343 /* reload EEPROM */
3344 switch (sc->sc_type) {
3345 case WM_T_82542_2_0:
3346 case WM_T_82542_2_1:
3347 case WM_T_82543:
3348 case WM_T_82544:
3349 delay(10);
3350 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3351 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3352 CSR_WRITE_FLUSH(sc);
3353 delay(2000);
3354 break;
3355 case WM_T_82540:
3356 case WM_T_82545:
3357 case WM_T_82545_3:
3358 case WM_T_82546:
3359 case WM_T_82546_3:
3360 delay(5*1000);
3361 /* XXX Disable HW ARPs on ASF enabled adapters */
3362 break;
3363 case WM_T_82541:
3364 case WM_T_82541_2:
3365 case WM_T_82547:
3366 case WM_T_82547_2:
3367 delay(20000);
3368 /* XXX Disable HW ARPs on ASF enabled adapters */
3369 break;
3370 case WM_T_82571:
3371 case WM_T_82572:
3372 case WM_T_82573:
3373 case WM_T_82574:
3374 case WM_T_82583:
3375 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3376 delay(10);
3377 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3378 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3379 CSR_WRITE_FLUSH(sc);
3380 }
3381 /* check EECD_EE_AUTORD */
3382 wm_get_auto_rd_done(sc);
3383 /*
3384 * PHY configuration from NVM starts only after EECD_AUTO_RD
3385 * is set.
3386 */
3387 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3388 || (sc->sc_type == WM_T_82583))
3389 delay(25*1000);
3390 break;
3391 case WM_T_82575:
3392 case WM_T_82576:
3393 case WM_T_82580:
3394 case WM_T_82580ER:
3395 case WM_T_I350:
3396 case WM_T_I354:
3397 case WM_T_I210:
3398 case WM_T_I211:
3399 case WM_T_80003:
3400 /* check EECD_EE_AUTORD */
3401 wm_get_auto_rd_done(sc);
3402 break;
3403 case WM_T_ICH8:
3404 case WM_T_ICH9:
3405 case WM_T_ICH10:
3406 case WM_T_PCH:
3407 case WM_T_PCH2:
3408 case WM_T_PCH_LPT:
3409 break;
3410 default:
3411 panic("%s: unknown type\n", __func__);
3412 }
3413
3414 /* Check whether EEPROM is present or not */
3415 switch (sc->sc_type) {
3416 case WM_T_82575:
3417 case WM_T_82576:
3418 #if 0 /* XXX */
3419 case WM_T_82580:
3420 case WM_T_82580ER:
3421 #endif
3422 case WM_T_I350:
3423 case WM_T_I354:
3424 case WM_T_ICH8:
3425 case WM_T_ICH9:
3426 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3427 /* Not found */
3428 sc->sc_flags |= WM_F_EEPROM_INVALID;
3429 if ((sc->sc_type == WM_T_82575)
3430 || (sc->sc_type == WM_T_82576)
3431 || (sc->sc_type == WM_T_82580)
3432 || (sc->sc_type == WM_T_82580ER)
3433 || (sc->sc_type == WM_T_I350)
3434 || (sc->sc_type == WM_T_I354))
3435 wm_reset_init_script_82575(sc);
3436 }
3437 break;
3438 default:
3439 break;
3440 }
3441
3442 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
3443 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3444 /* clear global device reset status bit */
3445 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3446 }
3447
3448 /* Clear any pending interrupt events. */
3449 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3450 reg = CSR_READ(sc, WMREG_ICR);
3451
3452 /* reload sc_ctrl */
3453 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3454
3455 if (sc->sc_type == WM_T_I350)
3456 wm_set_eee_i350(sc);
3457
3458 /* dummy read from WUC */
3459 if (sc->sc_type == WM_T_PCH)
3460 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3461 /*
3462 * For PCH, this write will make sure that any noise will be detected
3463 * as a CRC error and be dropped rather than show up as a bad packet
3464 * to the DMA engine
3465 */
3466 if (sc->sc_type == WM_T_PCH)
3467 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3468
3469 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3470 CSR_WRITE(sc, WMREG_WUC, 0);
3471
3472 /* XXX need special handling for 82580 */
3473 }
3474
3475 /*
3476 * wm_add_rxbuf:
3477 *
3478 * Add a receive buffer to the indicated descriptor.
3479 */
3480 static int
3481 wm_add_rxbuf(struct wm_softc *sc, int idx)
3482 {
3483 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3484 struct mbuf *m;
3485 int error;
3486
3487 KASSERT(WM_RX_LOCKED(sc));
3488
3489 MGETHDR(m, M_DONTWAIT, MT_DATA);
3490 if (m == NULL)
3491 return ENOBUFS;
3492
3493 MCLGET(m, M_DONTWAIT);
3494 if ((m->m_flags & M_EXT) == 0) {
3495 m_freem(m);
3496 return ENOBUFS;
3497 }
3498
3499 if (rxs->rxs_mbuf != NULL)
3500 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3501
3502 rxs->rxs_mbuf = m;
3503
3504 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3505 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3506 BUS_DMA_READ|BUS_DMA_NOWAIT);
3507 if (error) {
3508 /* XXX XXX XXX */
3509 aprint_error_dev(sc->sc_dev,
3510 "unable to load rx DMA map %d, error = %d\n",
3511 idx, error);
3512 panic("wm_add_rxbuf");
3513 }
3514
3515 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3516 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3517
3518 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3519 if ((sc->sc_rctl & RCTL_EN) != 0)
3520 WM_INIT_RXDESC(sc, idx);
3521 } else
3522 WM_INIT_RXDESC(sc, idx);
3523
3524 return 0;
3525 }
3526
3527 /*
3528 * wm_rxdrain:
3529 *
3530 * Drain the receive queue.
3531 */
3532 static void
3533 wm_rxdrain(struct wm_softc *sc)
3534 {
3535 struct wm_rxsoft *rxs;
3536 int i;
3537
3538 KASSERT(WM_RX_LOCKED(sc));
3539
3540 for (i = 0; i < WM_NRXDESC; i++) {
3541 rxs = &sc->sc_rxsoft[i];
3542 if (rxs->rxs_mbuf != NULL) {
3543 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3544 m_freem(rxs->rxs_mbuf);
3545 rxs->rxs_mbuf = NULL;
3546 }
3547 }
3548 }
3549
3550 /*
3551 * wm_init: [ifnet interface function]
3552 *
3553 * Initialize the interface.
3554 */
3555 static int
3556 wm_init(struct ifnet *ifp)
3557 {
3558 struct wm_softc *sc = ifp->if_softc;
3559 int ret;
3560
3561 WM_BOTH_LOCK(sc);
3562 ret = wm_init_locked(ifp);
3563 WM_BOTH_UNLOCK(sc);
3564
3565 return ret;
3566 }
3567
3568 static int
3569 wm_init_locked(struct ifnet *ifp)
3570 {
3571 struct wm_softc *sc = ifp->if_softc;
3572 struct wm_rxsoft *rxs;
3573 int i, j, trynum, error = 0;
3574 uint32_t reg;
3575
3576 KASSERT(WM_BOTH_LOCKED(sc));
3577 /*
3578 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3579 * There is a small but measurable benefit to avoiding the adjustment
3580 * of the descriptor so that the headers are aligned, for normal mtu,
3581 * on such platforms. One possibility is that the DMA itself is
3582 * slightly more efficient if the front of the entire packet (instead
3583 * of the front of the headers) is aligned.
3584 *
3585 * Note we must always set align_tweak to 0 if we are using
3586 * jumbo frames.
3587 */
3588 #ifdef __NO_STRICT_ALIGNMENT
3589 sc->sc_align_tweak = 0;
3590 #else
3591 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3592 sc->sc_align_tweak = 0;
3593 else
3594 sc->sc_align_tweak = 2;
3595 #endif /* __NO_STRICT_ALIGNMENT */
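/*
 * Example: with sc_align_tweak = 2, the received frame starts at
 * a 2-byte offset into the buffer, so the 14-byte Ethernet header
 * ends on a 16-byte boundary and the IP header that follows is
 * 4-byte aligned, which strict-alignment platforms require.
 */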
3596
3597 /* Cancel any pending I/O. */
3598 wm_stop_locked(ifp, 0);
3599
3600 /* update statistics before reset */
3601 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3602 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3603
3604 /* Reset the chip to a known state. */
3605 wm_reset(sc);
3606
3607 switch (sc->sc_type) {
3608 case WM_T_82571:
3609 case WM_T_82572:
3610 case WM_T_82573:
3611 case WM_T_82574:
3612 case WM_T_82583:
3613 case WM_T_80003:
3614 case WM_T_ICH8:
3615 case WM_T_ICH9:
3616 case WM_T_ICH10:
3617 case WM_T_PCH:
3618 case WM_T_PCH2:
3619 case WM_T_PCH_LPT:
3620 if (wm_check_mng_mode(sc) != 0)
3621 wm_get_hw_control(sc);
3622 break;
3623 default:
3624 break;
3625 }
3626
3627 /* Reset the PHY. */
3628 if (sc->sc_flags & WM_F_HAS_MII)
3629 wm_gmii_reset(sc);
3630
3631 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3632 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3633 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3634 || (sc->sc_type == WM_T_PCH_LPT))
3635 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3636
3637 /* Initialize the transmit descriptor ring. */
3638 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3639 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3640 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3641 sc->sc_txfree = WM_NTXDESC(sc);
3642 sc->sc_txnext = 0;
3643
3644 if (sc->sc_type < WM_T_82543) {
3645 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3646 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3647 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3648 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3649 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3650 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3651 } else {
3652 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3653 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3654 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3655 CSR_WRITE(sc, WMREG_TDH, 0);
3656 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3657 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
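		/*
		 * Added note: TIDV/TADV tick in 1024ns units while ITR
		 * ticks in 256ns units, so 375 here matches the sc_itr
		 * value of 1500 programmed below (375 * 1024ns ==
		 * 1500 * 256ns); see the interrupt throttling comment
		 * near the WMREG_ITR write.
		 */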
3658
3659 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3660 /*
3661 * Don't write TDT before TCTL.EN is set.
3662 * See the document.
3663 */
3664 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3665 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3666 | TXDCTL_WTHRESH(0));
3667 else {
3668 CSR_WRITE(sc, WMREG_TDT, 0);
3669 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3670 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3671 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3672 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3673 }
3674 }
3675 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3676 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3677
3678 /* Initialize the transmit job descriptors. */
3679 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3680 sc->sc_txsoft[i].txs_mbuf = NULL;
3681 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3682 sc->sc_txsnext = 0;
3683 sc->sc_txsdirty = 0;
3684
3685 /*
3686 * Initialize the receive descriptor and receive job
3687 * descriptor rings.
3688 */
3689 if (sc->sc_type < WM_T_82543) {
3690 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3691 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3692 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3693 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3694 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3695 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3696
3697 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3698 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3699 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3700 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3701 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3702 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3703 } else {
3704 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3705 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3706 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3707 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3708 CSR_WRITE(sc, WMREG_EITR(0), 450);
3709 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3710 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3711 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3712 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3713 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3714 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3715 | RXDCTL_WTHRESH(1));
3716 } else {
3717 CSR_WRITE(sc, WMREG_RDH, 0);
3718 CSR_WRITE(sc, WMREG_RDT, 0);
3719 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3720 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3721 }
3722 }
3723 for (i = 0; i < WM_NRXDESC; i++) {
3724 rxs = &sc->sc_rxsoft[i];
3725 if (rxs->rxs_mbuf == NULL) {
3726 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3727 log(LOG_ERR, "%s: unable to allocate or map "
3728 "rx buffer %d, error = %d\n",
3729 device_xname(sc->sc_dev), i, error);
3730 /*
3731 * XXX Should attempt to run with fewer receive
3732 * XXX buffers instead of just failing.
3733 */
3734 wm_rxdrain(sc);
3735 goto out;
3736 }
3737 } else {
3738 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3739 WM_INIT_RXDESC(sc, i);
3740 /*
3741 * For 82575 and newer devices, the RX descriptors
3742 * must be initialized after the setting of RCTL.EN in
3743 * wm_set_filter().
3744 */
3745 }
3746 }
3747 sc->sc_rxptr = 0;
3748 sc->sc_rxdiscard = 0;
3749 WM_RXCHAIN_RESET(sc);
3750
3751 /*
3752 * Clear out the VLAN table -- we don't use it (yet).
3753 */
3754 CSR_WRITE(sc, WMREG_VET, 0);
3755 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3756 trynum = 10; /* Due to hw errata */
3757 else
3758 trynum = 1;
3759 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3760 for (j = 0; j < trynum; j++)
3761 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3762
3763 /*
3764 * Set up flow-control parameters.
3765 *
3766 * XXX Values could probably stand some tuning.
3767 */
3768 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3769 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3770 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3771 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3772 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3773 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3774 }
3775
3776 sc->sc_fcrtl = FCRTL_DFLT;
3777 if (sc->sc_type < WM_T_82543) {
3778 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3779 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3780 } else {
3781 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3782 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3783 }
3784
3785 if (sc->sc_type == WM_T_80003)
3786 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3787 else
3788 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3789
3790 /* Writes the control register. */
3791 wm_set_vlan(sc);
3792
3793 if (sc->sc_flags & WM_F_HAS_MII) {
3794 int val;
3795
3796 switch (sc->sc_type) {
3797 case WM_T_80003:
3798 case WM_T_ICH8:
3799 case WM_T_ICH9:
3800 case WM_T_ICH10:
3801 case WM_T_PCH:
3802 case WM_T_PCH2:
3803 case WM_T_PCH_LPT:
3804 /*
3805 * Set the mac to wait the maximum time between each
3806 * iteration and increase the max iterations when
3807 * polling the phy; this fixes erroneous timeouts at
3808 * 10Mbps.
3809 */
3810 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3811 0xFFFF);
3812 val = wm_kmrn_readreg(sc,
3813 KUMCTRLSTA_OFFSET_INB_PARAM);
3814 val |= 0x3F;
3815 wm_kmrn_writereg(sc,
3816 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3817 break;
3818 default:
3819 break;
3820 }
3821
3822 if (sc->sc_type == WM_T_80003) {
3823 val = CSR_READ(sc, WMREG_CTRL_EXT);
3824 val &= ~CTRL_EXT_LINK_MODE_MASK;
3825 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3826
3827 /* Bypass RX and TX FIFO's */
3828 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3829 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3830 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3831 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3832 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3833 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3834 }
3835 }
3836 #if 0
3837 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3838 #endif
3839
3840 /* Set up checksum offload parameters. */
3841 reg = CSR_READ(sc, WMREG_RXCSUM);
3842 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3843 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3844 reg |= RXCSUM_IPOFL;
3845 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3846 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3847 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3848 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3849 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3850
3851 /* Reset TBI's RXCFG count */
3852 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3853
3854 /* Set up the interrupt registers. */
3855 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3856 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3857 ICR_RXO | ICR_RXT0;
3858 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3859 sc->sc_icr |= ICR_RXCFG;
3860 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3861
3862 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3863 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3864 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3865 reg = CSR_READ(sc, WMREG_KABGTXD);
3866 reg |= KABGTXD_BGSQLBIAS;
3867 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3868 }
3869
3870 /* Set up the inter-packet gap. */
3871 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3872
3873 if (sc->sc_type >= WM_T_82543) {
3874 /*
3875 * Set up the interrupt throttling register (units of 256ns)
3876 * Note that a footnote in Intel's documentation says this
3877 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3878 * or 10Mbit mode. Empirically, it appears to be the case
3879 * that that is also true for the 1024ns units of the other
3880 * interrupt-related timer registers -- so, really, we ought
3881 * to divide this value by 4 when the link speed is low.
3882 *
3883 * XXX implement this division at link speed change!
3884 */
3885
3886 /*
3887 * For N interrupts/sec, set this value to:
3888 * 1000000000 / (N * 256). Note that we set the
3889 * absolute and packet timer values to this value
3890 * divided by 4 to get "simple timer" behavior.
3891 */
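		/*
		 * Worked example (added for clarity): sc_itr = 1500
		 * below gives N = 1000000000 / (1500 * 256) ~= 2604
		 * interrupts/sec, and 1500 / 4 = 375 is the
		 * TIDV/TADV/RDTR/RADV value programmed earlier in this
		 * function.
		 */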
3892
3893 sc->sc_itr = 1500; /* 2604 ints/sec */
3894 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3895 }
3896
3897 /* Set the VLAN ethernetype. */
3898 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3899
3900 /*
3901 * Set up the transmit control register; we start out with
3902 * a collision distance suitable for FDX, but update it when
3903 * we resolve the media type.
3904 */
3905 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3906 | TCTL_CT(TX_COLLISION_THRESHOLD)
3907 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3908 if (sc->sc_type >= WM_T_82571)
3909 sc->sc_tctl |= TCTL_MULR;
3910 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3911
3912 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3913 /* Write TDT after TCTL.EN is set. See the document. */
3914 CSR_WRITE(sc, WMREG_TDT, 0);
3915 }
3916
3917 if (sc->sc_type == WM_T_80003) {
3918 reg = CSR_READ(sc, WMREG_TCTL_EXT);
3919 reg &= ~TCTL_EXT_GCEX_MASK;
3920 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3921 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3922 }
3923
3924 /* Set the media. */
3925 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3926 goto out;
3927
3928 /* Configure for OS presence */
3929 wm_init_manageability(sc);
3930
3931 /*
3932 * Set up the receive control register; we actually program
3933 * the register when we set the receive filter. Use multicast
3934 * address offset type 0.
3935 *
3936 * Only the i82544 has the ability to strip the incoming
3937 * CRC, so we don't enable that feature.
3938 */
3939 sc->sc_mchash_type = 0;
3940 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3941 | RCTL_MO(sc->sc_mchash_type);
3942
3943 /*
3944 * The I350 has a bug where it always strips the CRC whether
3945 * asked to or not. So ask for stripped CRC here and cope in rxeof
3946 */
3947 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3948 || (sc->sc_type == WM_T_I210))
3949 sc->sc_rctl |= RCTL_SECRC;
3950
3951 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
3952 && (ifp->if_mtu > ETHERMTU)) {
3953 sc->sc_rctl |= RCTL_LPE;
3954 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3955 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
3956 }
3957
3958 if (MCLBYTES == 2048) {
3959 sc->sc_rctl |= RCTL_2k;
3960 } else {
3961 if (sc->sc_type >= WM_T_82543) {
3962 switch (MCLBYTES) {
3963 case 4096:
3964 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3965 break;
3966 case 8192:
3967 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3968 break;
3969 case 16384:
3970 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3971 break;
3972 default:
3973 panic("wm_init: MCLBYTES %d unsupported",
3974 MCLBYTES);
3975 break;
3976 }
3977 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3978 }
3979
3980 /* Set the receive filter. */
3981 wm_set_filter(sc);
3982
3983 /* Enable ECC */
3984 switch (sc->sc_type) {
3985 case WM_T_82571:
3986 reg = CSR_READ(sc, WMREG_PBA_ECC);
3987 reg |= PBA_ECC_CORR_EN;
3988 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
3989 break;
3990 case WM_T_PCH_LPT:
3991 reg = CSR_READ(sc, WMREG_PBECCSTS);
3992 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
3993 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
3994
3995 reg = CSR_READ(sc, WMREG_CTRL);
3996 reg |= CTRL_MEHE;
3997 CSR_WRITE(sc, WMREG_CTRL, reg);
3998 break;
3999 default:
4000 break;
4001 }
4002
4003 /* On 82575 and later, set RDT only if RX is enabled */
4004 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4005 for (i = 0; i < WM_NRXDESC; i++)
4006 WM_INIT_RXDESC(sc, i);
4007
4008 sc->sc_stopping = false;
4009
4010 /* Start the one second link check clock. */
4011 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4012
4013 /* ...all done! */
4014 ifp->if_flags |= IFF_RUNNING;
4015 ifp->if_flags &= ~IFF_OACTIVE;
4016
4017 out:
4018 sc->sc_if_flags = ifp->if_flags;
4019 if (error)
4020 log(LOG_ERR, "%s: interface not running\n",
4021 device_xname(sc->sc_dev));
4022 return error;
4023 }
4024
4025 /*
4026 * wm_stop: [ifnet interface function]
4027 *
4028 * Stop transmission on the interface.
4029 */
4030 static void
4031 wm_stop(struct ifnet *ifp, int disable)
4032 {
4033 struct wm_softc *sc = ifp->if_softc;
4034
4035 WM_BOTH_LOCK(sc);
4036 wm_stop_locked(ifp, disable);
4037 WM_BOTH_UNLOCK(sc);
4038 }
4039
4040 static void
4041 wm_stop_locked(struct ifnet *ifp, int disable)
4042 {
4043 struct wm_softc *sc = ifp->if_softc;
4044 struct wm_txsoft *txs;
4045 int i;
4046
4047 KASSERT(WM_BOTH_LOCKED(sc));
4048
4049 sc->sc_stopping = true;
4050
4051 /* Stop the one second clock. */
4052 callout_stop(&sc->sc_tick_ch);
4053
4054 /* Stop the 82547 Tx FIFO stall check timer. */
4055 if (sc->sc_type == WM_T_82547)
4056 callout_stop(&sc->sc_txfifo_ch);
4057
4058 if (sc->sc_flags & WM_F_HAS_MII) {
4059 /* Down the MII. */
4060 mii_down(&sc->sc_mii);
4061 } else {
4062 #if 0
4063 /* Should we clear PHY's status properly? */
4064 wm_reset(sc);
4065 #endif
4066 }
4067
4068 /* Stop the transmit and receive processes. */
4069 CSR_WRITE(sc, WMREG_TCTL, 0);
4070 CSR_WRITE(sc, WMREG_RCTL, 0);
4071 sc->sc_rctl &= ~RCTL_EN;
4072
4073 /*
4074 * Clear the interrupt mask to ensure the device cannot assert its
4075 * interrupt line.
4076 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4077 * any currently pending or shared interrupt.
4078 */
4079 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4080 sc->sc_icr = 0;
4081
4082 /* Release any queued transmit buffers. */
4083 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4084 txs = &sc->sc_txsoft[i];
4085 if (txs->txs_mbuf != NULL) {
4086 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4087 m_freem(txs->txs_mbuf);
4088 txs->txs_mbuf = NULL;
4089 }
4090 }
4091
4092 /* Mark the interface as down and cancel the watchdog timer. */
4093 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4094 ifp->if_timer = 0;
4095
4096 if (disable)
4097 wm_rxdrain(sc);
4098
4099 #if 0 /* notyet */
4100 if (sc->sc_type >= WM_T_82544)
4101 CSR_WRITE(sc, WMREG_WUC, 0);
4102 #endif
4103 }
4104
4105 /*
4106 * wm_tx_offload:
4107 *
4108 * Set up TCP/IP checksumming parameters for the
4109 * specified packet.
4110 */
4111 static int
4112 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4113 uint8_t *fieldsp)
4114 {
4115 struct mbuf *m0 = txs->txs_mbuf;
4116 struct livengood_tcpip_ctxdesc *t;
4117 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4118 uint32_t ipcse;
4119 struct ether_header *eh;
4120 int offset, iphl;
4121 uint8_t fields;
4122
4123 /*
4124 * XXX It would be nice if the mbuf pkthdr had offset
4125 * fields for the protocol headers.
4126 */
4127
4128 eh = mtod(m0, struct ether_header *);
4129 switch (htons(eh->ether_type)) {
4130 case ETHERTYPE_IP:
4131 case ETHERTYPE_IPV6:
4132 offset = ETHER_HDR_LEN;
4133 break;
4134
4135 case ETHERTYPE_VLAN:
4136 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4137 break;
4138
4139 default:
4140 /*
4141 * We don't support this protocol or encapsulation.
4142 */
4143 *fieldsp = 0;
4144 *cmdp = 0;
4145 return 0;
4146 }
4147
4148 if ((m0->m_pkthdr.csum_flags &
4149 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4150 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4151 } else {
4152 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4153 }
4154 ipcse = offset + iphl - 1;
4155
4156 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4157 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4158 seg = 0;
4159 fields = 0;
4160
4161 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4162 int hlen = offset + iphl;
4163 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4164
4165 if (__predict_false(m0->m_len <
4166 (hlen + sizeof(struct tcphdr)))) {
4167 /*
4168 * TCP/IP headers are not in the first mbuf; we need
4169 * to do this the slow and painful way. Let's just
4170 * hope this doesn't happen very often.
4171 */
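			/*
			 * Added note: for TSO the hardware expects
			 * th_sum to be pre-seeded with the pseudo-header
			 * checksum computed over a zero payload length,
			 * which is why ip_len/ip6_plen are cleared
			 * before calling in_cksum_phdr() or
			 * in6_cksum_phdr() below.
			 */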
4172 struct tcphdr th;
4173
4174 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4175
4176 m_copydata(m0, hlen, sizeof(th), &th);
4177 if (v4) {
4178 struct ip ip;
4179
4180 m_copydata(m0, offset, sizeof(ip), &ip);
4181 ip.ip_len = 0;
4182 m_copyback(m0,
4183 offset + offsetof(struct ip, ip_len),
4184 sizeof(ip.ip_len), &ip.ip_len);
4185 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4186 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4187 } else {
4188 struct ip6_hdr ip6;
4189
4190 m_copydata(m0, offset, sizeof(ip6), &ip6);
4191 ip6.ip6_plen = 0;
4192 m_copyback(m0,
4193 offset + offsetof(struct ip6_hdr, ip6_plen),
4194 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4195 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4196 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4197 }
4198 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4199 sizeof(th.th_sum), &th.th_sum);
4200
4201 hlen += th.th_off << 2;
4202 } else {
4203 /*
4204 * TCP/IP headers are in the first mbuf; we can do
4205 * this the easy way.
4206 */
4207 struct tcphdr *th;
4208
4209 if (v4) {
4210 struct ip *ip =
4211 (void *)(mtod(m0, char *) + offset);
4212 th = (void *)(mtod(m0, char *) + hlen);
4213
4214 ip->ip_len = 0;
4215 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4216 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4217 } else {
4218 struct ip6_hdr *ip6 =
4219 (void *)(mtod(m0, char *) + offset);
4220 th = (void *)(mtod(m0, char *) + hlen);
4221
4222 ip6->ip6_plen = 0;
4223 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4224 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4225 }
4226 hlen += th->th_off << 2;
4227 }
4228
4229 if (v4) {
4230 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4231 cmdlen |= WTX_TCPIP_CMD_IP;
4232 } else {
4233 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4234 ipcse = 0;
4235 }
4236 cmd |= WTX_TCPIP_CMD_TSE;
4237 cmdlen |= WTX_TCPIP_CMD_TSE |
4238 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4239 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4240 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4241 }
4242
4243 /*
4244 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4245 * offload feature, if we load the context descriptor, we
4246 * MUST provide valid values for IPCSS and TUCSS fields.
4247 */
4248
4249 ipcs = WTX_TCPIP_IPCSS(offset) |
4250 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4251 WTX_TCPIP_IPCSE(ipcse);
4252 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4253 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4254 fields |= WTX_IXSM;
4255 }
4256
4257 offset += iphl;
4258
4259 if (m0->m_pkthdr.csum_flags &
4260 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4261 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4262 fields |= WTX_TXSM;
4263 tucs = WTX_TCPIP_TUCSS(offset) |
4264 WTX_TCPIP_TUCSO(offset +
4265 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4266 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4267 } else if ((m0->m_pkthdr.csum_flags &
4268 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4269 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4270 fields |= WTX_TXSM;
4271 tucs = WTX_TCPIP_TUCSS(offset) |
4272 WTX_TCPIP_TUCSO(offset +
4273 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4274 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4275 } else {
4276 /* Just initialize it to a valid TCP context. */
4277 tucs = WTX_TCPIP_TUCSS(offset) |
4278 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4279 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4280 }
4281
4282 /* Fill in the context descriptor. */
4283 t = (struct livengood_tcpip_ctxdesc *)
4284 &sc->sc_txdescs[sc->sc_txnext];
4285 t->tcpip_ipcs = htole32(ipcs);
4286 t->tcpip_tucs = htole32(tucs);
4287 t->tcpip_cmdlen = htole32(cmdlen);
4288 t->tcpip_seg = htole32(seg);
4289 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4290
4291 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4292 txs->txs_ndesc++;
4293
4294 *cmdp = cmd;
4295 *fieldsp = fields;
4296
4297 return 0;
4298 }
4299
4300 static void
4301 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4302 {
4303 struct mbuf *m;
4304 int i;
4305
4306 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4307 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4308 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4309 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4310 m->m_data, m->m_len, m->m_flags);
4311 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4312 i, i == 1 ? "" : "s");
4313 }
4314
4315 /*
4316 * wm_82547_txfifo_stall:
4317 *
4318 * Callout used to wait for the 82547 Tx FIFO to drain,
4319 * reset the FIFO pointers, and restart packet transmission.
4320 */
4321 static void
4322 wm_82547_txfifo_stall(void *arg)
4323 {
4324 struct wm_softc *sc = arg;
4325 #ifndef WM_MPSAFE
4326 int s;
4327
4328 s = splnet();
4329 #endif
4330 WM_TX_LOCK(sc);
4331
4332 if (sc->sc_stopping)
4333 goto out;
4334
4335 if (sc->sc_txfifo_stall) {
4336 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4337 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4338 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4339 /*
4340 * Packets have drained. Stop transmitter, reset
4341 * FIFO pointers, restart transmitter, and kick
4342 * the packet queue.
4343 */
4344 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4345 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4346 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4347 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4348 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4349 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4350 CSR_WRITE(sc, WMREG_TCTL, tctl);
4351 CSR_WRITE_FLUSH(sc);
4352
4353 sc->sc_txfifo_head = 0;
4354 sc->sc_txfifo_stall = 0;
4355 wm_start_locked(&sc->sc_ethercom.ec_if);
4356 } else {
4357 /*
4358 * Still waiting for packets to drain; try again in
4359 * another tick.
4360 */
4361 callout_schedule(&sc->sc_txfifo_ch, 1);
4362 }
4363 }
4364
4365 out:
4366 WM_TX_UNLOCK(sc);
4367 #ifndef WM_MPSAFE
4368 splx(s);
4369 #endif
4370 }
4371
4372 /*
4373 * wm_82547_txfifo_bugchk:
4374 *
4375 * Check for bug condition in the 82547 Tx FIFO. We need to
4376 * prevent enqueueing a packet that would wrap around the end
4377 * of the Tx FIFO ring buffer, otherwise the chip will croak.
4378 *
4379 * We do this by checking the amount of space before the end
4380 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4381 * the Tx FIFO, wait for all remaining packets to drain, reset
4382 * the internal FIFO pointers to the beginning, and restart
4383 * transmission on the interface.
4384 */
4385 #define WM_FIFO_HDR 0x10
4386 #define WM_82547_PAD_LEN 0x3e0
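/*
 * Worked example (added for clarity): a 1514-byte frame occupies
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space.
 * With WM_82547_PAD_LEN = 0x3e0 (992), the check below stalls the FIFO
 * once space = sc_txfifo_size - sc_txfifo_head drops to
 * 1536 - 992 = 544 bytes or less, i.e. before the frame can wrap the
 * end of the FIFO in half-duplex mode.
 */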
4387 static int
4388 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4389 {
4390 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4391 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4392
4393 /* Just return if already stalled. */
4394 if (sc->sc_txfifo_stall)
4395 return 1;
4396
4397 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4398 /* Stall only occurs in half-duplex mode. */
4399 goto send_packet;
4400 }
4401
4402 if (len >= WM_82547_PAD_LEN + space) {
4403 sc->sc_txfifo_stall = 1;
4404 callout_schedule(&sc->sc_txfifo_ch, 1);
4405 return 1;
4406 }
4407
4408 send_packet:
4409 sc->sc_txfifo_head += len;
4410 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4411 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4412
4413 return 0;
4414 }
4415
4416 /*
4417 * wm_start: [ifnet interface function]
4418 *
4419 * Start packet transmission on the interface.
4420 */
4421 static void
4422 wm_start(struct ifnet *ifp)
4423 {
4424 struct wm_softc *sc = ifp->if_softc;
4425
4426 WM_TX_LOCK(sc);
4427 if (!sc->sc_stopping)
4428 wm_start_locked(ifp);
4429 WM_TX_UNLOCK(sc);
4430 }
4431
4432 static void
4433 wm_start_locked(struct ifnet *ifp)
4434 {
4435 struct wm_softc *sc = ifp->if_softc;
4436 struct mbuf *m0;
4437 struct m_tag *mtag;
4438 struct wm_txsoft *txs;
4439 bus_dmamap_t dmamap;
4440 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4441 bus_addr_t curaddr;
4442 bus_size_t seglen, curlen;
4443 uint32_t cksumcmd;
4444 uint8_t cksumfields;
4445
4446 KASSERT(WM_TX_LOCKED(sc));
4447
4448 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4449 return;
4450
4451 /* Remember the previous number of free descriptors. */
4452 ofree = sc->sc_txfree;
4453
4454 /*
4455 * Loop through the send queue, setting up transmit descriptors
4456 * until we drain the queue, or use up all available transmit
4457 * descriptors.
4458 */
4459 for (;;) {
4460 m0 = NULL;
4461
4462 /* Get a work queue entry. */
4463 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4464 wm_txintr(sc);
4465 if (sc->sc_txsfree == 0) {
4466 DPRINTF(WM_DEBUG_TX,
4467 ("%s: TX: no free job descriptors\n",
4468 device_xname(sc->sc_dev)));
4469 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4470 break;
4471 }
4472 }
4473
4474 /* Grab a packet off the queue. */
4475 IFQ_DEQUEUE(&ifp->if_snd, m0);
4476 if (m0 == NULL)
4477 break;
4478
4479 DPRINTF(WM_DEBUG_TX,
4480 ("%s: TX: have packet to transmit: %p\n",
4481 device_xname(sc->sc_dev), m0));
4482
4483 txs = &sc->sc_txsoft[sc->sc_txsnext];
4484 dmamap = txs->txs_dmamap;
4485
4486 use_tso = (m0->m_pkthdr.csum_flags &
4487 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4488
4489 /*
4490 * So says the Linux driver:
4491 * The controller does a simple calculation to make sure
4492 * there is enough room in the FIFO before initiating the
4493 * DMA for each buffer. The calc is:
4494 * 4 = ceil(buffer len / MSS)
4495 * To make sure we don't overrun the FIFO, adjust the max
4496 * buffer len if the MSS drops.
4497 */
4498 dmamap->dm_maxsegsz =
4499 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4500 ? m0->m_pkthdr.segsz << 2
4501 : WTX_MAX_LEN;
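		/*
		 * Worked example (added for clarity): with a typical
		 * MSS of 1448, segsz << 2 = 5792, so each DMA segment
		 * is capped at 4 * MSS and the controller's
		 * ceil(buffer len / MSS) estimate never exceeds 4; for
		 * larger MSS values the cap falls back to WTX_MAX_LEN.
		 */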
4502
4503 /*
4504 * Load the DMA map. If this fails, the packet either
4505 * didn't fit in the allotted number of segments, or we
4506 * were short on resources. For the too-many-segments
4507 * case, we simply report an error and drop the packet,
4508 * since we can't sanely copy a jumbo packet to a single
4509 * buffer.
4510 */
4511 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4512 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4513 if (error) {
4514 if (error == EFBIG) {
4515 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4516 log(LOG_ERR, "%s: Tx packet consumes too many "
4517 "DMA segments, dropping...\n",
4518 device_xname(sc->sc_dev));
4519 wm_dump_mbuf_chain(sc, m0);
4520 m_freem(m0);
4521 continue;
4522 }
4523 /* Short on resources, just stop for now. */
4524 DPRINTF(WM_DEBUG_TX,
4525 ("%s: TX: dmamap load failed: %d\n",
4526 device_xname(sc->sc_dev), error));
4527 break;
4528 }
4529
4530 segs_needed = dmamap->dm_nsegs;
4531 if (use_tso) {
4532 /* For sentinel descriptor; see below. */
4533 segs_needed++;
4534 }
4535
4536 /*
4537 * Ensure we have enough descriptors free to describe
4538 * the packet. Note, we always reserve one descriptor
4539 * at the end of the ring due to the semantics of the
4540 * TDT register, plus one more in the event we need
4541 * to load offload context.
4542 */
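		/*
		 * Added note: the "- 2" below reserves one slot for the
		 * TDT semantics mentioned above and one for a possible
		 * checksum/TSO context descriptor; e.g. a packet with
		 * 8 DMA segments needs sc_txfree >= 10 to proceed.
		 */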
4543 if (segs_needed > sc->sc_txfree - 2) {
4544 /*
4545 * Not enough free descriptors to transmit this
4546 * packet. We haven't committed anything yet,
4547 * so just unload the DMA map, put the packet
4548 * back on the queue, and punt. Notify the upper
4549 * layer that there are no more slots left.
4550 */
4551 DPRINTF(WM_DEBUG_TX,
4552 ("%s: TX: need %d (%d) descriptors, have %d\n",
4553 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4554 segs_needed, sc->sc_txfree - 1));
4555 ifp->if_flags |= IFF_OACTIVE;
4556 bus_dmamap_unload(sc->sc_dmat, dmamap);
4557 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4558 break;
4559 }
4560
4561 /*
4562 * Check for 82547 Tx FIFO bug. We need to do this
4563 * once we know we can transmit the packet, since we
4564 * do some internal FIFO space accounting here.
4565 */
4566 if (sc->sc_type == WM_T_82547 &&
4567 wm_82547_txfifo_bugchk(sc, m0)) {
4568 DPRINTF(WM_DEBUG_TX,
4569 ("%s: TX: 82547 Tx FIFO bug detected\n",
4570 device_xname(sc->sc_dev)));
4571 ifp->if_flags |= IFF_OACTIVE;
4572 bus_dmamap_unload(sc->sc_dmat, dmamap);
4573 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4574 break;
4575 }
4576
4577 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4578
4579 DPRINTF(WM_DEBUG_TX,
4580 ("%s: TX: packet has %d (%d) DMA segments\n",
4581 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4582
4583 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4584
4585 /*
4586 * Store a pointer to the packet so that we can free it
4587 * later.
4588 *
4589 * Initially, we consider the number of descriptors the
4590 * packet uses to be the number of DMA segments. This may be
4591 * incremented by 1 if we do checksum offload (a descriptor
4592 * is used to set the checksum context).
4593 */
4594 txs->txs_mbuf = m0;
4595 txs->txs_firstdesc = sc->sc_txnext;
4596 txs->txs_ndesc = segs_needed;
4597
4598 /* Set up offload parameters for this packet. */
4599 if (m0->m_pkthdr.csum_flags &
4600 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4601 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4602 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4603 if (wm_tx_offload(sc, txs, &cksumcmd,
4604 &cksumfields) != 0) {
4605 /* Error message already displayed. */
4606 bus_dmamap_unload(sc->sc_dmat, dmamap);
4607 continue;
4608 }
4609 } else {
4610 cksumcmd = 0;
4611 cksumfields = 0;
4612 }
4613
4614 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4615
4616 /* Sync the DMA map. */
4617 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4618 BUS_DMASYNC_PREWRITE);
4619
4620 /* Initialize the transmit descriptor. */
4621 for (nexttx = sc->sc_txnext, seg = 0;
4622 seg < dmamap->dm_nsegs; seg++) {
4623 for (seglen = dmamap->dm_segs[seg].ds_len,
4624 curaddr = dmamap->dm_segs[seg].ds_addr;
4625 seglen != 0;
4626 curaddr += curlen, seglen -= curlen,
4627 nexttx = WM_NEXTTX(sc, nexttx)) {
4628 curlen = seglen;
4629
4630 /*
4631 * So says the Linux driver:
4632 * Workaround for premature descriptor
4633 * write-backs in TSO mode. Append a
4634 * 4-byte sentinel descriptor.
4635 */
4636 if (use_tso &&
4637 seg == dmamap->dm_nsegs - 1 &&
4638 curlen > 8)
4639 curlen -= 4;
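				/*
				 * Added note: trimming 4 bytes here
				 * leaves a 4-byte remainder in seglen,
				 * so the loop emits one extra 4-byte
				 * descriptor -- the sentinel that
				 * segs_needed reserved room for above.
				 */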
4640
4641 wm_set_dma_addr(
4642 &sc->sc_txdescs[nexttx].wtx_addr,
4643 curaddr);
4644 sc->sc_txdescs[nexttx].wtx_cmdlen =
4645 htole32(cksumcmd | curlen);
4646 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4647 0;
4648 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4649 cksumfields;
4650 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4651 lasttx = nexttx;
4652
4653 DPRINTF(WM_DEBUG_TX,
4654 ("%s: TX: desc %d: low %#" PRIx64 ", "
4655 "len %#04zx\n",
4656 device_xname(sc->sc_dev), nexttx,
4657 (uint64_t)curaddr, curlen));
4658 }
4659 }
4660
4661 KASSERT(lasttx != -1);
4662
4663 /*
4664 * Set up the command byte on the last descriptor of
4665 * the packet. If we're in the interrupt delay window,
4666 * delay the interrupt.
4667 */
4668 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4669 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4670
4671 /*
4672 * If VLANs are enabled and the packet has a VLAN tag, set
4673 * up the descriptor to encapsulate the packet for us.
4674 *
4675 * This is only valid on the last descriptor of the packet.
4676 */
4677 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4678 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4679 htole32(WTX_CMD_VLE);
4680 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4681 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4682 }
4683
4684 txs->txs_lastdesc = lasttx;
4685
4686 DPRINTF(WM_DEBUG_TX,
4687 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4688 device_xname(sc->sc_dev),
4689 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4690
4691 /* Sync the descriptors we're using. */
4692 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4693 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4694
4695 /* Give the packet to the chip. */
4696 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4697
4698 DPRINTF(WM_DEBUG_TX,
4699 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4700
4701 DPRINTF(WM_DEBUG_TX,
4702 ("%s: TX: finished transmitting packet, job %d\n",
4703 device_xname(sc->sc_dev), sc->sc_txsnext));
4704
4705 /* Advance the tx pointer. */
4706 sc->sc_txfree -= txs->txs_ndesc;
4707 sc->sc_txnext = nexttx;
4708
4709 sc->sc_txsfree--;
4710 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4711
4712 /* Pass the packet to any BPF listeners. */
4713 bpf_mtap(ifp, m0);
4714 }
4715
4716 if (m0 != NULL) {
4717 ifp->if_flags |= IFF_OACTIVE;
4718 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4719 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4720 m_freem(m0);
4721 }
4722
4723 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4724 /* No more slots; notify upper layer. */
4725 ifp->if_flags |= IFF_OACTIVE;
4726 }
4727
4728 if (sc->sc_txfree != ofree) {
4729 /* Set a watchdog timer in case the chip flakes out. */
4730 ifp->if_timer = 5;
4731 }
4732 }
4733
4734 /*
4735 * wm_nq_tx_offload:
4736 *
4737 * Set up TCP/IP checksumming parameters for the
4738 * specified packet, for NEWQUEUE devices
4739 */
4740 static int
4741 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4742 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4743 {
4744 struct mbuf *m0 = txs->txs_mbuf;
4745 struct m_tag *mtag;
4746 uint32_t vl_len, mssidx, cmdc;
4747 struct ether_header *eh;
4748 int offset, iphl;
4749
4750 /*
4751 * XXX It would be nice if the mbuf pkthdr had offset
4752 * fields for the protocol headers.
4753 */
4754 *cmdlenp = 0;
4755 *fieldsp = 0;
4756
4757 eh = mtod(m0, struct ether_header *);
4758 switch (htons(eh->ether_type)) {
4759 case ETHERTYPE_IP:
4760 case ETHERTYPE_IPV6:
4761 offset = ETHER_HDR_LEN;
4762 break;
4763
4764 case ETHERTYPE_VLAN:
4765 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4766 break;
4767
4768 default:
4769 /* We don't support this protocol or encapsulation. */
4770 *do_csum = false;
4771 return 0;
4772 }
4773 *do_csum = true;
4774 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4775 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4776
4777 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4778 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4779
4780 if ((m0->m_pkthdr.csum_flags &
4781 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4782 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4783 } else {
4784 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4785 }
4786 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4787 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
4788
4789 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4790 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4791 << NQTXC_VLLEN_VLAN_SHIFT);
4792 *cmdlenp |= NQTX_CMD_VLE;
4793 }
4794
4795 mssidx = 0;
4796
4797 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4798 int hlen = offset + iphl;
4799 int tcp_hlen;
4800 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4801
4802 if (__predict_false(m0->m_len <
4803 (hlen + sizeof(struct tcphdr)))) {
4804 /*
4805 * TCP/IP headers are not in the first mbuf; we need
4806 * to do this the slow and painful way. Let's just
4807 * hope this doesn't happen very often.
4808 */
4809 struct tcphdr th;
4810
4811 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4812
4813 m_copydata(m0, hlen, sizeof(th), &th);
4814 if (v4) {
4815 struct ip ip;
4816
4817 m_copydata(m0, offset, sizeof(ip), &ip);
4818 ip.ip_len = 0;
4819 m_copyback(m0,
4820 offset + offsetof(struct ip, ip_len),
4821 sizeof(ip.ip_len), &ip.ip_len);
4822 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4823 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4824 } else {
4825 struct ip6_hdr ip6;
4826
4827 m_copydata(m0, offset, sizeof(ip6), &ip6);
4828 ip6.ip6_plen = 0;
4829 m_copyback(m0,
4830 offset + offsetof(struct ip6_hdr, ip6_plen),
4831 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4832 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4833 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4834 }
4835 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4836 sizeof(th.th_sum), &th.th_sum);
4837
4838 tcp_hlen = th.th_off << 2;
4839 } else {
4840 /*
4841 * TCP/IP headers are in the first mbuf; we can do
4842 * this the easy way.
4843 */
4844 struct tcphdr *th;
4845
4846 if (v4) {
4847 struct ip *ip =
4848 (void *)(mtod(m0, char *) + offset);
4849 th = (void *)(mtod(m0, char *) + hlen);
4850
4851 ip->ip_len = 0;
4852 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4853 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4854 } else {
4855 struct ip6_hdr *ip6 =
4856 (void *)(mtod(m0, char *) + offset);
4857 th = (void *)(mtod(m0, char *) + hlen);
4858
4859 ip6->ip6_plen = 0;
4860 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4861 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4862 }
4863 tcp_hlen = th->th_off << 2;
4864 }
4865 hlen += tcp_hlen;
4866 *cmdlenp |= NQTX_CMD_TSE;
4867
4868 if (v4) {
4869 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4870 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
4871 } else {
4872 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4873 *fieldsp |= NQTXD_FIELDS_TUXSM;
4874 }
4875 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
4876 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4877 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
4878 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
4879 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
4880 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
4881 } else {
4882 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
4883 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4884 }
4885
4886 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
4887 *fieldsp |= NQTXD_FIELDS_IXSM;
4888 cmdc |= NQTXC_CMD_IP4;
4889 }
4890
4891 if (m0->m_pkthdr.csum_flags &
4892 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
4893 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4894 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
4895 cmdc |= NQTXC_CMD_TCP;
4896 } else {
4897 cmdc |= NQTXC_CMD_UDP;
4898 }
4899 cmdc |= NQTXC_CMD_IP4;
4900 *fieldsp |= NQTXD_FIELDS_TUXSM;
4901 }
4902 if (m0->m_pkthdr.csum_flags &
4903 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
4904 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4905 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
4906 cmdc |= NQTXC_CMD_TCP;
4907 } else {
4908 cmdc |= NQTXC_CMD_UDP;
4909 }
4910 cmdc |= NQTXC_CMD_IP6;
4911 *fieldsp |= NQTXD_FIELDS_TUXSM;
4912 }
4913
4914 /* Fill in the context descriptor. */
4915 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
4916 htole32(vl_len);
4917 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
4918 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
4919 htole32(cmdc);
4920 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
4921 htole32(mssidx);
4922 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4923 DPRINTF(WM_DEBUG_TX,
4924 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
4925 sc->sc_txnext, 0, vl_len));
4926 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
4927 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4928 txs->txs_ndesc++;
4929 return 0;
4930 }
4931
4932 /*
4933 * wm_nq_start: [ifnet interface function]
4934 *
4935 * Start packet transmission on the interface for NEWQUEUE devices
4936 */
4937 static void
4938 wm_nq_start(struct ifnet *ifp)
4939 {
4940 struct wm_softc *sc = ifp->if_softc;
4941
4942 WM_TX_LOCK(sc);
4943 if (!sc->sc_stopping)
4944 wm_nq_start_locked(ifp);
4945 WM_TX_UNLOCK(sc);
4946 }
4947
4948 static void
4949 wm_nq_start_locked(struct ifnet *ifp)
4950 {
4951 struct wm_softc *sc = ifp->if_softc;
4952 struct mbuf *m0;
4953 struct m_tag *mtag;
4954 struct wm_txsoft *txs;
4955 bus_dmamap_t dmamap;
4956 int error, nexttx, lasttx = -1, seg, segs_needed;
4957 bool do_csum, sent;
4958
4959 KASSERT(WM_TX_LOCKED(sc));
4960
4961 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4962 return;
4963
4964 sent = false;
4965
4966 /*
4967 * Loop through the send queue, setting up transmit descriptors
4968 * until we drain the queue, or use up all available transmit
4969 * descriptors.
4970 */
4971 for (;;) {
4972 m0 = NULL;
4973
4974 /* Get a work queue entry. */
4975 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4976 wm_txintr(sc);
4977 if (sc->sc_txsfree == 0) {
4978 DPRINTF(WM_DEBUG_TX,
4979 ("%s: TX: no free job descriptors\n",
4980 device_xname(sc->sc_dev)));
4981 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4982 break;
4983 }
4984 }
4985
4986 /* Grab a packet off the queue. */
4987 IFQ_DEQUEUE(&ifp->if_snd, m0);
4988 if (m0 == NULL)
4989 break;
4990
4991 DPRINTF(WM_DEBUG_TX,
4992 ("%s: TX: have packet to transmit: %p\n",
4993 device_xname(sc->sc_dev), m0));
4994
4995 txs = &sc->sc_txsoft[sc->sc_txsnext];
4996 dmamap = txs->txs_dmamap;
4997
4998 /*
4999 * Load the DMA map. If this fails, the packet either
5000 * didn't fit in the allotted number of segments, or we
5001 * were short on resources. For the too-many-segments
5002 * case, we simply report an error and drop the packet,
5003 * since we can't sanely copy a jumbo packet to a single
5004 * buffer.
5005 */
5006 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5007 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5008 if (error) {
5009 if (error == EFBIG) {
5010 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5011 log(LOG_ERR, "%s: Tx packet consumes too many "
5012 "DMA segments, dropping...\n",
5013 device_xname(sc->sc_dev));
5014 wm_dump_mbuf_chain(sc, m0);
5015 m_freem(m0);
5016 continue;
5017 }
5018 /* Short on resources, just stop for now. */
5019 DPRINTF(WM_DEBUG_TX,
5020 ("%s: TX: dmamap load failed: %d\n",
5021 device_xname(sc->sc_dev), error));
5022 break;
5023 }
5024
5025 segs_needed = dmamap->dm_nsegs;
5026
5027 /*
5028 * Ensure we have enough descriptors free to describe
5029 * the packet. Note, we always reserve one descriptor
5030 * at the end of the ring due to the semantics of the
5031 * TDT register, plus one more in the event we need
5032 * to load offload context.
5033 */
5034 if (segs_needed > sc->sc_txfree - 2) {
5035 /*
5036 * Not enough free descriptors to transmit this
5037 * packet. We haven't committed anything yet,
5038 * so just unload the DMA map, put the packet
5039 * back on the queue, and punt. Notify the upper
5040 * layer that there are no more slots left.
5041 */
5042 DPRINTF(WM_DEBUG_TX,
5043 ("%s: TX: need %d (%d) descriptors, have %d\n",
5044 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5045 segs_needed, sc->sc_txfree - 1));
5046 ifp->if_flags |= IFF_OACTIVE;
5047 bus_dmamap_unload(sc->sc_dmat, dmamap);
5048 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5049 break;
5050 }
5051
5052 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5053
5054 DPRINTF(WM_DEBUG_TX,
5055 ("%s: TX: packet has %d (%d) DMA segments\n",
5056 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5057
5058 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5059
5060 /*
5061 * Store a pointer to the packet so that we can free it
5062 * later.
5063 *
5064 * Initially, we consider the number of descriptors the
5065 * packet uses to be the number of DMA segments. This may be
5066 * incremented by 1 if we do checksum offload (a descriptor
5067 * is used to set the checksum context).
5068 */
5069 txs->txs_mbuf = m0;
5070 txs->txs_firstdesc = sc->sc_txnext;
5071 txs->txs_ndesc = segs_needed;
5072
5073 /* Set up offload parameters for this packet. */
5074 uint32_t cmdlen, fields, dcmdlen;
5075 if (m0->m_pkthdr.csum_flags &
5076 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5077 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5078 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5079 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5080 &do_csum) != 0) {
5081 /* Error message already displayed. */
5082 bus_dmamap_unload(sc->sc_dmat, dmamap);
5083 continue;
5084 }
5085 } else {
5086 do_csum = false;
5087 cmdlen = 0;
5088 fields = 0;
5089 }
5090
5091 /* Sync the DMA map. */
5092 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5093 BUS_DMASYNC_PREWRITE);
5094
5095 /* Initialize the first transmit descriptor. */
5096 nexttx = sc->sc_txnext;
5097 if (!do_csum) {
5098 /* setup a legacy descriptor */
5099 wm_set_dma_addr(
5100 &sc->sc_txdescs[nexttx].wtx_addr,
5101 dmamap->dm_segs[0].ds_addr);
5102 sc->sc_txdescs[nexttx].wtx_cmdlen =
5103 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5104 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5105 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5106 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5107 NULL) {
5108 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5109 htole32(WTX_CMD_VLE);
5110 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5111 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5112 } else {
5113 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5114 }
5115 dcmdlen = 0;
5116 } else {
5117 /* setup an advanced data descriptor */
5118 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5119 htole64(dmamap->dm_segs[0].ds_addr);
5120 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5121 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5122 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5123 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5124 htole32(fields);
5125 DPRINTF(WM_DEBUG_TX,
5126 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5127 device_xname(sc->sc_dev), nexttx,
5128 (uint64_t)dmamap->dm_segs[0].ds_addr));
5129 DPRINTF(WM_DEBUG_TX,
5130 ("\t 0x%08x%08x\n", fields,
5131 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5132 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5133 }
5134
5135 lasttx = nexttx;
5136 nexttx = WM_NEXTTX(sc, nexttx);
5137 /*
5138 * Fill in the next descriptors. The legacy and
5139 * advanced formats are the same here.
5140 */
5141 for (seg = 1; seg < dmamap->dm_nsegs;
5142 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5143 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5144 htole64(dmamap->dm_segs[seg].ds_addr);
5145 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5146 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5147 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5148 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5149 lasttx = nexttx;
5150
5151 DPRINTF(WM_DEBUG_TX,
5152 ("%s: TX: desc %d: %#" PRIx64 ", "
5153 "len %#04zx\n",
5154 device_xname(sc->sc_dev), nexttx,
5155 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5156 dmamap->dm_segs[seg].ds_len));
5157 }
5158
5159 KASSERT(lasttx != -1);
5160
5161 /*
5162 * Set up the command byte on the last descriptor of
5163 * the packet. If we're in the interrupt delay window,
5164 * delay the interrupt.
5165 */
5166 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5167 (NQTX_CMD_EOP | NQTX_CMD_RS));
5168 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5169 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5170
5171 txs->txs_lastdesc = lasttx;
5172
5173 DPRINTF(WM_DEBUG_TX,
5174 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5175 device_xname(sc->sc_dev),
5176 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5177
5178 /* Sync the descriptors we're using. */
5179 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5180 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5181
5182 /* Give the packet to the chip. */
5183 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5184 sent = true;
5185
5186 DPRINTF(WM_DEBUG_TX,
5187 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5188
5189 DPRINTF(WM_DEBUG_TX,
5190 ("%s: TX: finished transmitting packet, job %d\n",
5191 device_xname(sc->sc_dev), sc->sc_txsnext));
5192
5193 /* Advance the tx pointer. */
5194 sc->sc_txfree -= txs->txs_ndesc;
5195 sc->sc_txnext = nexttx;
5196
5197 sc->sc_txsfree--;
5198 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5199
5200 /* Pass the packet to any BPF listeners. */
5201 bpf_mtap(ifp, m0);
5202 }
5203
5204 if (m0 != NULL) {
5205 ifp->if_flags |= IFF_OACTIVE;
5206 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5207 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5208 m_freem(m0);
5209 }
5210
5211 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5212 /* No more slots; notify upper layer. */
5213 ifp->if_flags |= IFF_OACTIVE;
5214 }
5215
5216 if (sent) {
5217 /* Set a watchdog timer in case the chip flakes out. */
5218 ifp->if_timer = 5;
5219 }
5220 }
5221
5222 /* Interrupt */
5223
5224 /*
5225 * wm_txintr:
5226 *
5227 * Helper; handle transmit interrupts.
5228 */
5229 static void
5230 wm_txintr(struct wm_softc *sc)
5231 {
5232 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5233 struct wm_txsoft *txs;
5234 uint8_t status;
5235 int i;
5236
5237 if (sc->sc_stopping)
5238 return;
5239
5240 ifp->if_flags &= ~IFF_OACTIVE;
5241
5242 /*
5243 * Go through the Tx list and free mbufs for those
5244 * frames which have been transmitted.
5245 */
5246 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5247 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5248 txs = &sc->sc_txsoft[i];
5249
5250 DPRINTF(WM_DEBUG_TX,
5251 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5252
5253 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5254 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5255
5256 status =
5257 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5258 if ((status & WTX_ST_DD) == 0) {
5259 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5260 BUS_DMASYNC_PREREAD);
5261 break;
5262 }
5263
5264 DPRINTF(WM_DEBUG_TX,
5265 ("%s: TX: job %d done: descs %d..%d\n",
5266 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5267 txs->txs_lastdesc));
5268
5269 /*
5270 * XXX We should probably be using the statistics
5271 * XXX registers, but I don't know if they exist
5272 * XXX on chips before the i82544.
5273 */
5274
5275 #ifdef WM_EVENT_COUNTERS
5276 if (status & WTX_ST_TU)
5277 WM_EVCNT_INCR(&sc->sc_ev_tu);
5278 #endif /* WM_EVENT_COUNTERS */
5279
5280 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5281 ifp->if_oerrors++;
5282 if (status & WTX_ST_LC)
5283 log(LOG_WARNING, "%s: late collision\n",
5284 device_xname(sc->sc_dev));
5285 else if (status & WTX_ST_EC) {
5286 ifp->if_collisions += 16;
5287 log(LOG_WARNING, "%s: excessive collisions\n",
5288 device_xname(sc->sc_dev));
5289 }
5290 } else
5291 ifp->if_opackets++;
5292
5293 sc->sc_txfree += txs->txs_ndesc;
5294 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5295 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5296 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5297 m_freem(txs->txs_mbuf);
5298 txs->txs_mbuf = NULL;
5299 }
5300
5301 /* Update the dirty transmit buffer pointer. */
5302 sc->sc_txsdirty = i;
5303 DPRINTF(WM_DEBUG_TX,
5304 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5305
5306 /*
5307 * If there are no more pending transmissions, cancel the watchdog
5308 * timer.
5309 */
5310 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5311 ifp->if_timer = 0;
5312 }
5313
5314 /*
5315 * wm_rxintr:
5316 *
5317 * Helper; handle receive interrupts.
5318 */
5319 static void
5320 wm_rxintr(struct wm_softc *sc)
5321 {
5322 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5323 struct wm_rxsoft *rxs;
5324 struct mbuf *m;
5325 int i, len;
5326 uint8_t status, errors;
5327 uint16_t vlantag;
5328
5329 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5330 rxs = &sc->sc_rxsoft[i];
5331
5332 DPRINTF(WM_DEBUG_RX,
5333 ("%s: RX: checking descriptor %d\n",
5334 device_xname(sc->sc_dev), i));
5335
5336 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5337
5338 status = sc->sc_rxdescs[i].wrx_status;
5339 errors = sc->sc_rxdescs[i].wrx_errors;
5340 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5341 vlantag = sc->sc_rxdescs[i].wrx_special;
5342
5343 if ((status & WRX_ST_DD) == 0) {
5344 /* We have processed all of the receive descriptors. */
5345 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5346 break;
5347 }
5348
5349 if (__predict_false(sc->sc_rxdiscard)) {
5350 DPRINTF(WM_DEBUG_RX,
5351 ("%s: RX: discarding contents of descriptor %d\n",
5352 device_xname(sc->sc_dev), i));
5353 WM_INIT_RXDESC(sc, i);
5354 if (status & WRX_ST_EOP) {
5355 /* Reset our state. */
5356 DPRINTF(WM_DEBUG_RX,
5357 ("%s: RX: resetting rxdiscard -> 0\n",
5358 device_xname(sc->sc_dev)));
5359 sc->sc_rxdiscard = 0;
5360 }
5361 continue;
5362 }
5363
5364 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5365 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5366
5367 m = rxs->rxs_mbuf;
5368
5369 /*
5370 * Add a new receive buffer to the ring, unless of
5371 * course the length is zero. Treat the latter as a
5372 * failed mapping.
5373 */
5374 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5375 /*
5376 * Failed, throw away what we've done so
5377 * far, and discard the rest of the packet.
5378 */
5379 ifp->if_ierrors++;
5380 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5381 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5382 WM_INIT_RXDESC(sc, i);
5383 if ((status & WRX_ST_EOP) == 0)
5384 sc->sc_rxdiscard = 1;
5385 if (sc->sc_rxhead != NULL)
5386 m_freem(sc->sc_rxhead);
5387 WM_RXCHAIN_RESET(sc);
5388 DPRINTF(WM_DEBUG_RX,
5389 ("%s: RX: Rx buffer allocation failed, "
5390 "dropping packet%s\n", device_xname(sc->sc_dev),
5391 sc->sc_rxdiscard ? " (discard)" : ""));
5392 continue;
5393 }
5394
5395 m->m_len = len;
5396 sc->sc_rxlen += len;
5397 DPRINTF(WM_DEBUG_RX,
5398 ("%s: RX: buffer at %p len %d\n",
5399 device_xname(sc->sc_dev), m->m_data, len));
5400
5401 /* If this is not the end of the packet, keep looking. */
5402 if ((status & WRX_ST_EOP) == 0) {
5403 WM_RXCHAIN_LINK(sc, m);
5404 DPRINTF(WM_DEBUG_RX,
5405 ("%s: RX: not yet EOP, rxlen -> %d\n",
5406 device_xname(sc->sc_dev), sc->sc_rxlen));
5407 continue;
5408 }
5409
5410 /*
5411 * Okay, we have the entire packet now. The chip is
5412 * configured to include the FCS except on the I350 and
5413 * I21[01] (not all chips can be configured to strip it),
5414 * so we need to trim it. We may also need to adjust the
5415 * length of the previous mbuf in the chain if the current
5416 * mbuf is too short. Due to an erratum, the RCTL_SECRC
5417 * bit in the RCTL register is always set on the I350, so
5418 * the CRC is already stripped there and we don't trim it.
5419 */
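		/*
		 * Added example: if the final mbuf holds only 2 of the
		 * 4 FCS bytes, the other 2 sit at the end of the
		 * previous mbuf, so sc_rxtail is shortened by 2 and the
		 * final mbuf's length is set to 0.
		 */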
5420 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5421 && (sc->sc_type != WM_T_I210)
5422 && (sc->sc_type != WM_T_I211)) {
5423 if (m->m_len < ETHER_CRC_LEN) {
5424 sc->sc_rxtail->m_len
5425 -= (ETHER_CRC_LEN - m->m_len);
5426 m->m_len = 0;
5427 } else
5428 m->m_len -= ETHER_CRC_LEN;
5429 len = sc->sc_rxlen - ETHER_CRC_LEN;
5430 } else
5431 len = sc->sc_rxlen;
5432
5433 WM_RXCHAIN_LINK(sc, m);
5434
5435 *sc->sc_rxtailp = NULL;
5436 m = sc->sc_rxhead;
5437
5438 WM_RXCHAIN_RESET(sc);
5439
5440 DPRINTF(WM_DEBUG_RX,
5441 ("%s: RX: have entire packet, len -> %d\n",
5442 device_xname(sc->sc_dev), len));
5443
5444 /* If an error occurred, update stats and drop the packet. */
5445 if (errors &
5446 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5447 if (errors & WRX_ER_SE)
5448 log(LOG_WARNING, "%s: symbol error\n",
5449 device_xname(sc->sc_dev));
5450 else if (errors & WRX_ER_SEQ)
5451 log(LOG_WARNING, "%s: receive sequence error\n",
5452 device_xname(sc->sc_dev));
5453 else if (errors & WRX_ER_CE)
5454 log(LOG_WARNING, "%s: CRC error\n",
5455 device_xname(sc->sc_dev));
5456 m_freem(m);
5457 continue;
5458 }
5459
5460 /* No errors. Receive the packet. */
5461 m->m_pkthdr.rcvif = ifp;
5462 m->m_pkthdr.len = len;
5463
5464 /*
5465 * If VLANs are enabled, VLAN packets have been unwrapped
5466 * for us. Associate the tag with the packet.
5467 */
5468 /* XXXX should check for i350 and i354 */
5469 if ((status & WRX_ST_VP) != 0) {
5470 VLAN_INPUT_TAG(ifp, m,
5471 le16toh(vlantag),
5472 continue);
5473 }
5474
5475 /* Set up checksum info for this packet. */
5476 if ((status & WRX_ST_IXSM) == 0) {
5477 if (status & WRX_ST_IPCS) {
5478 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5479 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5480 if (errors & WRX_ER_IPE)
5481 m->m_pkthdr.csum_flags |=
5482 M_CSUM_IPv4_BAD;
5483 }
5484 if (status & WRX_ST_TCPCS) {
5485 /*
5486 * Note: we don't know if this was TCP or UDP,
5487 * so we just set both bits, and expect the
5488 * upper layers to deal.
5489 */
5490 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5491 m->m_pkthdr.csum_flags |=
5492 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5493 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5494 if (errors & WRX_ER_TCPE)
5495 m->m_pkthdr.csum_flags |=
5496 M_CSUM_TCP_UDP_BAD;
5497 }
5498 }
5499
5500 ifp->if_ipackets++;
5501
5502 WM_RX_UNLOCK(sc);
5503
5504 /* Pass this up to any BPF listeners. */
5505 bpf_mtap(ifp, m);
5506
5507 /* Pass it on. */
5508 (*ifp->if_input)(ifp, m);
5509
5510 WM_RX_LOCK(sc);
5511
5512 if (sc->sc_stopping)
5513 break;
5514 }
5515
5516 /* Update the receive pointer. */
5517 sc->sc_rxptr = i;
5518
5519 DPRINTF(WM_DEBUG_RX,
5520 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5521 }
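
/*
 * Worked example (not from the original source) of the FCS trim in
 * wm_rxintr() above, for the case where the final mbuf is shorter than
 * the 4-byte CRC.  The numbers are made up; only the arithmetic mirrors
 * the code:
 *
 *	A 1518-byte frame (including FCS) arrives as a 1515-byte mbuf
 *	(sc_rxtail) plus a 3-byte mbuf (m).  Since m->m_len (3) is less
 *	than ETHER_CRC_LEN (4):
 *
 *		sc_rxtail->m_len -= 4 - 3;	1515 -> 1514
 *		m->m_len = 0;			remaining CRC byte dropped
 *		len = sc_rxlen - 4;		1518 -> 1514
 *
 *	so exactly the four FCS bytes vanish from the chain.
 */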
5522
5523 /*
5524 * wm_linkintr_gmii:
5525 *
5526 * Helper; handle link interrupts for GMII.
5527 */
5528 static void
5529 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5530 {
5531
5532 KASSERT(WM_TX_LOCKED(sc));
5533
5534 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5535 __func__));
5536
5537 if (icr & ICR_LSC) {
5538 DPRINTF(WM_DEBUG_LINK,
5539 ("%s: LINK: LSC -> mii_pollstat\n",
5540 device_xname(sc->sc_dev)));
5541 mii_pollstat(&sc->sc_mii);
5542 if (sc->sc_type == WM_T_82543) {
5543 int miistatus, active;
5544
5545 /*
5546 			 * With the 82543, we must force the MAC's
5547 			 * speed and duplex to match the PHY's
5548 			 * current configuration.
5549 */
5550 miistatus = sc->sc_mii.mii_media_status;
5551
5552 if (miistatus & IFM_ACTIVE) {
5553 active = sc->sc_mii.mii_media_active;
5554 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5555 switch (IFM_SUBTYPE(active)) {
5556 case IFM_10_T:
5557 sc->sc_ctrl |= CTRL_SPEED_10;
5558 break;
5559 case IFM_100_TX:
5560 sc->sc_ctrl |= CTRL_SPEED_100;
5561 break;
5562 case IFM_1000_T:
5563 sc->sc_ctrl |= CTRL_SPEED_1000;
5564 break;
5565 default:
5566 /*
5567 * fiber?
5568 					 * Should never get here.
5569 */
5570 printf("unknown media (%x)\n",
5571 active);
5572 break;
5573 }
5574 if (active & IFM_FDX)
5575 sc->sc_ctrl |= CTRL_FD;
5576 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5577 }
5578 } else if ((sc->sc_type == WM_T_ICH8)
5579 && (sc->sc_phytype == WMPHY_IGP_3)) {
5580 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5581 } else if (sc->sc_type == WM_T_PCH) {
5582 wm_k1_gig_workaround_hv(sc,
5583 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5584 }
5585
5586 if ((sc->sc_phytype == WMPHY_82578)
5587 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5588 == IFM_1000_T)) {
5589
5590 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5591 delay(200*1000); /* XXX too big */
5592
5593 /* Link stall fix for link up */
5594 wm_gmii_hv_writereg(sc->sc_dev, 1,
5595 HV_MUX_DATA_CTRL,
5596 HV_MUX_DATA_CTRL_GEN_TO_MAC
5597 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5598 wm_gmii_hv_writereg(sc->sc_dev, 1,
5599 HV_MUX_DATA_CTRL,
5600 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5601 }
5602 }
5603 } else if (icr & ICR_RXSEQ) {
5604 DPRINTF(WM_DEBUG_LINK,
5605 ("%s: LINK Receive sequence error\n",
5606 device_xname(sc->sc_dev)));
5607 }
5608 }
5609
5610 /*
5611 * wm_linkintr_tbi:
5612 *
5613 * Helper; handle link interrupts for TBI mode.
5614 */
5615 static void
5616 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5617 {
5618 uint32_t status;
5619
5620 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5621 __func__));
5622
5623 status = CSR_READ(sc, WMREG_STATUS);
5624 if (icr & ICR_LSC) {
5625 if (status & STATUS_LU) {
5626 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5627 device_xname(sc->sc_dev),
5628 (status & STATUS_FD) ? "FDX" : "HDX"));
5629 /*
5630 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
5631 			 * automatically, so refresh the cached sc_ctrl.
5632 */
5633
5634 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5635 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5636 sc->sc_fcrtl &= ~FCRTL_XONE;
5637 if (status & STATUS_FD)
5638 sc->sc_tctl |=
5639 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5640 else
5641 sc->sc_tctl |=
5642 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5643 if (sc->sc_ctrl & CTRL_TFCE)
5644 sc->sc_fcrtl |= FCRTL_XONE;
5645 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5646 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5647 WMREG_OLD_FCRTL : WMREG_FCRTL,
5648 sc->sc_fcrtl);
5649 sc->sc_tbi_linkup = 1;
5650 } else {
5651 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5652 device_xname(sc->sc_dev)));
5653 sc->sc_tbi_linkup = 0;
5654 }
5655 wm_tbi_set_linkled(sc);
5656 } else if (icr & ICR_RXCFG) {
5657 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
5658 device_xname(sc->sc_dev)));
5659 sc->sc_tbi_nrxcfg++;
5660 wm_check_for_link(sc);
5661 } else if (icr & ICR_RXSEQ) {
5662 DPRINTF(WM_DEBUG_LINK,
5663 ("%s: LINK: Receive sequence error\n",
5664 device_xname(sc->sc_dev)));
5665 }
5666 }
5667
5668 /*
5669 * wm_linkintr:
5670 *
5671 * Helper; handle link interrupts.
5672 */
5673 static void
5674 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5675 {
5676
5677 if (sc->sc_flags & WM_F_HAS_MII)
5678 wm_linkintr_gmii(sc, icr);
5679 else
5680 wm_linkintr_tbi(sc, icr);
5681 }
5682
5683 /*
5684 * wm_intr:
5685 *
5686 * Interrupt service routine.
5687 */
5688 static int
5689 wm_intr(void *arg)
5690 {
5691 struct wm_softc *sc = arg;
5692 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5693 uint32_t icr;
5694 int handled = 0;
5695
5696 while (1 /* CONSTCOND */) {
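		/* Reading ICR clears the asserted causes; loop until none remain. */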
5697 icr = CSR_READ(sc, WMREG_ICR);
5698 if ((icr & sc->sc_icr) == 0)
5699 break;
5700 rnd_add_uint32(&sc->rnd_source, icr);
5701
5702 WM_RX_LOCK(sc);
5703
5704 if (sc->sc_stopping) {
5705 WM_RX_UNLOCK(sc);
5706 break;
5707 }
5708
5709 handled = 1;
5710
5711 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5712 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5713 DPRINTF(WM_DEBUG_RX,
5714 ("%s: RX: got Rx intr 0x%08x\n",
5715 device_xname(sc->sc_dev),
5716 icr & (ICR_RXDMT0|ICR_RXT0)));
5717 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5718 }
5719 #endif
5720 wm_rxintr(sc);
5721
5722 WM_RX_UNLOCK(sc);
5723 WM_TX_LOCK(sc);
5724
5725 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5726 if (icr & ICR_TXDW) {
5727 DPRINTF(WM_DEBUG_TX,
5728 ("%s: TX: got TXDW interrupt\n",
5729 device_xname(sc->sc_dev)));
5730 WM_EVCNT_INCR(&sc->sc_ev_txdw);
5731 }
5732 #endif
5733 wm_txintr(sc);
5734
5735 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
5736 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5737 wm_linkintr(sc, icr);
5738 }
5739
5740 WM_TX_UNLOCK(sc);
5741
5742 if (icr & ICR_RXO) {
5743 #if defined(WM_DEBUG)
5744 log(LOG_WARNING, "%s: Receive overrun\n",
5745 device_xname(sc->sc_dev));
5746 #endif /* defined(WM_DEBUG) */
5747 }
5748 }
5749
5750 if (handled) {
5751 /* Try to get more packets going. */
5752 ifp->if_start(ifp);
5753 }
5754
5755 return handled;
5756 }
5757
5758 /*
5759 * Media related.
5760 * GMII, SGMII, TBI (and SERDES)
5761 */
5762
5763 /* GMII related */
5764
5765 /*
5766 * wm_gmii_reset:
5767 *
5768 * Reset the PHY.
5769 */
5770 static void
5771 wm_gmii_reset(struct wm_softc *sc)
5772 {
5773 uint32_t reg;
5774 int rv;
5775
5776 /* get phy semaphore */
5777 switch (sc->sc_type) {
5778 case WM_T_82571:
5779 case WM_T_82572:
5780 case WM_T_82573:
5781 case WM_T_82574:
5782 case WM_T_82583:
5783 /* XXX should get sw semaphore, too */
5784 rv = wm_get_swsm_semaphore(sc);
5785 break;
5786 case WM_T_82575:
5787 case WM_T_82576:
5788 case WM_T_82580:
5789 case WM_T_82580ER:
5790 case WM_T_I350:
5791 case WM_T_I354:
5792 case WM_T_I210:
5793 case WM_T_I211:
5794 case WM_T_80003:
5795 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5796 break;
5797 case WM_T_ICH8:
5798 case WM_T_ICH9:
5799 case WM_T_ICH10:
5800 case WM_T_PCH:
5801 case WM_T_PCH2:
5802 case WM_T_PCH_LPT:
5803 rv = wm_get_swfwhw_semaphore(sc);
5804 break;
5805 default:
5806 		/* nothing to do */
5807 rv = 0;
5808 break;
5809 }
5810 if (rv != 0) {
5811 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5812 __func__);
5813 return;
5814 }
5815
5816 switch (sc->sc_type) {
5817 case WM_T_82542_2_0:
5818 case WM_T_82542_2_1:
5819 /* null */
5820 break;
5821 case WM_T_82543:
5822 /*
5823 		 * With the 82543, we must force the MAC's speed and duplex
5824 		 * to match the PHY's current configuration.
5825 * In addition, we need to perform a hardware reset on the PHY
5826 * to take it out of reset.
5827 */
5828 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5829 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5830
5831 /* The PHY reset pin is active-low. */
5832 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5833 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5834 CTRL_EXT_SWDPIN(4));
5835 reg |= CTRL_EXT_SWDPIO(4);
5836
5837 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5838 CSR_WRITE_FLUSH(sc);
5839 delay(10*1000);
5840
5841 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5842 CSR_WRITE_FLUSH(sc);
5843 delay(150);
5844 #if 0
5845 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5846 #endif
5847 delay(20*1000); /* XXX extra delay to get PHY ID? */
5848 break;
5849 case WM_T_82544: /* reset 10000us */
5850 case WM_T_82540:
5851 case WM_T_82545:
5852 case WM_T_82545_3:
5853 case WM_T_82546:
5854 case WM_T_82546_3:
5855 case WM_T_82541:
5856 case WM_T_82541_2:
5857 case WM_T_82547:
5858 case WM_T_82547_2:
5859 case WM_T_82571: /* reset 100us */
5860 case WM_T_82572:
5861 case WM_T_82573:
5862 case WM_T_82574:
5863 case WM_T_82575:
5864 case WM_T_82576:
5865 case WM_T_82580:
5866 case WM_T_82580ER:
5867 case WM_T_I350:
5868 case WM_T_I354:
5869 case WM_T_I210:
5870 case WM_T_I211:
5871 case WM_T_82583:
5872 case WM_T_80003:
5873 /* generic reset */
5874 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5875 CSR_WRITE_FLUSH(sc);
5876 delay(20000);
5877 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5878 CSR_WRITE_FLUSH(sc);
5879 delay(20000);
5880
5881 if ((sc->sc_type == WM_T_82541)
5882 || (sc->sc_type == WM_T_82541_2)
5883 || (sc->sc_type == WM_T_82547)
5884 || (sc->sc_type == WM_T_82547_2)) {
5885 			/* workarounds for igp are done in igp_reset() */
5886 /* XXX add code to set LED after phy reset */
5887 }
5888 break;
5889 case WM_T_ICH8:
5890 case WM_T_ICH9:
5891 case WM_T_ICH10:
5892 case WM_T_PCH:
5893 case WM_T_PCH2:
5894 case WM_T_PCH_LPT:
5895 /* generic reset */
5896 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5897 CSR_WRITE_FLUSH(sc);
5898 delay(100);
5899 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5900 CSR_WRITE_FLUSH(sc);
5901 delay(150);
5902 break;
5903 default:
5904 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5905 __func__);
5906 break;
5907 }
5908
5909 /* release PHY semaphore */
5910 switch (sc->sc_type) {
5911 case WM_T_82571:
5912 case WM_T_82572:
5913 case WM_T_82573:
5914 case WM_T_82574:
5915 case WM_T_82583:
5916 /* XXX should put sw semaphore, too */
5917 wm_put_swsm_semaphore(sc);
5918 break;
5919 case WM_T_82575:
5920 case WM_T_82576:
5921 case WM_T_82580:
5922 case WM_T_82580ER:
5923 case WM_T_I350:
5924 case WM_T_I354:
5925 case WM_T_I210:
5926 case WM_T_I211:
5927 case WM_T_80003:
5928 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5929 break;
5930 case WM_T_ICH8:
5931 case WM_T_ICH9:
5932 case WM_T_ICH10:
5933 case WM_T_PCH:
5934 case WM_T_PCH2:
5935 case WM_T_PCH_LPT:
5936 wm_put_swfwhw_semaphore(sc);
5937 break;
5938 default:
5939 		/* nothing to do */
5941 break;
5942 }
5943
5944 /* get_cfg_done */
5945 wm_get_cfg_done(sc);
5946
5947 /* extra setup */
5948 switch (sc->sc_type) {
5949 case WM_T_82542_2_0:
5950 case WM_T_82542_2_1:
5951 case WM_T_82543:
5952 case WM_T_82544:
5953 case WM_T_82540:
5954 case WM_T_82545:
5955 case WM_T_82545_3:
5956 case WM_T_82546:
5957 case WM_T_82546_3:
5958 case WM_T_82541_2:
5959 case WM_T_82547_2:
5960 case WM_T_82571:
5961 case WM_T_82572:
5962 case WM_T_82573:
5963 case WM_T_82574:
5964 case WM_T_82575:
5965 case WM_T_82576:
5966 case WM_T_82580:
5967 case WM_T_82580ER:
5968 case WM_T_I350:
5969 case WM_T_I354:
5970 case WM_T_I210:
5971 case WM_T_I211:
5972 case WM_T_82583:
5973 case WM_T_80003:
5974 /* null */
5975 break;
5976 case WM_T_82541:
5977 case WM_T_82547:
5978 		/* XXX Configure the LED after PHY reset */
5979 break;
5980 case WM_T_ICH8:
5981 case WM_T_ICH9:
5982 case WM_T_ICH10:
5983 case WM_T_PCH:
5984 case WM_T_PCH2:
5985 case WM_T_PCH_LPT:
5986 		/* Allow time for h/w to get to a quiescent state after reset */
5987 delay(10*1000);
5988
5989 if (sc->sc_type == WM_T_PCH)
5990 wm_hv_phy_workaround_ich8lan(sc);
5991
5992 if (sc->sc_type == WM_T_PCH2)
5993 wm_lv_phy_workaround_ich8lan(sc);
5994
5995 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5996 /*
5997 			 * Dummy read to clear the PHY wakeup bit after LCD
5998 			 * reset.
5999 */
6000 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6001 }
6002
6003 /*
6004 		 * XXX Configure the LCD with the extended configuration region
6005 * in NVM
6006 */
6007
6008 /* Configure the LCD with the OEM bits in NVM */
6009 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6010 || (sc->sc_type == WM_T_PCH_LPT)) {
6011 /*
6012 * Disable LPLU.
6013 * XXX It seems that 82567 has LPLU, too.
6014 */
6015 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6016 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6017 reg |= HV_OEM_BITS_ANEGNOW;
6018 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6019 }
6020 break;
6021 default:
6022 panic("%s: unknown type\n", __func__);
6023 break;
6024 }
6025 }
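
/*
 * Sketch (illustrative only, not built): the acquire/operate/release
 * pattern used by wm_gmii_reset() above and by the PHY accessors below.
 * swfwphysem[] maps the PCI function number to its PHY semaphore.
 */
#if 0
	int sem = swfwphysem[sc->sc_funcid];

	if (wm_get_swfw_semaphore(sc, sem)) {
		/* Firmware or the other port owns the PHY; give up. */
		return;
	}
	/* ... access the PHY registers ... */
	wm_put_swfw_semaphore(sc, sem);
#endif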
6026
6027 /*
6028 * wm_get_phy_id_82575:
6029 *
6030 * Return PHY ID. Return -1 if it failed.
6031 */
6032 static int
6033 wm_get_phy_id_82575(struct wm_softc *sc)
6034 {
6035 uint32_t reg;
6036 int phyid = -1;
6037
6038 /* XXX */
6039 if ((sc->sc_flags & WM_F_SGMII) == 0)
6040 return -1;
6041
6042 if (wm_sgmii_uses_mdio(sc)) {
6043 switch (sc->sc_type) {
6044 case WM_T_82575:
6045 case WM_T_82576:
6046 reg = CSR_READ(sc, WMREG_MDIC);
6047 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6048 break;
6049 case WM_T_82580:
6050 case WM_T_I350:
6051 case WM_T_I354:
6052 case WM_T_I210:
6053 case WM_T_I211:
6054 reg = CSR_READ(sc, WMREG_MDICNFG);
6055 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6056 break;
6057 default:
6058 return -1;
6059 }
6060 }
6061
6062 return phyid;
6063 }
6064
6065
6066 /*
6067 * wm_gmii_mediainit:
6068 *
6069 * Initialize media for use on 1000BASE-T devices.
6070 */
6071 static void
6072 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6073 {
6074 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6075 struct mii_data *mii = &sc->sc_mii;
6076 uint32_t reg;
6077
6078 /* We have MII. */
6079 sc->sc_flags |= WM_F_HAS_MII;
6080
6081 if (sc->sc_type == WM_T_80003)
6082 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6083 else
6084 sc->sc_tipg = TIPG_1000T_DFLT;
6085
6086 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6087 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6088 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6089 || (sc->sc_type == WM_T_I211)) {
6090 reg = CSR_READ(sc, WMREG_PHPM);
6091 reg &= ~PHPM_GO_LINK_D;
6092 CSR_WRITE(sc, WMREG_PHPM, reg);
6093 }
6094
6095 /*
6096 * Let the chip set speed/duplex on its own based on
6097 * signals from the PHY.
6098 * XXXbouyer - I'm not sure this is right for the 80003,
6099 * the em driver only sets CTRL_SLU here - but it seems to work.
6100 */
6101 sc->sc_ctrl |= CTRL_SLU;
6102 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6103
6104 /* Initialize our media structures and probe the GMII. */
6105 mii->mii_ifp = ifp;
6106
6107 /*
6108 * Determine the PHY access method.
6109 *
6110 * For SGMII, use SGMII specific method.
6111 *
6112 * For some devices, we can determine the PHY access method
6113 * from sc_type.
6114 *
6115 	 * For ICH8 variants, it's difficult to determine the PHY access
6116 	 * method by sc_type, so use the PCI product ID for some devices.
6117 	 * For other ICH8 variants, try igp's method first.  If the PHY
6118 	 * can't be detected, fall back to bm's method.
6119 */
6120 switch (prodid) {
6121 case PCI_PRODUCT_INTEL_PCH_M_LM:
6122 case PCI_PRODUCT_INTEL_PCH_M_LC:
6123 /* 82577 */
6124 sc->sc_phytype = WMPHY_82577;
6125 mii->mii_readreg = wm_gmii_hv_readreg;
6126 mii->mii_writereg = wm_gmii_hv_writereg;
6127 break;
6128 case PCI_PRODUCT_INTEL_PCH_D_DM:
6129 case PCI_PRODUCT_INTEL_PCH_D_DC:
6130 /* 82578 */
6131 sc->sc_phytype = WMPHY_82578;
6132 mii->mii_readreg = wm_gmii_hv_readreg;
6133 mii->mii_writereg = wm_gmii_hv_writereg;
6134 break;
6135 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6136 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6137 /* 82579 */
6138 sc->sc_phytype = WMPHY_82579;
6139 mii->mii_readreg = wm_gmii_hv_readreg;
6140 mii->mii_writereg = wm_gmii_hv_writereg;
6141 break;
6142 case PCI_PRODUCT_INTEL_I217_LM:
6143 case PCI_PRODUCT_INTEL_I217_V:
6144 case PCI_PRODUCT_INTEL_I218_LM:
6145 case PCI_PRODUCT_INTEL_I218_V:
6146 /* I21[78] */
6147 mii->mii_readreg = wm_gmii_hv_readreg;
6148 mii->mii_writereg = wm_gmii_hv_writereg;
6149 break;
6150 case PCI_PRODUCT_INTEL_82801I_BM:
6151 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6152 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6153 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6154 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6155 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6156 /* 82567 */
6157 sc->sc_phytype = WMPHY_BM;
6158 mii->mii_readreg = wm_gmii_bm_readreg;
6159 mii->mii_writereg = wm_gmii_bm_writereg;
6160 break;
6161 default:
6162 if (((sc->sc_flags & WM_F_SGMII) != 0)
6163 && !wm_sgmii_uses_mdio(sc)){
6164 mii->mii_readreg = wm_sgmii_readreg;
6165 mii->mii_writereg = wm_sgmii_writereg;
6166 } else if (sc->sc_type >= WM_T_80003) {
6167 mii->mii_readreg = wm_gmii_i80003_readreg;
6168 mii->mii_writereg = wm_gmii_i80003_writereg;
6169 } else if (sc->sc_type >= WM_T_I210) {
6170 mii->mii_readreg = wm_gmii_i82544_readreg;
6171 mii->mii_writereg = wm_gmii_i82544_writereg;
6172 } else if (sc->sc_type >= WM_T_82580) {
6173 sc->sc_phytype = WMPHY_82580;
6174 mii->mii_readreg = wm_gmii_82580_readreg;
6175 mii->mii_writereg = wm_gmii_82580_writereg;
6176 } else if (sc->sc_type >= WM_T_82544) {
6177 mii->mii_readreg = wm_gmii_i82544_readreg;
6178 mii->mii_writereg = wm_gmii_i82544_writereg;
6179 } else {
6180 mii->mii_readreg = wm_gmii_i82543_readreg;
6181 mii->mii_writereg = wm_gmii_i82543_writereg;
6182 }
6183 break;
6184 }
6185 mii->mii_statchg = wm_gmii_statchg;
6186
6187 wm_gmii_reset(sc);
6188
6189 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6190 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6191 wm_gmii_mediastatus);
6192
6193 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6194 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6195 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6196 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6197 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6198 /* Attach only one port */
6199 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6200 MII_OFFSET_ANY, MIIF_DOPAUSE);
6201 } else {
6202 int i, id;
6203 uint32_t ctrl_ext;
6204
6205 id = wm_get_phy_id_82575(sc);
6206 if (id != -1) {
6207 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6208 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6209 }
6210 if ((id == -1)
6211 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6212 				/* Power on the SGMII PHY if it is disabled */
6213 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6214 CSR_WRITE(sc, WMREG_CTRL_EXT,
6215 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6216 CSR_WRITE_FLUSH(sc);
6217 delay(300*1000); /* XXX too long */
6218
6219 				/* Try PHY addresses 1 through 7 */
6220 for (i = 1; i < 8; i++)
6221 mii_attach(sc->sc_dev, &sc->sc_mii,
6222 0xffffffff, i, MII_OFFSET_ANY,
6223 MIIF_DOPAUSE);
6224
6225 				/* Restore the previous SFP cage power state */
6226 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6227 }
6228 }
6229 } else {
6230 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6231 MII_OFFSET_ANY, MIIF_DOPAUSE);
6232 }
6233
6234 /*
6235 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6236 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6237 */
6238 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6239 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6240 wm_set_mdio_slow_mode_hv(sc);
6241 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6242 MII_OFFSET_ANY, MIIF_DOPAUSE);
6243 }
6244
6245 /*
6246 * (For ICH8 variants)
6247 * If PHY detection failed, use BM's r/w function and retry.
6248 */
6249 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6250 /* if failed, retry with *_bm_* */
6251 mii->mii_readreg = wm_gmii_bm_readreg;
6252 mii->mii_writereg = wm_gmii_bm_writereg;
6253
6254 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6255 MII_OFFSET_ANY, MIIF_DOPAUSE);
6256 }
6257
6258 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6259 		/* No PHY was found */
6260 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6261 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6262 sc->sc_phytype = WMPHY_NONE;
6263 } else {
6264 /*
6265 * PHY Found!
6266 * Check PHY type.
6267 */
6268 uint32_t model;
6269 struct mii_softc *child;
6270
6271 child = LIST_FIRST(&mii->mii_phys);
6272 if (device_is_a(child->mii_dev, "igphy")) {
6273 struct igphy_softc *isc = (struct igphy_softc *)child;
6274
6275 model = isc->sc_mii.mii_mpd_model;
6276 if (model == MII_MODEL_yyINTEL_I82566)
6277 sc->sc_phytype = WMPHY_IGP_3;
6278 }
6279
6280 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6281 }
6282 }
6283
6284 /*
6285 * wm_gmii_mediastatus: [ifmedia interface function]
6286 *
6287 * Get the current interface media status on a 1000BASE-T device.
6288 */
6289 static void
6290 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6291 {
6292 struct wm_softc *sc = ifp->if_softc;
6293
6294 ether_mediastatus(ifp, ifmr);
6295 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6296 | sc->sc_flowflags;
6297 }
6298
6299 /*
6300 * wm_gmii_mediachange: [ifmedia interface function]
6301 *
6302 * Set hardware to newly-selected media on a 1000BASE-T device.
6303 */
6304 static int
6305 wm_gmii_mediachange(struct ifnet *ifp)
6306 {
6307 struct wm_softc *sc = ifp->if_softc;
6308 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6309 int rc;
6310
6311 if ((ifp->if_flags & IFF_UP) == 0)
6312 return 0;
6313
6314 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6315 sc->sc_ctrl |= CTRL_SLU;
6316 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6317 || (sc->sc_type > WM_T_82543)) {
6318 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6319 } else {
6320 sc->sc_ctrl &= ~CTRL_ASDE;
6321 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6322 if (ife->ifm_media & IFM_FDX)
6323 sc->sc_ctrl |= CTRL_FD;
6324 switch (IFM_SUBTYPE(ife->ifm_media)) {
6325 case IFM_10_T:
6326 sc->sc_ctrl |= CTRL_SPEED_10;
6327 break;
6328 case IFM_100_TX:
6329 sc->sc_ctrl |= CTRL_SPEED_100;
6330 break;
6331 case IFM_1000_T:
6332 sc->sc_ctrl |= CTRL_SPEED_1000;
6333 break;
6334 default:
6335 panic("wm_gmii_mediachange: bad media 0x%x",
6336 ife->ifm_media);
6337 }
6338 }
6339 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6340 if (sc->sc_type <= WM_T_82543)
6341 wm_gmii_reset(sc);
6342
6343 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6344 return 0;
6345 return rc;
6346 }
6347
6348 #define MDI_IO CTRL_SWDPIN(2)
6349 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6350 #define MDI_CLK CTRL_SWDPIN(3)
6351
6352 static void
6353 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6354 {
6355 uint32_t i, v;
6356
6357 v = CSR_READ(sc, WMREG_CTRL);
6358 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6359 v |= MDI_DIR | CTRL_SWDPIO(3);
6360
6361 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6362 if (data & i)
6363 v |= MDI_IO;
6364 else
6365 v &= ~MDI_IO;
6366 CSR_WRITE(sc, WMREG_CTRL, v);
6367 CSR_WRITE_FLUSH(sc);
6368 delay(10);
6369 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6370 CSR_WRITE_FLUSH(sc);
6371 delay(10);
6372 CSR_WRITE(sc, WMREG_CTRL, v);
6373 CSR_WRITE_FLUSH(sc);
6374 delay(10);
6375 }
6376 }
6377
6378 static uint32_t
6379 wm_i82543_mii_recvbits(struct wm_softc *sc)
6380 {
6381 uint32_t v, i, data = 0;
6382
6383 v = CSR_READ(sc, WMREG_CTRL);
6384 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6385 v |= CTRL_SWDPIO(3);
6386
6387 CSR_WRITE(sc, WMREG_CTRL, v);
6388 CSR_WRITE_FLUSH(sc);
6389 delay(10);
6390 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6391 CSR_WRITE_FLUSH(sc);
6392 delay(10);
6393 CSR_WRITE(sc, WMREG_CTRL, v);
6394 CSR_WRITE_FLUSH(sc);
6395 delay(10);
6396
6397 for (i = 0; i < 16; i++) {
6398 data <<= 1;
6399 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6400 CSR_WRITE_FLUSH(sc);
6401 delay(10);
6402 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6403 data |= 1;
6404 CSR_WRITE(sc, WMREG_CTRL, v);
6405 CSR_WRITE_FLUSH(sc);
6406 delay(10);
6407 }
6408
6409 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6410 CSR_WRITE_FLUSH(sc);
6411 delay(10);
6412 CSR_WRITE(sc, WMREG_CTRL, v);
6413 CSR_WRITE_FLUSH(sc);
6414 delay(10);
6415
6416 return data;
6417 }
6418
6419 #undef MDI_IO
6420 #undef MDI_DIR
6421 #undef MDI_CLK
6422
6423 /*
6424 * wm_gmii_i82543_readreg: [mii interface function]
6425 *
6426 * Read a PHY register on the GMII (i82543 version).
6427 */
6428 static int
6429 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6430 {
6431 struct wm_softc *sc = device_private(self);
6432 int rv;
6433
6434 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6435 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6436 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6437 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6438
6439 DPRINTF(WM_DEBUG_GMII,
6440 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6441 device_xname(sc->sc_dev), phy, reg, rv));
6442
6443 return rv;
6444 }
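
/*
 * Worked example (not from the original source): the 14-bit command
 * frame built above for, say, phy 1 and reg 2 is
 *
 *	(MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) | (1 << 5) | 2
 *
 * i.e. <start><read opcode><5-bit phy><5-bit reg>, shifted out MSB
 * first after the 32-bit all-ones preamble; the PHY then drives 16 data
 * bits back, collected by wm_i82543_mii_recvbits().
 */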
6445
6446 /*
6447 * wm_gmii_i82543_writereg: [mii interface function]
6448 *
6449 * Write a PHY register on the GMII (i82543 version).
6450 */
6451 static void
6452 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6453 {
6454 struct wm_softc *sc = device_private(self);
6455
6456 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6457 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6458 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6459 (MII_COMMAND_START << 30), 32);
6460 }
6461
6462 /*
6463 * wm_gmii_i82544_readreg: [mii interface function]
6464 *
6465 * Read a PHY register on the GMII.
6466 */
6467 static int
6468 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6469 {
6470 struct wm_softc *sc = device_private(self);
6471 uint32_t mdic = 0;
6472 int i, rv;
6473
6474 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6475 MDIC_REGADD(reg));
6476
6477 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6478 mdic = CSR_READ(sc, WMREG_MDIC);
6479 if (mdic & MDIC_READY)
6480 break;
6481 delay(50);
6482 }
6483
6484 if ((mdic & MDIC_READY) == 0) {
6485 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6486 device_xname(sc->sc_dev), phy, reg);
6487 rv = 0;
6488 } else if (mdic & MDIC_E) {
6489 #if 0 /* This is normal if no PHY is present. */
6490 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6491 device_xname(sc->sc_dev), phy, reg);
6492 #endif
6493 rv = 0;
6494 } else {
6495 rv = MDIC_DATA(mdic);
6496 if (rv == 0xffff)
6497 rv = 0;
6498 }
6499
6500 return rv;
6501 }
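
/*
 * Usage sketch (illustrative only, not built): a one-off link check
 * through the MDIC-based reader above.  The PHY address 1 and the use
 * of MII_BMSR/BMSR_LINK from <dev/mii/mii.h> are assumptions for the
 * example; the MII layer normally does this for us.
 */
#if 0
	int bmsr = wm_gmii_i82544_readreg(sc->sc_dev, 1, MII_BMSR);

	if (bmsr & BMSR_LINK) {
		/* link is up */
	}
#endif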
6502
6503 /*
6504 * wm_gmii_i82544_writereg: [mii interface function]
6505 *
6506 * Write a PHY register on the GMII.
6507 */
6508 static void
6509 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6510 {
6511 struct wm_softc *sc = device_private(self);
6512 uint32_t mdic = 0;
6513 int i;
6514
6515 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6516 MDIC_REGADD(reg) | MDIC_DATA(val));
6517
6518 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6519 mdic = CSR_READ(sc, WMREG_MDIC);
6520 if (mdic & MDIC_READY)
6521 break;
6522 delay(50);
6523 }
6524
6525 if ((mdic & MDIC_READY) == 0)
6526 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6527 device_xname(sc->sc_dev), phy, reg);
6528 else if (mdic & MDIC_E)
6529 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6530 device_xname(sc->sc_dev), phy, reg);
6531 }
6532
6533 /*
6534 * wm_gmii_i80003_readreg: [mii interface function]
6535 *
6536  *	Read a PHY register on the kumeran bus.
6537  * This could be handled by the PHY layer if we didn't have to lock
6538  * the resource ...
6539 */
6540 static int
6541 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6542 {
6543 struct wm_softc *sc = device_private(self);
6544 int sem;
6545 int rv;
6546
6547 if (phy != 1) /* only one PHY on kumeran bus */
6548 return 0;
6549
6550 sem = swfwphysem[sc->sc_funcid];
6551 if (wm_get_swfw_semaphore(sc, sem)) {
6552 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6553 __func__);
6554 return 0;
6555 }
6556
6557 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6558 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6559 reg >> GG82563_PAGE_SHIFT);
6560 } else {
6561 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6562 reg >> GG82563_PAGE_SHIFT);
6563 }
6564 	/* Wait 200us more to work around a bug in the MDIC ready bit */
6565 delay(200);
6566 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6567 delay(200);
6568
6569 wm_put_swfw_semaphore(sc, sem);
6570 return rv;
6571 }
6572
6573 /*
6574 * wm_gmii_i80003_writereg: [mii interface function]
6575 *
6576  *	Write a PHY register on the kumeran bus.
6577  * This could be handled by the PHY layer if we didn't have to lock
6578  * the resource ...
6579 */
6580 static void
6581 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6582 {
6583 struct wm_softc *sc = device_private(self);
6584 int sem;
6585
6586 if (phy != 1) /* only one PHY on kumeran bus */
6587 return;
6588
6589 sem = swfwphysem[sc->sc_funcid];
6590 if (wm_get_swfw_semaphore(sc, sem)) {
6591 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6592 __func__);
6593 return;
6594 }
6595
6596 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6597 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6598 reg >> GG82563_PAGE_SHIFT);
6599 } else {
6600 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6601 reg >> GG82563_PAGE_SHIFT);
6602 }
6603 	/* Wait 200us more to work around a bug in the MDIC ready bit */
6604 delay(200);
6605 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6606 delay(200);
6607
6608 wm_put_swfw_semaphore(sc, sem);
6609 }
6610
6611 /*
6612 * wm_gmii_bm_readreg: [mii interface function]
6613 *
6614  *	Read a PHY register on the BM PHY (82567).
6615  * This could be handled by the PHY layer if we didn't have to lock
6616  * the resource ...
6617 */
6618 static int
6619 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6620 {
6621 struct wm_softc *sc = device_private(self);
6622 int sem;
6623 int rv;
6624
6625 sem = swfwphysem[sc->sc_funcid];
6626 if (wm_get_swfw_semaphore(sc, sem)) {
6627 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6628 __func__);
6629 return 0;
6630 }
6631
6632 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6633 if (phy == 1)
6634 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6635 reg);
6636 else
6637 wm_gmii_i82544_writereg(self, phy,
6638 GG82563_PHY_PAGE_SELECT,
6639 reg >> GG82563_PAGE_SHIFT);
6640 }
6641
6642 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6643 wm_put_swfw_semaphore(sc, sem);
6644 return rv;
6645 }
6646
6647 /*
6648 * wm_gmii_bm_writereg: [mii interface function]
6649 *
6650  *	Write a PHY register on the BM PHY (82567).
6651  * This could be handled by the PHY layer if we didn't have to lock
6652  * the resource ...
6653 */
6654 static void
6655 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6656 {
6657 struct wm_softc *sc = device_private(self);
6658 int sem;
6659
6660 sem = swfwphysem[sc->sc_funcid];
6661 if (wm_get_swfw_semaphore(sc, sem)) {
6662 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6663 __func__);
6664 return;
6665 }
6666
6667 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6668 if (phy == 1)
6669 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6670 reg);
6671 else
6672 wm_gmii_i82544_writereg(self, phy,
6673 GG82563_PHY_PAGE_SELECT,
6674 reg >> GG82563_PAGE_SHIFT);
6675 }
6676
6677 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6678 wm_put_swfw_semaphore(sc, sem);
6679 }
6680
6681 static void
6682 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6683 {
6684 struct wm_softc *sc = device_private(self);
6685 uint16_t regnum = BM_PHY_REG_NUM(offset);
6686 uint16_t wuce;
6687
6688 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6689 if (sc->sc_type == WM_T_PCH) {
6690 		/* XXX e1000 driver does nothing... why? */
6691 }
6692
6693 /* Set page 769 */
6694 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6695 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6696
6697 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6698
6699 wuce &= ~BM_WUC_HOST_WU_BIT;
6700 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6701 wuce | BM_WUC_ENABLE_BIT);
6702
6703 /* Select page 800 */
6704 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6705 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6706
6707 /* Write page 800 */
6708 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6709
6710 if (rd)
6711 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6712 else
6713 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6714
6715 /* Set page 769 */
6716 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6717 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6718
6719 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6720 }
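
/*
 * Usage sketch (illustrative only, not built): a read-modify-write of a
 * page-800 wakeup register via the helper above; rd = 1 reads into
 * *val, rd = 0 writes *val out.  The page juggling (769 -> 800 -> 769)
 * is hidden inside the helper.
 */
#if 0
	int16_t wuc;

	wm_access_phy_wakeup_reg_bm(sc->sc_dev, BM_WUC, &wuc, 1);
	/* ... modify wuc ... */
	wm_access_phy_wakeup_reg_bm(sc->sc_dev, BM_WUC, &wuc, 0);
#endif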
6721
6722 /*
6723 * wm_gmii_hv_readreg: [mii interface function]
6724 *
6725  *	Read a PHY register on the HV (PCH) family of PHYs.
6726  * This could be handled by the PHY layer if we didn't have to lock
6727  * the resource ...
6728 */
6729 static int
6730 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6731 {
6732 struct wm_softc *sc = device_private(self);
6733 uint16_t page = BM_PHY_REG_PAGE(reg);
6734 uint16_t regnum = BM_PHY_REG_NUM(reg);
6735 uint16_t val;
6736 int rv;
6737
6738 if (wm_get_swfwhw_semaphore(sc)) {
6739 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6740 __func__);
6741 return 0;
6742 }
6743
6744 /* XXX Workaround failure in MDIO access while cable is disconnected */
6745 if (sc->sc_phytype == WMPHY_82577) {
6746 /* XXX must write */
6747 }
6748
6749 /* Page 800 works differently than the rest so it has its own func */
6750 if (page == BM_WUC_PAGE) {
6751 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6752 return val;
6753 }
6754
6755 	/*
6756 	 * Pages below 768 work differently than the rest and would need
6757 	 * their own function; that isn't implemented, so just complain.
6758 	 */
6759 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6760 printf("gmii_hv_readreg!!!\n");
6761 return 0;
6762 }
6763
6764 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6765 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6766 page << BME1000_PAGE_SHIFT);
6767 }
6768
6769 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6770 wm_put_swfwhw_semaphore(sc);
6771 return rv;
6772 }
6773
6774 /*
6775 * wm_gmii_hv_writereg: [mii interface function]
6776 *
6777  *	Write a PHY register on the HV (PCH) family of PHYs.
6778  * This could be handled by the PHY layer if we didn't have to lock
6779  * the resource ...
6780 */
6781 static void
6782 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6783 {
6784 struct wm_softc *sc = device_private(self);
6785 uint16_t page = BM_PHY_REG_PAGE(reg);
6786 uint16_t regnum = BM_PHY_REG_NUM(reg);
6787
6788 if (wm_get_swfwhw_semaphore(sc)) {
6789 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6790 __func__);
6791 return;
6792 }
6793
6794 /* XXX Workaround failure in MDIO access while cable is disconnected */
6795
6796 /* Page 800 works differently than the rest so it has its own func */
6797 if (page == BM_WUC_PAGE) {
6798 uint16_t tmp;
6799
6800 tmp = val;
6801 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6802 return;
6803 }
6804
6805 	/*
6806 	 * Pages below 768 work differently than the rest and would need
6807 	 * their own function; that isn't implemented, so just complain.
6808 	 */
6809 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6810 printf("gmii_hv_writereg!!!\n");
6811 return;
6812 }
6813
6814 /*
6815 * XXX Workaround MDIO accesses being disabled after entering IEEE
6816 * Power Down (whenever bit 11 of the PHY control register is set)
6817 */
6818
6819 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6820 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6821 page << BME1000_PAGE_SHIFT);
6822 }
6823
6824 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6825 wm_put_swfwhw_semaphore(sc);
6826 }
6827
6828 /*
6829 * wm_gmii_82580_readreg: [mii interface function]
6830 *
6831 * Read a PHY register on the 82580 and I350.
6832  * This could be handled by the PHY layer if we didn't have to lock
6833  * the resource ...
6834 */
6835 static int
6836 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6837 {
6838 struct wm_softc *sc = device_private(self);
6839 int sem;
6840 int rv;
6841
6842 sem = swfwphysem[sc->sc_funcid];
6843 if (wm_get_swfw_semaphore(sc, sem)) {
6844 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6845 __func__);
6846 return 0;
6847 }
6848
6849 rv = wm_gmii_i82544_readreg(self, phy, reg);
6850
6851 wm_put_swfw_semaphore(sc, sem);
6852 return rv;
6853 }
6854
6855 /*
6856 * wm_gmii_82580_writereg: [mii interface function]
6857 *
6858 * Write a PHY register on the 82580 and I350.
6859  * This could be handled by the PHY layer if we didn't have to lock
6860  * the resource ...
6861 */
6862 static void
6863 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6864 {
6865 struct wm_softc *sc = device_private(self);
6866 int sem;
6867
6868 sem = swfwphysem[sc->sc_funcid];
6869 if (wm_get_swfw_semaphore(sc, sem)) {
6870 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6871 __func__);
6872 return;
6873 }
6874
6875 wm_gmii_i82544_writereg(self, phy, reg, val);
6876
6877 wm_put_swfw_semaphore(sc, sem);
6878 }
6879
6880 /*
6881 * wm_gmii_statchg: [mii interface function]
6882 *
6883 * Callback from MII layer when media changes.
6884 */
6885 static void
6886 wm_gmii_statchg(struct ifnet *ifp)
6887 {
6888 struct wm_softc *sc = ifp->if_softc;
6889 struct mii_data *mii = &sc->sc_mii;
6890
6891 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6892 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6893 sc->sc_fcrtl &= ~FCRTL_XONE;
6894
6895 /*
6896 * Get flow control negotiation result.
6897 */
6898 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6899 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6900 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6901 mii->mii_media_active &= ~IFM_ETH_FMASK;
6902 }
6903
6904 if (sc->sc_flowflags & IFM_FLOW) {
6905 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6906 sc->sc_ctrl |= CTRL_TFCE;
6907 sc->sc_fcrtl |= FCRTL_XONE;
6908 }
6909 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6910 sc->sc_ctrl |= CTRL_RFCE;
6911 }
6912
6913 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6914 DPRINTF(WM_DEBUG_LINK,
6915 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
6916 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6917 } else {
6918 DPRINTF(WM_DEBUG_LINK,
6919 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
6920 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6921 }
6922
6923 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6924 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6925 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6926 : WMREG_FCRTL, sc->sc_fcrtl);
6927 if (sc->sc_type == WM_T_80003) {
6928 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6929 case IFM_1000_T:
6930 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6931 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6932 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6933 break;
6934 default:
6935 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6936 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6937 sc->sc_tipg = TIPG_10_100_80003_DFLT;
6938 break;
6939 }
6940 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6941 }
6942 }
6943
6944 /*
6945 * wm_kmrn_readreg:
6946 *
6947 * Read a kumeran register
6948 */
6949 static int
6950 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6951 {
6952 int rv;
6953
6954 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
6955 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6956 aprint_error_dev(sc->sc_dev,
6957 "%s: failed to get semaphore\n", __func__);
6958 return 0;
6959 }
6960 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
6961 if (wm_get_swfwhw_semaphore(sc)) {
6962 aprint_error_dev(sc->sc_dev,
6963 "%s: failed to get semaphore\n", __func__);
6964 return 0;
6965 }
6966 }
6967
6968 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6969 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6970 KUMCTRLSTA_REN);
6971 CSR_WRITE_FLUSH(sc);
6972 delay(2);
6973
6974 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6975
6976 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
6977 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6978 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
6979 wm_put_swfwhw_semaphore(sc);
6980
6981 return rv;
6982 }
6983
6984 /*
6985 * wm_kmrn_writereg:
6986 *
6987 * Write a kumeran register
6988 */
6989 static void
6990 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6991 {
6992
6993 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
6994 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6995 aprint_error_dev(sc->sc_dev,
6996 "%s: failed to get semaphore\n", __func__);
6997 return;
6998 }
6999 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7000 if (wm_get_swfwhw_semaphore(sc)) {
7001 aprint_error_dev(sc->sc_dev,
7002 "%s: failed to get semaphore\n", __func__);
7003 return;
7004 }
7005 }
7006
7007 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7008 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7009 (val & KUMCTRLSTA_MASK));
7010
7011 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7012 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7013 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7014 wm_put_swfwhw_semaphore(sc);
7015 }
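
/*
 * Usage sketch (illustrative only, not built): a kumeran
 * read-modify-write with the pair above, as wm_gmii_statchg() does when
 * it rewrites KUMCTRLSTA_OFFSET_HD_CTRL on the 80003.
 */
#if 0
	int val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);

	/* ... modify val ... */
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, val);
#endif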
7016
7017 /* SGMII related */
7018
7019 /*
7020 * wm_sgmii_uses_mdio
7021 *
7022 * Check whether the transaction is to the internal PHY or the external
7023 * MDIO interface. Return true if it's MDIO.
7024 */
7025 static bool
7026 wm_sgmii_uses_mdio(struct wm_softc *sc)
7027 {
7028 uint32_t reg;
7029 bool ismdio = false;
7030
7031 switch (sc->sc_type) {
7032 case WM_T_82575:
7033 case WM_T_82576:
7034 reg = CSR_READ(sc, WMREG_MDIC);
7035 ismdio = ((reg & MDIC_DEST) != 0);
7036 break;
7037 case WM_T_82580:
7038 case WM_T_82580ER:
7039 case WM_T_I350:
7040 case WM_T_I354:
7041 case WM_T_I210:
7042 case WM_T_I211:
7043 reg = CSR_READ(sc, WMREG_MDICNFG);
7044 ismdio = ((reg & MDICNFG_DEST) != 0);
7045 break;
7046 default:
7047 break;
7048 }
7049
7050 return ismdio;
7051 }
7052
7053 /*
7054 * wm_sgmii_readreg: [mii interface function]
7055 *
7056  *	Read a PHY register on the SGMII bus.
7057  * This could be handled by the PHY layer if we didn't have to lock
7058  * the resource ...
7059 */
7060 static int
7061 wm_sgmii_readreg(device_t self, int phy, int reg)
7062 {
7063 struct wm_softc *sc = device_private(self);
7064 uint32_t i2ccmd;
7065 int i, rv;
7066
7067 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7068 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7069 __func__);
7070 return 0;
7071 }
7072
7073 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7074 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7075 | I2CCMD_OPCODE_READ;
7076 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7077
7078 /* Poll the ready bit */
7079 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7080 delay(50);
7081 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7082 if (i2ccmd & I2CCMD_READY)
7083 break;
7084 }
7085 if ((i2ccmd & I2CCMD_READY) == 0)
7086 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7087 if ((i2ccmd & I2CCMD_ERROR) != 0)
7088 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7089
7090 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7091
7092 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7093 return rv;
7094 }
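
/*
 * Worked example (not from the original source): the I2CCMD data field
 * arrives byte-swapped.  If the PHY register holds 0x1234, the
 * controller presents 0x3412 in the low 16 bits of I2CCMD, and the
 * swap above recovers it:
 *
 *	((0x3412 >> 8) & 0x00ff) | ((0x3412 << 8) & 0xff00) == 0x1234
 */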
7095
7096 /*
7097 * wm_sgmii_writereg: [mii interface function]
7098 *
7099  *	Write a PHY register on the SGMII bus.
7100  * This could be handled by the PHY layer if we didn't have to lock
7101  * the resource ...
7102 */
7103 static void
7104 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7105 {
7106 struct wm_softc *sc = device_private(self);
7107 uint32_t i2ccmd;
7108 int i;
7109
7110 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7111 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7112 __func__);
7113 return;
7114 }
7115
7116 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7117 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7118 | I2CCMD_OPCODE_WRITE;
7119 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7120
7121 /* Poll the ready bit */
7122 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7123 delay(50);
7124 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7125 if (i2ccmd & I2CCMD_READY)
7126 break;
7127 }
7128 if ((i2ccmd & I2CCMD_READY) == 0)
7129 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7130 if ((i2ccmd & I2CCMD_ERROR) != 0)
7131 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7132
7133 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7134 }
7135
7136 /* TBI related */
7137
7138 /* XXX Currently TBI only */
7139 static int
7140 wm_check_for_link(struct wm_softc *sc)
7141 {
7142 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7143 uint32_t rxcw;
7144 uint32_t ctrl;
7145 uint32_t status;
7146 uint32_t sig;
7147
7148 if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
7149 sc->sc_tbi_linkup = 1;
7150 return 0;
7151 }
7152
7153 rxcw = CSR_READ(sc, WMREG_RXCW);
7154 ctrl = CSR_READ(sc, WMREG_CTRL);
7155 status = CSR_READ(sc, WMREG_STATUS);
7156
7157 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7158
7159 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7160 device_xname(sc->sc_dev), __func__,
7161 ((ctrl & CTRL_SWDPIN(1)) == sig),
7162 ((status & STATUS_LU) != 0),
7163 ((rxcw & RXCW_C) != 0)
7164 ));
7165
7166 /*
7167 * SWDPIN LU RXCW
7168 * 0 0 0
7169 * 0 0 1 (should not happen)
7170 * 0 1 0 (should not happen)
7171 * 0 1 1 (should not happen)
7172 * 1 0 0 Disable autonego and force linkup
7173 * 1 0 1 got /C/ but not linkup yet
7174 * 1 1 0 (linkup)
7175 * 1 1 1 If IFM_AUTO, back to autonego
7176 *
7177 */
7178 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7179 && ((status & STATUS_LU) == 0)
7180 && ((rxcw & RXCW_C) == 0)) {
7181 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7182 __func__));
7183 sc->sc_tbi_linkup = 0;
7184 /* Disable auto-negotiation in the TXCW register */
7185 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7186
7187 /*
7188 * Force link-up and also force full-duplex.
7189 *
7190 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
7191 		 * automatically, so base sc->sc_ctrl on the fresh value.
7192 */
7193 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7194 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7195 } else if (((status & STATUS_LU) != 0)
7196 && ((rxcw & RXCW_C) != 0)
7197 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7198 sc->sc_tbi_linkup = 1;
7199 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7200 __func__));
7201 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7202 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7203 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7204 && ((rxcw & RXCW_C) != 0)) {
7205 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7206 } else {
7207 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7208 status));
7209 }
7210
7211 return 0;
7212 }
7213
7214 /*
7215 * wm_tbi_mediainit:
7216 *
7217 * Initialize media for use on 1000BASE-X devices.
7218 */
7219 static void
7220 wm_tbi_mediainit(struct wm_softc *sc)
7221 {
7222 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7223 const char *sep = "";
7224
7225 if (sc->sc_type < WM_T_82543)
7226 sc->sc_tipg = TIPG_WM_DFLT;
7227 else
7228 sc->sc_tipg = TIPG_LG_DFLT;
7229
7230 sc->sc_tbi_anegticks = 5;
7231
7232 /* Initialize our media structures */
7233 sc->sc_mii.mii_ifp = ifp;
7234
7235 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7236 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7237 wm_tbi_mediastatus);
7238
7239 /*
7240 * SWD Pins:
7241 *
7242 * 0 = Link LED (output)
7243 * 1 = Loss Of Signal (input)
7244 */
7245 sc->sc_ctrl |= CTRL_SWDPIO(0);
7246 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7247 if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
7248 sc->sc_ctrl &= ~CTRL_LRST;
7249
7250 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7251
7252 #define ADD(ss, mm, dd) \
7253 do { \
7254 aprint_normal("%s%s", sep, ss); \
7255 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7256 sep = ", "; \
7257 } while (/*CONSTCOND*/0)
7258
7259 aprint_normal_dev(sc->sc_dev, "");
7260 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7261 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7262 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7263 aprint_normal("\n");
7264
7265 #undef ADD
7266
7267 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7268 }
7269
7270 /*
7271 * wm_tbi_mediastatus: [ifmedia interface function]
7272 *
7273 * Get the current interface media status on a 1000BASE-X device.
7274 */
7275 static void
7276 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7277 {
7278 struct wm_softc *sc = ifp->if_softc;
7279 uint32_t ctrl, status;
7280
7281 ifmr->ifm_status = IFM_AVALID;
7282 ifmr->ifm_active = IFM_ETHER;
7283
7284 status = CSR_READ(sc, WMREG_STATUS);
7285 if ((status & STATUS_LU) == 0) {
7286 ifmr->ifm_active |= IFM_NONE;
7287 return;
7288 }
7289
7290 ifmr->ifm_status |= IFM_ACTIVE;
7291 ifmr->ifm_active |= IFM_1000_SX;
7292 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7293 ifmr->ifm_active |= IFM_FDX;
7294 else
7295 ifmr->ifm_active |= IFM_HDX;
7296 ctrl = CSR_READ(sc, WMREG_CTRL);
7297 if (ctrl & CTRL_RFCE)
7298 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7299 if (ctrl & CTRL_TFCE)
7300 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7301 }
7302
7303 /*
7304 * wm_tbi_mediachange: [ifmedia interface function]
7305 *
7306 * Set hardware to newly-selected media on a 1000BASE-X device.
7307 */
7308 static int
7309 wm_tbi_mediachange(struct ifnet *ifp)
7310 {
7311 struct wm_softc *sc = ifp->if_softc;
7312 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7313 uint32_t status;
7314 int i;
7315
7316 if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
7317 return 0;
7318
7319 sc->sc_txcw = 0;
7320 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
7321 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7322 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7323 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7324 sc->sc_txcw |= TXCW_ANE;
7325 } else {
7326 /*
7327 * If autonegotiation is turned off, force link up and turn on
7328 * full duplex
7329 */
7330 sc->sc_txcw &= ~TXCW_ANE;
7331 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
7332 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7333 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7334 CSR_WRITE_FLUSH(sc);
7335 delay(1000);
7336 }
7337
7338 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7339 device_xname(sc->sc_dev),sc->sc_txcw));
7340 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7341 CSR_WRITE_FLUSH(sc);
7342 delay(10000);
7343
7344 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7345 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7346
7347 /*
7348 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
7349 	 * set if the optics detect a signal, 0 if they don't.
7350 */
7351 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7352 /* Have signal; wait for the link to come up. */
7353
7354 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7355 /*
7356 * Reset the link, and let autonegotiation do its thing
7357 */
7358 sc->sc_ctrl |= CTRL_LRST;
7359 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7360 CSR_WRITE_FLUSH(sc);
7361 delay(1000);
7362 sc->sc_ctrl &= ~CTRL_LRST;
7363 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7364 CSR_WRITE_FLUSH(sc);
7365 delay(1000);
7366 }
7367
7368 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7369 delay(10000);
7370 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7371 break;
7372 }
7373
7374 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7375 device_xname(sc->sc_dev),i));
7376
7377 status = CSR_READ(sc, WMREG_STATUS);
7378 DPRINTF(WM_DEBUG_LINK,
7379 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7380 device_xname(sc->sc_dev),status, STATUS_LU));
7381 if (status & STATUS_LU) {
7382 /* Link is up. */
7383 DPRINTF(WM_DEBUG_LINK,
7384 ("%s: LINK: set media -> link up %s\n",
7385 device_xname(sc->sc_dev),
7386 (status & STATUS_FD) ? "FDX" : "HDX"));
7387
7388 /*
7389 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
7390 			 * automatically, so refresh the cached sc_ctrl.
7391 */
7392 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7393 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7394 sc->sc_fcrtl &= ~FCRTL_XONE;
7395 if (status & STATUS_FD)
7396 sc->sc_tctl |=
7397 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7398 else
7399 sc->sc_tctl |=
7400 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7401 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7402 sc->sc_fcrtl |= FCRTL_XONE;
7403 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7404 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7405 WMREG_OLD_FCRTL : WMREG_FCRTL,
7406 sc->sc_fcrtl);
7407 sc->sc_tbi_linkup = 1;
7408 } else {
7409 if (i == WM_LINKUP_TIMEOUT)
7410 wm_check_for_link(sc);
7411 /* Link is down. */
7412 DPRINTF(WM_DEBUG_LINK,
7413 ("%s: LINK: set media -> link down\n",
7414 device_xname(sc->sc_dev)));
7415 sc->sc_tbi_linkup = 0;
7416 }
7417 } else {
7418 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7419 device_xname(sc->sc_dev)));
7420 sc->sc_tbi_linkup = 0;
7421 }
7422
7423 wm_tbi_set_linkled(sc);
7424
7425 return 0;
7426 }
7427
7428 /*
7429 * wm_tbi_set_linkled:
7430 *
7431 * Update the link LED on 1000BASE-X devices.
7432 */
7433 static void
7434 wm_tbi_set_linkled(struct wm_softc *sc)
7435 {
7436
7437 if (sc->sc_tbi_linkup)
7438 sc->sc_ctrl |= CTRL_SWDPIN(0);
7439 else
7440 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7441
7442 /* 82540 or newer devices are active low */
7443 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7444
7445 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7446 }
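
/*
 * Worked example (not from the original source) of the polarity XOR
 * above: on the 82540 and newer the link LED pin is active-low, so when
 * the link is up the CTRL_SWDPIN(0) bit just set is inverted back to 0
 * (pin driven low, LED lit); on older chips the XOR with 0 leaves the
 * bit alone and the pin is driven high instead.
 */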
7447
7448 /*
7449 * wm_tbi_check_link:
7450 *
7451 * Check the link on 1000BASE-X devices.
7452 */
7453 static void
7454 wm_tbi_check_link(struct wm_softc *sc)
7455 {
7456 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7457 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7458 uint32_t status;
7459
7460 KASSERT(WM_TX_LOCKED(sc));
7461
7462 if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
7463 sc->sc_tbi_linkup = 1;
7464 return;
7465 }
7466
7467 status = CSR_READ(sc, WMREG_STATUS);
7468
7469 /* XXX is this needed? */
7470 (void)CSR_READ(sc, WMREG_RXCW);
7471 (void)CSR_READ(sc, WMREG_CTRL);
7472
7473 /* set link status */
7474 if ((status & STATUS_LU) == 0) {
7475 DPRINTF(WM_DEBUG_LINK,
7476 ("%s: LINK: checklink -> down\n",
7477 device_xname(sc->sc_dev)));
7478 sc->sc_tbi_linkup = 0;
7479 } else if (sc->sc_tbi_linkup == 0) {
7480 DPRINTF(WM_DEBUG_LINK,
7481 ("%s: LINK: checklink -> up %s\n",
7482 device_xname(sc->sc_dev),
7483 (status & STATUS_FD) ? "FDX" : "HDX"));
7484 sc->sc_tbi_linkup = 1;
7485 }
7486
7487 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7488 && ((status & STATUS_LU) == 0)) {
7489 sc->sc_tbi_linkup = 0;
7490 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
7491 /* RXCFG storm! */
7492 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
7493 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
7494 wm_init_locked(ifp);
7495 WM_TX_UNLOCK(sc);
7496 ifp->if_start(ifp);
7497 WM_TX_LOCK(sc);
7498 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7499 /* If the timer expired, retry autonegotiation */
7500 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7501 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7502 sc->sc_tbi_ticks = 0;
7503 /*
7504 * Reset the link, and let autonegotiation do
7505 * its thing
7506 */
7507 sc->sc_ctrl |= CTRL_LRST;
7508 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7509 CSR_WRITE_FLUSH(sc);
7510 delay(1000);
7511 sc->sc_ctrl &= ~CTRL_LRST;
7512 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7513 CSR_WRITE_FLUSH(sc);
7514 delay(1000);
7515 CSR_WRITE(sc, WMREG_TXCW,
7516 sc->sc_txcw & ~TXCW_ANE);
7517 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7518 }
7519 }
7520 }
7521
7522 wm_tbi_set_linkled(sc);
7523 }
7524
7525 /*
7526 * NVM related.
7527 * Microwire, SPI (w/wo EERD) and Flash.
7528 */
7529
7530 /* Both spi and uwire */
7531
7532 /*
7533 * wm_eeprom_sendbits:
7534 *
7535 * Send a series of bits to the EEPROM.
7536 */
7537 static void
7538 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7539 {
7540 uint32_t reg;
7541 int x;
7542
7543 reg = CSR_READ(sc, WMREG_EECD);
7544
7545 for (x = nbits; x > 0; x--) {
7546 if (bits & (1U << (x - 1)))
7547 reg |= EECD_DI;
7548 else
7549 reg &= ~EECD_DI;
7550 CSR_WRITE(sc, WMREG_EECD, reg);
7551 CSR_WRITE_FLUSH(sc);
7552 delay(2);
7553 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7554 CSR_WRITE_FLUSH(sc);
7555 delay(2);
7556 CSR_WRITE(sc, WMREG_EECD, reg);
7557 CSR_WRITE_FLUSH(sc);
7558 delay(2);
7559 }
7560 }
7561
7562 /*
7563 * wm_eeprom_recvbits:
7564 *
7565 * Receive a series of bits from the EEPROM.
7566 */
7567 static void
7568 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7569 {
7570 uint32_t reg, val;
7571 int x;
7572
7573 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7574
7575 val = 0;
7576 for (x = nbits; x > 0; x--) {
7577 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7578 CSR_WRITE_FLUSH(sc);
7579 delay(2);
7580 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7581 val |= (1U << (x - 1));
7582 CSR_WRITE(sc, WMREG_EECD, reg);
7583 CSR_WRITE_FLUSH(sc);
7584 delay(2);
7585 }
7586 *valp = val;
7587 }
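
/*
 * Illustrative sketch (not compiled): the two bit-bang primitives above
 * compose a whole Microwire transaction.  Clocking UWIRE_OPC_READ
 * (0b110, 3 bits) drives DI high, high, low with an SK pulse per bit;
 * the address and the 16 data bits follow the same MSB-first framing.
 * This duplicates what wm_nvm_read_uwire() below does and exists only
 * to make the wire protocol explicit.
 */
#if 0
	uint32_t val;

	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	  /* opcode */
	wm_eeprom_sendbits(sc, word, sc->sc_ee_addrbits); /* cell address */
	wm_eeprom_recvbits(sc, &val, 16);		  /* one data word */
#endif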
7588
7589 /* Microwire */
7590
7591 /*
7592 * wm_nvm_read_uwire:
7593 *
7594 * Read a word from the EEPROM using the MicroWire protocol.
7595 */
7596 static int
7597 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7598 {
7599 uint32_t reg, val;
7600 int i;
7601
7602 for (i = 0; i < wordcnt; i++) {
7603 /* Clear SK and DI. */
7604 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7605 CSR_WRITE(sc, WMREG_EECD, reg);
7606
7607 /*
7608 * XXX: workaround for a bug in qemu-0.12.x and prior
7609 * and Xen.
7610 *
7611 		 * We use this workaround only for the 82540, because
7612 		 * qemu's e1000 acts as an 82540.
7613 */
7614 if (sc->sc_type == WM_T_82540) {
7615 reg |= EECD_SK;
7616 CSR_WRITE(sc, WMREG_EECD, reg);
7617 reg &= ~EECD_SK;
7618 CSR_WRITE(sc, WMREG_EECD, reg);
7619 CSR_WRITE_FLUSH(sc);
7620 delay(2);
7621 }
7622 /* XXX: end of workaround */
7623
7624 /* Set CHIP SELECT. */
7625 reg |= EECD_CS;
7626 CSR_WRITE(sc, WMREG_EECD, reg);
7627 CSR_WRITE_FLUSH(sc);
7628 delay(2);
7629
7630 /* Shift in the READ command. */
7631 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7632
7633 /* Shift in address. */
7634 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
7635
7636 /* Shift out the data. */
7637 wm_eeprom_recvbits(sc, &val, 16);
7638 data[i] = val & 0xffff;
7639
7640 /* Clear CHIP SELECT. */
7641 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7642 CSR_WRITE(sc, WMREG_EECD, reg);
7643 CSR_WRITE_FLUSH(sc);
7644 delay(2);
7645 }
7646
7647 return 0;
7648 }
7649
7650 /* SPI */
7651
7652 /* Set SPI related information */
7653 static void
7654 wm_set_spiaddrbits(struct wm_softc *sc)
7655 {
7656 uint32_t reg;
7657
7658 sc->sc_flags |= WM_F_EEPROM_SPI;
7659 reg = CSR_READ(sc, WMREG_EECD);
7660 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7661 }
7662
7663 /*
7664 * wm_nvm_ready_spi:
7665 *
7666 * Wait for a SPI EEPROM to be ready for commands.
7667 */
7668 static int
7669 wm_nvm_ready_spi(struct wm_softc *sc)
7670 {
7671 uint32_t val;
7672 int usec;
7673
7674 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7675 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7676 wm_eeprom_recvbits(sc, &val, 8);
7677 if ((val & SPI_SR_RDY) == 0)
7678 break;
7679 }
7680 if (usec >= SPI_MAX_RETRIES) {
7681 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7682 return 1;
7683 }
7684 return 0;
7685 }
7686
7687 /*
7688 * wm_nvm_read_spi:
7689 *
7690  * Read a word from the EEPROM using the SPI protocol.
7691 */
7692 static int
7693 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7694 {
7695 uint32_t reg, val;
7696 int i;
7697 uint8_t opc;
7698
7699 /* Clear SK and CS. */
7700 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7701 CSR_WRITE(sc, WMREG_EECD, reg);
7702 CSR_WRITE_FLUSH(sc);
7703 delay(2);
7704
7705 if (wm_nvm_ready_spi(sc))
7706 return 1;
7707
7708 /* Toggle CS to flush commands. */
7709 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7710 CSR_WRITE_FLUSH(sc);
7711 delay(2);
7712 CSR_WRITE(sc, WMREG_EECD, reg);
7713 CSR_WRITE_FLUSH(sc);
7714 delay(2);
7715
7716 opc = SPI_OPC_READ;
7717 if (sc->sc_ee_addrbits == 8 && word >= 128)
7718 opc |= SPI_OPC_A8;
7719
7720 wm_eeprom_sendbits(sc, opc, 8);
7721 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
7722
7723 for (i = 0; i < wordcnt; i++) {
7724 wm_eeprom_recvbits(sc, &val, 16);
7725 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7726 }
7727
7728 /* Raise CS and clear SK. */
7729 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7730 CSR_WRITE(sc, WMREG_EECD, reg);
7731 CSR_WRITE_FLUSH(sc);
7732 delay(2);
7733
7734 return 0;
7735 }
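
/*
 * A note on the byte swap above: wm_eeprom_recvbits() shifts bits in
 * MSB first, so the byte that arrives first from the serial EEPROM
 * lands in the upper half of the result.  Swapping the halves restores
 * the NVM's word order.  Minimal sketch of the swap ("wm_spi_bswap16"
 * is a hypothetical helper, not used by the driver):
 */
#if 0
static inline uint16_t
wm_spi_bswap16(uint16_t v)
{

	return ((v >> 8) & 0xff) | ((v & 0xff) << 8);
}
#endif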
7736
7737 /* Reading via the EERD register */
7738
7739 static int
7740 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7741 {
7742 uint32_t attempts = 100000;
7743 uint32_t i, reg = 0;
7744 int32_t done = -1;
7745
7746 for (i = 0; i < attempts; i++) {
7747 reg = CSR_READ(sc, rw);
7748
7749 if (reg & EERD_DONE) {
7750 done = 0;
7751 break;
7752 }
7753 delay(5);
7754 }
7755
7756 return done;
7757 }
7758
7759 static int
7760 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
7761 uint16_t *data)
7762 {
7763 int i, eerd = 0;
7764 int error = 0;
7765
7766 for (i = 0; i < wordcnt; i++) {
7767 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
7768
7769 CSR_WRITE(sc, WMREG_EERD, eerd);
7770 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
7771 if (error != 0)
7772 break;
7773
7774 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
7775 }
7776
7777 return error;
7778 }
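
/*
 * The EERD interface replaces the bit-bang protocol with one register
 * transaction per word: write the word address plus the START bit, poll
 * DONE, then take the data from the upper bits.  One-word sketch of the
 * same sequence (illustrative fragment; wm_nvm_read_eerd() above is the
 * real loop):
 */
#if 0
	uint16_t word;

	CSR_WRITE(sc, WMREG_EERD, (offset << EERD_ADDR_SHIFT) | EERD_START);
	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) == 0)
		word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
#endif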
7779
7780 /* Flash */
7781
7782 static int
7783 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7784 {
7785 uint32_t eecd;
7786 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7787 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7788 uint8_t sig_byte = 0;
7789
7790 switch (sc->sc_type) {
7791 case WM_T_ICH8:
7792 case WM_T_ICH9:
7793 eecd = CSR_READ(sc, WMREG_EECD);
7794 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7795 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7796 return 0;
7797 }
7798 /* FALLTHROUGH */
7799 default:
7800 /* Default to 0 */
7801 *bank = 0;
7802
7803 /* Check bank 0 */
7804 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7805 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7806 *bank = 0;
7807 return 0;
7808 }
7809
7810 /* Check bank 1 */
7811 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7812 &sig_byte);
7813 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7814 *bank = 1;
7815 return 0;
7816 }
7817 }
7818
7819 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
7820 device_xname(sc->sc_dev)));
7821 return -1;
7822 }
7823
7824 /******************************************************************************
7825 * This function does initial flash setup so that a new read/write/erase cycle
7826 * can be started.
7827 *
7828 * sc - The pointer to the hw structure
7829 ****************************************************************************/
7830 static int32_t
7831 wm_ich8_cycle_init(struct wm_softc *sc)
7832 {
7833 uint16_t hsfsts;
7834 int32_t error = 1;
7835 int32_t i = 0;
7836
7837 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7838
7839 	/* Check that the Flash Descriptor Valid bit is set in Hw status */
7840 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7841 return error;
7842 }
7843
7844 /* Clear FCERR in Hw status by writing 1 */
7845 /* Clear DAEL in Hw status by writing a 1 */
7846 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7847
7848 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7849
7850 	/*
7851 	 * Either we should have a hardware SPI cycle-in-progress bit to
7852 	 * check against in order to start a new cycle, or the FDONE bit
7853 	 * should be changed in the hardware so that it reads 1 after a
7854 	 * hardware reset, which could then indicate whether a cycle is in
7855 	 * progress or has completed.  We should also have some software
7856 	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit
7857 	 * so that two threads' accesses to those bits are serialized, or
7858 	 * some way to keep two threads from starting a cycle at once.
7859 	 */
7860
7861 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7862 /*
7863 * There is no cycle running at present, so we can start a
7864 * cycle
7865 */
7866
7867 /* Begin by setting Flash Cycle Done. */
7868 hsfsts |= HSFSTS_DONE;
7869 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7870 error = 0;
7871 } else {
7872 /*
7873 		 * Otherwise poll for some time so the current cycle has a
7874 		 * chance to end before giving up.
7875 */
7876 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7877 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7878 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7879 error = 0;
7880 break;
7881 }
7882 delay(1);
7883 }
7884 if (error == 0) {
7885 /*
7886 			 * The previous cycle ended within the timeout,
7887 			 * so now set the Flash Cycle Done.
7888 */
7889 hsfsts |= HSFSTS_DONE;
7890 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7891 }
7892 }
7893 return error;
7894 }
7895
7896 /******************************************************************************
7897 * This function starts a flash cycle and waits for its completion
7898 *
7899 * sc - The pointer to the hw structure
7900 ****************************************************************************/
7901 static int32_t
7902 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7903 {
7904 uint16_t hsflctl;
7905 uint16_t hsfsts;
7906 int32_t error = 1;
7907 uint32_t i = 0;
7908
7909 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7910 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7911 hsflctl |= HSFCTL_GO;
7912 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7913
7914 /* Wait till FDONE bit is set to 1 */
7915 do {
7916 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7917 if (hsfsts & HSFSTS_DONE)
7918 break;
7919 delay(1);
7920 i++;
7921 } while (i < timeout);
7922 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7923 error = 0;
7924
7925 return error;
7926 }
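
/*
 * The two helpers above pair up: wm_ich8_cycle_init() clears stale
 * error state and waits out any in-flight cycle, and
 * wm_ich8_flash_cycle() sets GO and polls FDONE.  Sketch of the pairing
 * (illustrative fragment; wm_read_ich8_data() below is the real user):
 */
#if 0
	if (wm_ich8_cycle_init(sc) == 0 &&
	    wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT) == 0) {
		/* ICH_FLASH_FDATA0 now holds the bytes just read */
	}
#endif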
7927
7928 /******************************************************************************
7929 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7930 *
7931 * sc - The pointer to the hw structure
7932 * index - The index of the byte or word to read.
7933 * size - Size of data to read, 1=byte 2=word
7934 * data - Pointer to the word to store the value read.
7935 *****************************************************************************/
7936 static int32_t
7937 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7938 uint32_t size, uint16_t *data)
7939 {
7940 uint16_t hsfsts;
7941 uint16_t hsflctl;
7942 uint32_t flash_linear_address;
7943 uint32_t flash_data = 0;
7944 int32_t error = 1;
7945 int32_t count = 0;
7946
7947 	if (size < 1 || size > 2 || data == NULL ||
7948 index > ICH_FLASH_LINEAR_ADDR_MASK)
7949 return error;
7950
7951 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7952 sc->sc_ich8_flash_base;
7953
7954 do {
7955 delay(1);
7956 /* Steps */
7957 error = wm_ich8_cycle_init(sc);
7958 if (error)
7959 break;
7960
7961 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7962 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7963 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7964 & HSFCTL_BCOUNT_MASK;
7965 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7966 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7967
7968 /*
7969 * Write the last 24 bits of index into Flash Linear address
7970 * field in Flash Address
7971 */
7972 /* TODO: TBD maybe check the index against the size of flash */
7973
7974 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7975
7976 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7977
7978 /*
7979 		 * If FCERR is set, clear it and retry the whole sequence
7980 		 * a few more times; otherwise read the data out of Flash
7981 		 * Data0, which returns the bytes least significant byte
7982 		 * first.
7983 */
7984 if (error == 0) {
7985 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7986 if (size == 1)
7987 *data = (uint8_t)(flash_data & 0x000000FF);
7988 else if (size == 2)
7989 *data = (uint16_t)(flash_data & 0x0000FFFF);
7990 break;
7991 } else {
7992 /*
7993 * If we've gotten here, then things are probably
7994 * completely hosed, but if the error condition is
7995 * detected, it won't hurt to give it another try...
7996 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7997 */
7998 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7999 if (hsfsts & HSFSTS_ERR) {
8000 /* Repeat for some time before giving up. */
8001 continue;
8002 } else if ((hsfsts & HSFSTS_DONE) == 0)
8003 break;
8004 }
8005 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8006
8007 return error;
8008 }
8009
8010 /******************************************************************************
8011 * Reads a single byte from the NVM using the ICH8 flash access registers.
8012 *
8013 * sc - pointer to wm_hw structure
8014 * index - The index of the byte to read.
8015 * data - Pointer to a byte to store the value read.
8016 *****************************************************************************/
8017 static int32_t
8018 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8019 {
8020 int32_t status;
8021 uint16_t word = 0;
8022
8023 status = wm_read_ich8_data(sc, index, 1, &word);
8024 if (status == 0)
8025 *data = (uint8_t)word;
8026 else
8027 *data = 0;
8028
8029 return status;
8030 }
8031
8032 /******************************************************************************
8033 * Reads a word from the NVM using the ICH8 flash access registers.
8034 *
8035 * sc - pointer to wm_hw structure
8036 * index - The starting byte index of the word to read.
8037 * data - Pointer to a word to store the value read.
8038 *****************************************************************************/
8039 static int32_t
8040 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8041 {
8042 int32_t status;
8043
8044 status = wm_read_ich8_data(sc, index, 2, data);
8045 return status;
8046 }
8047
8048 /******************************************************************************
8049 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8050 * register.
8051 *
8052 * sc - Struct containing variables accessed by shared code
8053 * offset - offset of word in the EEPROM to read
8054 * data - word read from the EEPROM
8055 * words - number of words to read
8056 *****************************************************************************/
8057 static int
8058 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8059 {
8060 int32_t error = 0;
8061 uint32_t flash_bank = 0;
8062 uint32_t act_offset = 0;
8063 uint32_t bank_offset = 0;
8064 uint16_t word = 0;
8065 uint16_t i = 0;
8066
8067 /*
8068 * We need to know which is the valid flash bank. In the event
8069 * that we didn't allocate eeprom_shadow_ram, we may not be
8070 * managing flash_bank. So it cannot be trusted and needs
8071 * to be updated with each read.
8072 */
8073 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8074 if (error) {
8075 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
8076 __func__);
8077 flash_bank = 0;
8078 }
8079
8080 /*
8081 * Adjust offset appropriately if we're on bank 1 - adjust for word
8082 * size
8083 */
8084 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8085
8086 error = wm_get_swfwhw_semaphore(sc);
8087 if (error) {
8088 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8089 __func__);
8090 return error;
8091 }
8092
8093 for (i = 0; i < words; i++) {
8094 /* The NVM part needs a byte offset, hence * 2 */
8095 act_offset = bank_offset + ((offset + i) * 2);
8096 error = wm_read_ich8_word(sc, act_offset, &word);
8097 if (error) {
8098 aprint_error_dev(sc->sc_dev,
8099 "%s: failed to read NVM\n", __func__);
8100 break;
8101 }
8102 data[i] = word;
8103 }
8104
8105 wm_put_swfwhw_semaphore(sc);
8106 return error;
8107 }
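
/*
 * Worked example of the offset arithmetic above, assuming a flash bank
 * size of 0x1000 words: NVM word 3 read from bank 1 maps to byte
 * address 1 * (0x1000 * 2) + 3 * 2 = 0x2006.
 */
#if 0
	act_offset = (1 * (0x1000 * 2)) + (3 * 2);	/* == 0x2006 */
#endif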
8108
8109 /* Lock, detecting NVM type, validate checksum and read */
8110
8111 /*
8112 * wm_nvm_acquire:
8113 *
8114 * Perform the EEPROM handshake required on some chips.
8115 */
8116 static int
8117 wm_nvm_acquire(struct wm_softc *sc)
8118 {
8119 uint32_t reg;
8120 int x;
8121 int ret = 0;
8122
8123 	/* Flash-type NVM needs no handshake; always succeeds */
8124 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8125 return 0;
8126
8127 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8128 ret = wm_get_swfwhw_semaphore(sc);
8129 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8130 /* This will also do wm_get_swsm_semaphore() if needed */
8131 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8132 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8133 ret = wm_get_swsm_semaphore(sc);
8134 }
8135
8136 if (ret) {
8137 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8138 __func__);
8139 return 1;
8140 }
8141
8142 if (sc->sc_flags & WM_F_LOCK_EECD) {
8143 reg = CSR_READ(sc, WMREG_EECD);
8144
8145 /* Request EEPROM access. */
8146 reg |= EECD_EE_REQ;
8147 CSR_WRITE(sc, WMREG_EECD, reg);
8148
8149 /* ..and wait for it to be granted. */
8150 for (x = 0; x < 1000; x++) {
8151 reg = CSR_READ(sc, WMREG_EECD);
8152 if (reg & EECD_EE_GNT)
8153 break;
8154 delay(5);
8155 }
8156 if ((reg & EECD_EE_GNT) == 0) {
8157 aprint_error_dev(sc->sc_dev,
8158 "could not acquire EEPROM GNT\n");
8159 reg &= ~EECD_EE_REQ;
8160 CSR_WRITE(sc, WMREG_EECD, reg);
8161 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8162 wm_put_swfwhw_semaphore(sc);
8163 if (sc->sc_flags & WM_F_LOCK_SWFW)
8164 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8165 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8166 wm_put_swsm_semaphore(sc);
8167 return 1;
8168 }
8169 }
8170
8171 return 0;
8172 }
8173
8174 /*
8175 * wm_nvm_release:
8176 *
8177 * Release the EEPROM mutex.
8178 */
8179 static void
8180 wm_nvm_release(struct wm_softc *sc)
8181 {
8182 uint32_t reg;
8183
8184 	/* Flash-type NVM took no handshake; nothing to release */
8185 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8186 return;
8187
8188 if (sc->sc_flags & WM_F_LOCK_EECD) {
8189 reg = CSR_READ(sc, WMREG_EECD);
8190 reg &= ~EECD_EE_REQ;
8191 CSR_WRITE(sc, WMREG_EECD, reg);
8192 }
8193
8194 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8195 wm_put_swfwhw_semaphore(sc);
8196 if (sc->sc_flags & WM_F_LOCK_SWFW)
8197 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8198 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8199 wm_put_swsm_semaphore(sc);
8200 }
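
/*
 * Callers bracket raw EEPROM access with the acquire/release pair
 * above.  Minimal sketch of the expected pattern (illustrative;
 * wm_nvm_read() below is the canonical user):
 */
#if 0
	if (wm_nvm_acquire(sc) == 0) {
		/* ... access the EEPROM through WMREG_EECD ... */
		wm_nvm_release(sc);
	}
#endif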
8201
8202 static int
8203 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8204 {
8205 uint32_t eecd = 0;
8206
8207 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8208 || sc->sc_type == WM_T_82583) {
8209 eecd = CSR_READ(sc, WMREG_EECD);
8210
8211 /* Isolate bits 15 & 16 */
8212 eecd = ((eecd >> 15) & 0x03);
8213
8214 /* If both bits are set, device is Flash type */
8215 if (eecd == 0x03)
8216 return 0;
8217 }
8218 return 1;
8219 }
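
/*
 * EECD bits 15 and 16 encode the NVM type on 82573/82574/82583: both
 * set means on-die flash, anything else means an external EEPROM.
 * Sketch of the decode in isolation (illustrative fragment):
 */
#if 0
	uint32_t nvmtype;

	nvmtype = (CSR_READ(sc, WMREG_EECD) >> 15) & 0x03;
	/* nvmtype == 0x03 -> flash, otherwise onboard EEPROM */
#endif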
8220
8221 #define NVM_CHECKSUM 0xBABA
8222 #define EEPROM_SIZE 0x0040
8223 #define NVM_COMPAT 0x0003
8224 #define NVM_COMPAT_VALID_CHECKSUM 0x0001
8225 #define NVM_FUTURE_INIT_WORD1 0x0019
8226 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
8227
8228 /*
8229 * wm_nvm_validate_checksum
8230 *
8231 * The checksum is defined as the sum of the first 64 (16 bit) words.
8232 */
8233 static int
8234 wm_nvm_validate_checksum(struct wm_softc *sc)
8235 {
8236 uint16_t checksum;
8237 uint16_t eeprom_data;
8238 #ifdef WM_DEBUG
8239 uint16_t csum_wordaddr, valid_checksum;
8240 #endif
8241 int i;
8242
8243 checksum = 0;
8244
8245 /* Don't check for I211 */
8246 if (sc->sc_type == WM_T_I211)
8247 return 0;
8248
8249 #ifdef WM_DEBUG
8250 if (sc->sc_type == WM_T_PCH_LPT) {
8251 csum_wordaddr = NVM_COMPAT;
8252 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8253 } else {
8254 csum_wordaddr = NVM_FUTURE_INIT_WORD1;
8255 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8256 }
8257
8258 /* Dump EEPROM image for debug */
8259 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8260 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8261 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8262 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8263 if ((eeprom_data & valid_checksum) == 0) {
8264 DPRINTF(WM_DEBUG_NVM,
8265 ("%s: NVM need to be updated (%04x != %04x)\n",
8266 device_xname(sc->sc_dev), eeprom_data,
8267 valid_checksum));
8268 }
8269 }
8270
8271 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8272 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8273 for (i = 0; i < EEPROM_SIZE; i++) {
8274 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8275 printf("XX ");
8276 else
8277 printf("%04x ", eeprom_data);
8278 if (i % 8 == 7)
8279 printf("\n");
8280 }
8281 }
8282
8283 #endif /* WM_DEBUG */
8284
8285 for (i = 0; i < EEPROM_SIZE; i++) {
8286 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8287 return 1;
8288 checksum += eeprom_data;
8289 }
8290
8291 if (checksum != (uint16_t) NVM_CHECKSUM) {
8292 #ifdef WM_DEBUG
8293 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8294 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8295 #endif
8296 }
8297
8298 return 0;
8299 }
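
/*
 * The checksum rule used above: the low 16 bits of the sum of words
 * 0x00-0x3f must equal NVM_CHECKSUM (0xBABA).  Images conventionally
 * store the fix-up in the last word, i.e. word 0x3f is chosen as
 * 0xBABA minus the sum of words 0x00-0x3e.  Standalone sketch of the
 * check ("image" is a hypothetical in-memory copy of the NVM):
 */
#if 0
	uint16_t sum = 0;
	int i, valid;

	for (i = 0; i < EEPROM_SIZE; i++)
		sum += image[i];
	valid = (sum == NVM_CHECKSUM);
#endif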
8300
8301 /*
8302 * wm_nvm_read:
8303 *
8304 * Read data from the serial EEPROM.
8305 */
8306 static int
8307 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8308 {
8309 int rv;
8310
8311 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8312 return 1;
8313
8314 if (wm_nvm_acquire(sc))
8315 return 1;
8316
8317 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8318 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8319 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8320 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8321 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8322 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8323 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8324 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8325 else
8326 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8327
8328 wm_nvm_release(sc);
8329 return rv;
8330 }
8331
8332 /*
8333 * Hardware semaphores.
8334  * Very complex...
8335 */
8336
8337 static int
8338 wm_get_swsm_semaphore(struct wm_softc *sc)
8339 {
8340 int32_t timeout;
8341 uint32_t swsm;
8342
8343 /* Get the SW semaphore. */
8344 timeout = 1000 + 1; /* XXX */
8345 while (timeout) {
8346 swsm = CSR_READ(sc, WMREG_SWSM);
8347
8348 if ((swsm & SWSM_SMBI) == 0)
8349 break;
8350
8351 delay(50);
8352 timeout--;
8353 }
8354
8355 if (timeout == 0) {
8356 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
8357 return 1;
8358 }
8359
8360 /* Get the FW semaphore. */
8361 timeout = 1000 + 1; /* XXX */
8362 while (timeout) {
8363 swsm = CSR_READ(sc, WMREG_SWSM);
8364 swsm |= SWSM_SWESMBI;
8365 CSR_WRITE(sc, WMREG_SWSM, swsm);
8366 /* If we managed to set the bit we got the semaphore. */
8367 swsm = CSR_READ(sc, WMREG_SWSM);
8368 if (swsm & SWSM_SWESMBI)
8369 break;
8370
8371 delay(50);
8372 timeout--;
8373 }
8374
8375 if (timeout == 0) {
8376 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8377 /* Release semaphores */
8378 wm_put_swsm_semaphore(sc);
8379 return 1;
8380 }
8381 return 0;
8382 }
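
/*
 * The SWESMBI step above relies on the hardware refusing the write
 * while firmware holds the bit: software sets it, reads it back, and
 * owns the semaphore only if the bit stuck.  The idiom in isolation
 * (illustrative fragment):
 */
#if 0
	uint32_t swsm;
	int owned;

	swsm = CSR_READ(sc, WMREG_SWSM);
	CSR_WRITE(sc, WMREG_SWSM, swsm | SWSM_SWESMBI);
	owned = (CSR_READ(sc, WMREG_SWSM) & SWSM_SWESMBI) != 0;
#endif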
8383
8384 static void
8385 wm_put_swsm_semaphore(struct wm_softc *sc)
8386 {
8387 uint32_t swsm;
8388
8389 swsm = CSR_READ(sc, WMREG_SWSM);
8390 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8391 CSR_WRITE(sc, WMREG_SWSM, swsm);
8392 }
8393
8394 static int
8395 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8396 {
8397 uint32_t swfw_sync;
8398 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8399 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8400 	int timeout;
8401
8402 for (timeout = 0; timeout < 200; timeout++) {
8403 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8404 if (wm_get_swsm_semaphore(sc)) {
8405 aprint_error_dev(sc->sc_dev,
8406 "%s: failed to get semaphore\n",
8407 __func__);
8408 return 1;
8409 }
8410 }
8411 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8412 if ((swfw_sync & (swmask | fwmask)) == 0) {
8413 swfw_sync |= swmask;
8414 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8415 if (sc->sc_flags & WM_F_LOCK_SWSM)
8416 wm_put_swsm_semaphore(sc);
8417 return 0;
8418 }
8419 if (sc->sc_flags & WM_F_LOCK_SWSM)
8420 wm_put_swsm_semaphore(sc);
8421 delay(5000);
8422 }
8423 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8424 device_xname(sc->sc_dev), mask, swfw_sync);
8425 return 1;
8426 }
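
/*
 * SW_FW_SYNC carries one software claim bit and one firmware claim bit
 * per resource; the loop above takes the resource only when both halves
 * are clear, and sets just the software half.  Decode of the masks for
 * SWFW_EEP_SM (illustrative fragment):
 */
#if 0
	uint32_t swmask, fwmask;
	int idle;

	swmask = SWFW_EEP_SM << SWFW_SOFT_SHIFT;	/* driver's claim */
	fwmask = SWFW_EEP_SM << SWFW_FIRM_SHIFT;	/* firmware's claim */
	idle = (CSR_READ(sc, WMREG_SW_FW_SYNC) & (swmask | fwmask)) == 0;
#endif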
8427
8428 static void
8429 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8430 {
8431 uint32_t swfw_sync;
8432
8433 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8434 while (wm_get_swsm_semaphore(sc) != 0)
8435 continue;
8436 }
8437 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8438 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8439 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8440 if (sc->sc_flags & WM_F_LOCK_SWSM)
8441 wm_put_swsm_semaphore(sc);
8442 }
8443
8444 static int
8445 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8446 {
8447 uint32_t ext_ctrl;
8448 	int timeout;
8449
8450 for (timeout = 0; timeout < 200; timeout++) {
8451 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8452 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8453 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8454
8455 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8456 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8457 return 0;
8458 delay(5000);
8459 }
8460 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8461 device_xname(sc->sc_dev), ext_ctrl);
8462 return 1;
8463 }
8464
8465 static void
8466 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8467 {
8468 uint32_t ext_ctrl;
8469 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8470 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8471 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8472 }
8473
8474 static int
8475 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8476 {
8477 int i = 0;
8478 uint32_t reg;
8479
8480 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8481 do {
8482 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8483 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8484 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8485 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8486 break;
8487 delay(2*1000);
8488 i++;
8489 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8490
8491 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8492 wm_put_hw_semaphore_82573(sc);
8493 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8494 device_xname(sc->sc_dev));
8495 return -1;
8496 }
8497
8498 return 0;
8499 }
8500
8501 static void
8502 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8503 {
8504 uint32_t reg;
8505
8506 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8507 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8508 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8509 }
8510
8511 /*
8512 * Management mode and power management related subroutines.
8513 * BMC, AMT, suspend/resume and EEE.
8514 */
8515
8516 static int
8517 wm_check_mng_mode(struct wm_softc *sc)
8518 {
8519 int rv;
8520
8521 switch (sc->sc_type) {
8522 case WM_T_ICH8:
8523 case WM_T_ICH9:
8524 case WM_T_ICH10:
8525 case WM_T_PCH:
8526 case WM_T_PCH2:
8527 case WM_T_PCH_LPT:
8528 rv = wm_check_mng_mode_ich8lan(sc);
8529 break;
8530 case WM_T_82574:
8531 case WM_T_82583:
8532 rv = wm_check_mng_mode_82574(sc);
8533 break;
8534 case WM_T_82571:
8535 case WM_T_82572:
8536 case WM_T_82573:
8537 case WM_T_80003:
8538 rv = wm_check_mng_mode_generic(sc);
8539 break;
8540 default:
8541 		/* nothing to do */
8542 rv = 0;
8543 break;
8544 }
8545
8546 return rv;
8547 }
8548
8549 static int
8550 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8551 {
8552 uint32_t fwsm;
8553
8554 fwsm = CSR_READ(sc, WMREG_FWSM);
8555
8556 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8557 return 1;
8558
8559 return 0;
8560 }
8561
8562 static int
8563 wm_check_mng_mode_82574(struct wm_softc *sc)
8564 {
8565 uint16_t data;
8566
8567 wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
8568
8569 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8570 return 1;
8571
8572 return 0;
8573 }
8574
8575 static int
8576 wm_check_mng_mode_generic(struct wm_softc *sc)
8577 {
8578 uint32_t fwsm;
8579
8580 fwsm = CSR_READ(sc, WMREG_FWSM);
8581
8582 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8583 return 1;
8584
8585 return 0;
8586 }
8587
8588 static int
8589 wm_enable_mng_pass_thru(struct wm_softc *sc)
8590 {
8591 uint32_t manc, fwsm, factps;
8592
8593 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8594 return 0;
8595
8596 manc = CSR_READ(sc, WMREG_MANC);
8597
8598 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8599 device_xname(sc->sc_dev), manc));
8600 if ((manc & MANC_RECV_TCO_EN) == 0)
8601 return 0;
8602
8603 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8604 fwsm = CSR_READ(sc, WMREG_FWSM);
8605 factps = CSR_READ(sc, WMREG_FACTPS);
8606 if (((factps & FACTPS_MNGCG) == 0)
8607 && ((fwsm & FWSM_MODE_MASK)
8608 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8609 return 1;
8610 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
8611 uint16_t data;
8612
8613 factps = CSR_READ(sc, WMREG_FACTPS);
8614 wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
8615 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8616 device_xname(sc->sc_dev), factps, data));
8617 if (((factps & FACTPS_MNGCG) == 0)
8618 && ((data & EEPROM_CFG2_MNGM_MASK)
8619 == (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8620 return 1;
8621 } else if (((manc & MANC_SMBUS_EN) != 0)
8622 && ((manc & MANC_ASF_EN) == 0))
8623 return 1;
8624
8625 return 0;
8626 }
8627
8628 static int
8629 wm_check_reset_block(struct wm_softc *sc)
8630 {
8631 uint32_t reg;
8632
8633 switch (sc->sc_type) {
8634 case WM_T_ICH8:
8635 case WM_T_ICH9:
8636 case WM_T_ICH10:
8637 case WM_T_PCH:
8638 case WM_T_PCH2:
8639 case WM_T_PCH_LPT:
8640 reg = CSR_READ(sc, WMREG_FWSM);
8641 if ((reg & FWSM_RSPCIPHY) != 0)
8642 return 0;
8643 else
8644 return -1;
8645 break;
8646 case WM_T_82571:
8647 case WM_T_82572:
8648 case WM_T_82573:
8649 case WM_T_82574:
8650 case WM_T_82583:
8651 case WM_T_80003:
8652 reg = CSR_READ(sc, WMREG_MANC);
8653 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8654 return -1;
8655 else
8656 return 0;
8657 break;
8658 default:
8659 /* no problem */
8660 break;
8661 }
8662
8663 return 0;
8664 }
8665
8666 static void
8667 wm_get_hw_control(struct wm_softc *sc)
8668 {
8669 uint32_t reg;
8670
8671 switch (sc->sc_type) {
8672 case WM_T_82573:
8673 reg = CSR_READ(sc, WMREG_SWSM);
8674 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8675 break;
8676 case WM_T_82571:
8677 case WM_T_82572:
8678 case WM_T_82574:
8679 case WM_T_82583:
8680 case WM_T_80003:
8681 case WM_T_ICH8:
8682 case WM_T_ICH9:
8683 case WM_T_ICH10:
8684 case WM_T_PCH:
8685 case WM_T_PCH2:
8686 case WM_T_PCH_LPT:
8687 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8688 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8689 break;
8690 default:
8691 break;
8692 }
8693 }
8694
8695 static void
8696 wm_release_hw_control(struct wm_softc *sc)
8697 {
8698 uint32_t reg;
8699
8700 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8701 return;
8702
8703 if (sc->sc_type == WM_T_82573) {
8704 reg = CSR_READ(sc, WMREG_SWSM);
8705 reg &= ~SWSM_DRV_LOAD;
8706 		CSR_WRITE(sc, WMREG_SWSM, reg);
8707 } else {
8708 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8709 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8710 }
8711 }
8712
8713 static void
8714 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8715 {
8716 uint32_t reg;
8717
8718 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8719
8720 if (on != 0)
8721 reg |= EXTCNFCTR_GATE_PHY_CFG;
8722 else
8723 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8724
8725 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8726 }
8727
8728 static void
8729 wm_smbustopci(struct wm_softc *sc)
8730 {
8731 uint32_t fwsm;
8732
8733 fwsm = CSR_READ(sc, WMREG_FWSM);
8734 if (((fwsm & FWSM_FW_VALID) == 0)
8735 && ((wm_check_reset_block(sc) == 0))) {
8736 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8737 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8738 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8739 CSR_WRITE_FLUSH(sc);
8740 delay(10);
8741 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8742 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8743 CSR_WRITE_FLUSH(sc);
8744 delay(50*1000);
8745
8746 /*
8747 * Gate automatic PHY configuration by hardware on non-managed
8748 * 82579
8749 */
8750 if (sc->sc_type == WM_T_PCH2)
8751 wm_gate_hw_phy_config_ich8lan(sc, 1);
8752 }
8753 }
8754
8755 static void
8756 wm_init_manageability(struct wm_softc *sc)
8757 {
8758
8759 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8760 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8761 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8762
8763 /* Disable hardware interception of ARP */
8764 manc &= ~MANC_ARP_EN;
8765
8766 /* Enable receiving management packets to the host */
8767 if (sc->sc_type >= WM_T_82571) {
8768 manc |= MANC_EN_MNG2HOST;
8769 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8770 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8771
8772 }
8773
8774 CSR_WRITE(sc, WMREG_MANC, manc);
8775 }
8776 }
8777
8778 static void
8779 wm_release_manageability(struct wm_softc *sc)
8780 {
8781
8782 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8783 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8784
8785 manc |= MANC_ARP_EN;
8786 if (sc->sc_type >= WM_T_82571)
8787 manc &= ~MANC_EN_MNG2HOST;
8788
8789 CSR_WRITE(sc, WMREG_MANC, manc);
8790 }
8791 }
8792
8793 static void
8794 wm_get_wakeup(struct wm_softc *sc)
8795 {
8796
8797 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8798 switch (sc->sc_type) {
8799 case WM_T_82573:
8800 case WM_T_82583:
8801 sc->sc_flags |= WM_F_HAS_AMT;
8802 /* FALLTHROUGH */
8803 case WM_T_80003:
8804 case WM_T_82541:
8805 case WM_T_82547:
8806 case WM_T_82571:
8807 case WM_T_82572:
8808 case WM_T_82574:
8809 case WM_T_82575:
8810 case WM_T_82576:
8811 case WM_T_82580:
8812 case WM_T_82580ER:
8813 case WM_T_I350:
8814 case WM_T_I354:
8815 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8816 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8817 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8818 break;
8819 case WM_T_ICH8:
8820 case WM_T_ICH9:
8821 case WM_T_ICH10:
8822 case WM_T_PCH:
8823 case WM_T_PCH2:
8824 case WM_T_PCH_LPT:
8825 sc->sc_flags |= WM_F_HAS_AMT;
8826 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8827 break;
8828 default:
8829 break;
8830 }
8831
8832 /* 1: HAS_MANAGE */
8833 if (wm_enable_mng_pass_thru(sc) != 0)
8834 sc->sc_flags |= WM_F_HAS_MANAGE;
8835
8836 #ifdef WM_DEBUG
8837 printf("\n");
8838 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8839 printf("HAS_AMT,");
8840 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8841 printf("ARC_SUBSYS_VALID,");
8842 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8843 printf("ASF_FIRMWARE_PRES,");
8844 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8845 printf("HAS_MANAGE,");
8846 printf("\n");
8847 #endif
8848 /*
8849 	 * Note that the WOL flags are set after the EEPROM settings
8850 	 * have been reset.
8851 */
8852 }
8853
8854 #ifdef WM_WOL
8855 /* WOL in the newer chipset interfaces (pchlan) */
8856 static void
8857 wm_enable_phy_wakeup(struct wm_softc *sc)
8858 {
8859 #if 0
8860 uint16_t preg;
8861
8862 /* Copy MAC RARs to PHY RARs */
8863
8864 /* Copy MAC MTA to PHY MTA */
8865
8866 /* Configure PHY Rx Control register */
8867
8868 /* Enable PHY wakeup in MAC register */
8869
8870 /* Configure and enable PHY wakeup in PHY registers */
8871
8872 /* Activate PHY wakeup */
8873
8874 /* XXX */
8875 #endif
8876 }
8877
8878 /* Power down workaround on D3 */
8879 static void
8880 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8881 {
8882 uint32_t reg;
8883 int i;
8884
8885 for (i = 0; i < 2; i++) {
8886 /* Disable link */
8887 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8888 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8889 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8890
8891 /*
8892 * Call gig speed drop workaround on Gig disable before
8893 * accessing any PHY registers
8894 */
8895 if (sc->sc_type == WM_T_ICH8)
8896 wm_gig_downshift_workaround_ich8lan(sc);
8897
8898 /* Write VR power-down enable */
8899 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8900 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8901 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8902 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8903
8904 /* Read it back and test */
8905 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8906 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8907 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8908 break;
8909
8910 /* Issue PHY reset and repeat at most one more time */
8911 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8912 }
8913 }
8914
8915 static void
8916 wm_enable_wakeup(struct wm_softc *sc)
8917 {
8918 uint32_t reg, pmreg;
8919 pcireg_t pmode;
8920
8921 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8922 &pmreg, NULL) == 0)
8923 return;
8924
8925 /* Advertise the wakeup capability */
8926 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8927 | CTRL_SWDPIN(3));
8928 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8929
8930 /* ICH workaround */
8931 switch (sc->sc_type) {
8932 case WM_T_ICH8:
8933 case WM_T_ICH9:
8934 case WM_T_ICH10:
8935 case WM_T_PCH:
8936 case WM_T_PCH2:
8937 case WM_T_PCH_LPT:
8938 /* Disable gig during WOL */
8939 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8940 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8941 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8942 if (sc->sc_type == WM_T_PCH)
8943 wm_gmii_reset(sc);
8944
8945 /* Power down workaround */
8946 if (sc->sc_phytype == WMPHY_82577) {
8947 struct mii_softc *child;
8948
8949 /* Assume that the PHY is copper */
8950 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8951 if (child->mii_mpd_rev <= 2)
8952 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8953 (768 << 5) | 25, 0x0444); /* magic num */
8954 }
8955 break;
8956 default:
8957 break;
8958 }
8959
8960 /* Keep the laser running on fiber adapters */
8961 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8962 || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
8963 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8964 reg |= CTRL_EXT_SWDPIN(3);
8965 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8966 }
8967
8968 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8969 #if 0 /* for the multicast packet */
8970 reg |= WUFC_MC;
8971 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8972 #endif
8973
8974 if (sc->sc_type == WM_T_PCH) {
8975 wm_enable_phy_wakeup(sc);
8976 } else {
8977 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8978 CSR_WRITE(sc, WMREG_WUFC, reg);
8979 }
8980
8981 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8982 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8983 || (sc->sc_type == WM_T_PCH2))
8984 && (sc->sc_phytype == WMPHY_IGP_3))
8985 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8986
8987 /* Request PME */
8988 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8989 #if 0
8990 /* Disable WOL */
8991 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8992 #else
8993 /* For WOL */
8994 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8995 #endif
8996 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8997 }
8998 #endif /* WM_WOL */
8999
9000 /* EEE */
9001
9002 static void
9003 wm_set_eee_i350(struct wm_softc *sc)
9004 {
9005 uint32_t ipcnfg, eeer;
9006
9007 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9008 eeer = CSR_READ(sc, WMREG_EEER);
9009
9010 if ((sc->sc_flags & WM_F_EEE) != 0) {
9011 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9012 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9013 | EEER_LPI_FC);
9014 } else {
9015 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9016 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9017 | EEER_LPI_FC);
9018 }
9019
9020 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9021 CSR_WRITE(sc, WMREG_EEER, eeer);
9022 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9023 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9024 }
9025
9026 /*
9027 * Workarounds (mainly PHY related).
9028 * Basically, PHY's workarounds are in the PHY drivers.
9029 */
9030
9031 /* Work-around for 82566 Kumeran PCS lock loss */
9032 static void
9033 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9034 {
9035 int miistatus, active, i;
9036 int reg;
9037
9038 miistatus = sc->sc_mii.mii_media_status;
9039
9040 /* If the link is not up, do nothing */
9041 	if ((miistatus & IFM_ACTIVE) == 0)
9042 return;
9043
9044 active = sc->sc_mii.mii_media_active;
9045
9046 /* Nothing to do if the link is other than 1Gbps */
9047 if (IFM_SUBTYPE(active) != IFM_1000_T)
9048 return;
9049
9050 for (i = 0; i < 10; i++) {
9051 /* read twice */
9052 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9053 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9054 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9055 goto out; /* GOOD! */
9056
9057 /* Reset the PHY */
9058 wm_gmii_reset(sc);
9059 delay(5*1000);
9060 }
9061
9062 /* Disable GigE link negotiation */
9063 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9064 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9065 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9066
9067 /*
9068 * Call gig speed drop workaround on Gig disable before accessing
9069 * any PHY registers.
9070 */
9071 wm_gig_downshift_workaround_ich8lan(sc);
9072
9073 out:
9074 return;
9075 }
9076
9077 /* WOL from S5 stops working */
9078 static void
9079 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9080 {
9081 uint16_t kmrn_reg;
9082
9083 /* Only for igp3 */
9084 if (sc->sc_phytype == WMPHY_IGP_3) {
9085 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9086 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9087 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9088 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9089 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9090 }
9091 }
9092
9093 /*
9094 * Workaround for pch's PHYs
9095 * XXX should be moved to new PHY driver?
9096 */
9097 static void
9098 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9099 {
9100 if (sc->sc_phytype == WMPHY_82577)
9101 wm_set_mdio_slow_mode_hv(sc);
9102
9103 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9104
9105 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9106
9107 /* 82578 */
9108 if (sc->sc_phytype == WMPHY_82578) {
9109 /* PCH rev. < 3 */
9110 if (sc->sc_rev < 3) {
9111 /* XXX 6 bit shift? Why? Is it page2? */
9112 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9113 0x66c0);
9114 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9115 0xffff);
9116 }
9117
9118 /* XXX phy rev. < 2 */
9119 }
9120
9121 /* Select page 0 */
9122
9123 /* XXX acquire semaphore */
9124 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9125 /* XXX release semaphore */
9126
9127 /*
9128 	 * Configure the K1 Si workaround during PHY reset assuming there
9129 	 * is link, so that K1 is disabled if the link runs at 1Gbps.
9130 */
9131 wm_k1_gig_workaround_hv(sc, 1);
9132 }
9133
9134 static void
9135 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9136 {
9137
9138 wm_set_mdio_slow_mode_hv(sc);
9139 }
9140
9141 static void
9142 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9143 {
9144 int k1_enable = sc->sc_nvm_k1_enabled;
9145
9146 /* XXX acquire semaphore */
9147
9148 if (link) {
9149 k1_enable = 0;
9150
9151 /* Link stall fix for link up */
9152 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9153 } else {
9154 /* Link stall fix for link down */
9155 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9156 }
9157
9158 wm_configure_k1_ich8lan(sc, k1_enable);
9159
9160 /* XXX release semaphore */
9161 }
9162
9163 static void
9164 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9165 {
9166 uint32_t reg;
9167
9168 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9169 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9170 reg | HV_KMRN_MDIO_SLOW);
9171 }
9172
9173 static void
9174 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9175 {
9176 uint32_t ctrl, ctrl_ext, tmp;
9177 uint16_t kmrn_reg;
9178
9179 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9180
9181 if (k1_enable)
9182 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9183 else
9184 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9185
9186 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9187
9188 delay(20);
9189
9190 ctrl = CSR_READ(sc, WMREG_CTRL);
9191 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9192
9193 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9194 tmp |= CTRL_FRCSPD;
9195
9196 CSR_WRITE(sc, WMREG_CTRL, tmp);
9197 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9198 CSR_WRITE_FLUSH(sc);
9199 delay(20);
9200
9201 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9202 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9203 CSR_WRITE_FLUSH(sc);
9204 delay(20);
9205 }
9206
9207 /* special case - for 82575 - need to do manual init ... */
9208 static void
9209 wm_reset_init_script_82575(struct wm_softc *sc)
9210 {
9211 /*
9212 	 * Remark: this is untested code - we have no board without EEPROM.
9213 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
9214 */
9215
9216 /* SerDes configuration via SERDESCTRL */
9217 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9218 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9219 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9220 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9221
9222 /* CCM configuration via CCMCTL register */
9223 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9224 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9225
9226 /* PCIe lanes configuration */
9227 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9228 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9229 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9230 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9231
9232 /* PCIe PLL Configuration */
9233 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9234 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9235 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9236 }
9237