/*	$NetBSD: if_wm.c,v 1.275 2014/07/11 08:34:27 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.275 2014/07/11 08:34:27 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
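
/*
 * Illustrative usage (not part of the original source): with WM_DEBUG
 * defined, a call such as
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 *
 * prints only when the WM_DEBUG_LINK bit is set in wm_debug.  Note the
 * extra parentheses around the printf-style argument list; the macro
 * expands "y" directly after printf.
 */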

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
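
/*
 * A minimal sketch of the ring arithmetic above (illustrative only):
 * because WM_NRXDESC and WM_NTXDESC(sc) are powers of two,
 * "(x + 1) & (size - 1)" wraps an index without a division.  With
 * WM_NRXDESC == 256, for example, WM_NEXTRX(255) == 0 and
 * WM_PREVRX(0) == 255.  This is also why sc_ntxdesc and sc_txnum
 * (below) must be powers of two.
 */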

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_txrx_lock;		/* lock for tx/rx operations */
					/* XXX need separation? */
};

#define	WM_LOCK(_sc)	if ((_sc)->sc_txrx_lock) mutex_enter((_sc)->sc_txrx_lock)
#define	WM_UNLOCK(_sc)	if ((_sc)->sc_txrx_lock) mutex_exit((_sc)->sc_txrx_lock)
#define	WM_LOCKED(_sc)	(!(_sc)->sc_txrx_lock || mutex_owned((_sc)->sc_txrx_lock))

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
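
/*
 * Illustrative sketch (not part of the original source): the Rx-chain
 * macros keep a singly-linked mbuf list with a tail pointer, so appending
 * a buffer is O(1).  Assembling a packet that spans two buffers looks
 * roughly like:
 *
 *	WM_RXCHAIN_RESET(sc);		sc_rxhead == NULL
 *	WM_RXCHAIN_LINK(sc, m0);	sc_rxhead == m0
 *	WM_RXCHAIN_LINK(sc, m1);	m0->m_next == m1, sc_rxtail == m1
 */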

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
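
/*
 * A worked example of the wrap-around handling above (illustrative only):
 * with WM_NTXDESC(sc) == 4096, WM_CDTXSYNC(sc, 4094, 4, ops) first syncs
 * descriptors 4094-4095 at the tail of the ring, then descriptors 0-1,
 * i.e. a sync that crosses the end of the ring is split into two
 * bus_dmamap_sync() calls.
 */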

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
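
/*
 * Illustrative note (not part of the original source): sc_align_tweak is
 * either 0 or 2.  With the usual value of 2, the 14-byte Ethernet header
 * ends 16 bytes into the buffer, leaving the IP header 4-byte aligned;
 * the tweak is set to 0 when received frames could exceed 2K - 2 bytes,
 * as described in the comment above.
 */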

static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the
			 * 82580 (and newer?) chips have no
			 * PCI_MAPREG_TYPE_IO; that's not a problem,
			 * since those newer chips don't have this bug
			 * in the first place.
			 *
			 * The i8254x apparently doesn't respond when
			 * the I/O BAR is 0, which looks somewhat like
			 * it hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
				    PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
				    PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
1558
1559 /* clear interesting stat counters */
1560 CSR_READ(sc, WMREG_COLC);
1561 CSR_READ(sc, WMREG_RXERRC);
1562
1563 /* get PHY control from SMBus to PCIe */
1564 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1565 || (sc->sc_type == WM_T_PCH_LPT))
1566 wm_smbustopci(sc);
1567
1568 /*
1569 * Reset the chip to a known state.
1570 */
1571 wm_reset(sc);
1572
1573 /*
1574 * Get some information about the EEPROM.
1575 */
1576 switch (sc->sc_type) {
1577 case WM_T_82542_2_0:
1578 case WM_T_82542_2_1:
1579 case WM_T_82543:
1580 case WM_T_82544:
1581 /* Microwire */
1582 sc->sc_ee_addrbits = 6;
1583 break;
1584 case WM_T_82540:
1585 case WM_T_82545:
1586 case WM_T_82545_3:
1587 case WM_T_82546:
1588 case WM_T_82546_3:
1589 /* Microwire */
1590 reg = CSR_READ(sc, WMREG_EECD);
1591 if (reg & EECD_EE_SIZE)
1592 sc->sc_ee_addrbits = 8;
1593 else
1594 sc->sc_ee_addrbits = 6;
1595 sc->sc_flags |= WM_F_LOCK_EECD;
1596 break;
1597 case WM_T_82541:
1598 case WM_T_82541_2:
1599 case WM_T_82547:
1600 case WM_T_82547_2:
1601 reg = CSR_READ(sc, WMREG_EECD);
1602 if (reg & EECD_EE_TYPE) {
1603 /* SPI */
1604 wm_set_spiaddrbits(sc);
1605 } else
1606 /* Microwire */
1607 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1608 sc->sc_flags |= WM_F_LOCK_EECD;
1609 break;
1610 case WM_T_82571:
1611 case WM_T_82572:
1612 /* SPI */
1613 wm_set_spiaddrbits(sc);
1614 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1615 break;
1616 case WM_T_82573:
1617 sc->sc_flags |= WM_F_LOCK_SWSM;
1618 /* FALLTHROUGH */
1619 case WM_T_82574:
1620 case WM_T_82583:
1621 if (wm_is_onboard_nvm_eeprom(sc) == 0)
1622 sc->sc_flags |= WM_F_EEPROM_FLASH;
1623 else {
1624 /* SPI */
1625 wm_set_spiaddrbits(sc);
1626 }
1627 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1628 break;
1629 case WM_T_82575:
1630 case WM_T_82576:
1631 case WM_T_82580:
1632 case WM_T_82580ER:
1633 case WM_T_I350:
1634 case WM_T_I354: /* XXXX ok? */
1635 case WM_T_80003:
1636 /* SPI */
1637 wm_set_spiaddrbits(sc);
1638 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1639 | WM_F_LOCK_SWSM;
1640 break;
1641 case WM_T_ICH8:
1642 case WM_T_ICH9:
1643 case WM_T_ICH10:
1644 case WM_T_PCH:
1645 case WM_T_PCH2:
1646 case WM_T_PCH_LPT:
1647 /* FLASH */
1648 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1649 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1650 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1651 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1652 aprint_error_dev(sc->sc_dev,
1653 "can't map FLASH registers\n");
1654 return;
1655 }
1656 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1657 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1658 ICH_FLASH_SECTOR_SIZE;
1659 sc->sc_ich8_flash_bank_size =
1660 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1661 sc->sc_ich8_flash_bank_size -=
1662 (reg & ICH_GFPREG_BASE_MASK);
1663 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1664 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
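		/*
		 * Worked example with a hypothetical GFPREG value of
		 * 0x00200010 and the usual 4KB sector size: the base is
		 * 0x10 sectors (64KB into the flash), the bank region is
		 * (0x20 + 1 - 0x10) = 17 sectors = 68KB, and the division
		 * by 2 * sizeof(uint16_t) converts that to 16-bit words
		 * per bank, the region holding two banks.
		 */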
1665 break;
1666 case WM_T_I210:
1667 case WM_T_I211:
1668 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1669 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1670 break;
1671 default:
1672 break;
1673 }
1674
1675 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1676 switch (sc->sc_type) {
1677 case WM_T_82571:
1678 case WM_T_82572:
1679 reg = CSR_READ(sc, WMREG_SWSM2);
1680 		if ((reg & SWSM2_LOCK) == 0) {
1681 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1682 force_clear_smbi = true;
1683 } else
1684 force_clear_smbi = false;
1685 break;
1686 default:
1687 force_clear_smbi = true;
1688 break;
1689 }
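	/*
	 * SWSM_SMBI is the software semaphore bit; a boot agent that
	 * exits without releasing it would make later NVM/PHY semaphore
	 * acquisitions time out, so clear it (and warn) here.
	 */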
1690 if (force_clear_smbi) {
1691 reg = CSR_READ(sc, WMREG_SWSM);
1692 		if ((reg & SWSM_SMBI) != 0)
1693 aprint_error_dev(sc->sc_dev,
1694 "Please update the Bootagent\n");
1695 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1696 }
1697
1698 /*
1699 	 * Defer printing the EEPROM type until after verifying the checksum.
1700 * This allows the EEPROM type to be printed correctly in the case
1701 * that no EEPROM is attached.
1702 */
1703 /*
1704 * Validate the EEPROM checksum. If the checksum fails, flag
1705 * this for later, so we can fail future reads from the EEPROM.
1706 */
1707 if (wm_validate_eeprom_checksum(sc)) {
1708 /*
1709 		 * Retry the read because some PCI-e parts fail the
1710 		 * first check due to the link being in a sleep state.
1711 */
1712 if (wm_validate_eeprom_checksum(sc))
1713 sc->sc_flags |= WM_F_EEPROM_INVALID;
1714 }
1715
1716 /* Set device properties (macflags) */
1717 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1718
1719 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1720 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1721 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1722 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1723 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1724 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1725 } else {
1726 if (sc->sc_flags & WM_F_EEPROM_SPI)
1727 eetype = "SPI";
1728 else
1729 eetype = "MicroWire";
1730 aprint_verbose_dev(sc->sc_dev,
1731 "%u word (%d address bits) %s EEPROM\n",
1732 1U << sc->sc_ee_addrbits,
1733 sc->sc_ee_addrbits, eetype);
1734 }
1735
1736 switch (sc->sc_type) {
1737 case WM_T_82571:
1738 case WM_T_82572:
1739 case WM_T_82573:
1740 case WM_T_82574:
1741 case WM_T_82583:
1742 case WM_T_80003:
1743 case WM_T_ICH8:
1744 case WM_T_ICH9:
1745 case WM_T_ICH10:
1746 case WM_T_PCH:
1747 case WM_T_PCH2:
1748 case WM_T_PCH_LPT:
1749 if (wm_check_mng_mode(sc) != 0)
1750 wm_get_hw_control(sc);
1751 break;
1752 default:
1753 break;
1754 }
1755 wm_get_wakeup(sc);
1756 /*
1757 * Read the Ethernet address from the EEPROM, if not first found
1758 * in device properties.
1759 */
1760 ea = prop_dictionary_get(dict, "mac-address");
1761 if (ea != NULL) {
1762 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1763 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1764 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1765 } else {
1766 if (wm_read_mac_addr(sc, enaddr) != 0) {
1767 aprint_error_dev(sc->sc_dev,
1768 "unable to read Ethernet address\n");
1769 return;
1770 }
1771 }
1772
1773 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1774 ether_sprintf(enaddr));
1775
1776 /*
1777 * Read the config info from the EEPROM, and set up various
1778 * bits in the control registers based on their contents.
1779 */
1780 pn = prop_dictionary_get(dict, "i82543-cfg1");
1781 if (pn != NULL) {
1782 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1783 cfg1 = (uint16_t) prop_number_integer_value(pn);
1784 } else {
1785 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1786 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1787 return;
1788 }
1789 }
1790
1791 pn = prop_dictionary_get(dict, "i82543-cfg2");
1792 if (pn != NULL) {
1793 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1794 cfg2 = (uint16_t) prop_number_integer_value(pn);
1795 } else {
1796 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1797 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1798 return;
1799 }
1800 }
1801
1802 /* check for WM_F_WOL */
1803 switch (sc->sc_type) {
1804 case WM_T_82542_2_0:
1805 case WM_T_82542_2_1:
1806 case WM_T_82543:
1807 /* dummy? */
1808 eeprom_data = 0;
1809 apme_mask = EEPROM_CFG3_APME;
1810 break;
1811 case WM_T_82544:
1812 apme_mask = EEPROM_CFG2_82544_APM_EN;
1813 eeprom_data = cfg2;
1814 break;
1815 case WM_T_82546:
1816 case WM_T_82546_3:
1817 case WM_T_82571:
1818 case WM_T_82572:
1819 case WM_T_82573:
1820 case WM_T_82574:
1821 case WM_T_82583:
1822 case WM_T_80003:
1823 default:
1824 apme_mask = EEPROM_CFG3_APME;
1825 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1826 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1827 break;
1828 case WM_T_82575:
1829 case WM_T_82576:
1830 case WM_T_82580:
1831 case WM_T_82580ER:
1832 case WM_T_I350:
1833 case WM_T_I354: /* XXX ok? */
1834 case WM_T_ICH8:
1835 case WM_T_ICH9:
1836 case WM_T_ICH10:
1837 case WM_T_PCH:
1838 case WM_T_PCH2:
1839 case WM_T_PCH_LPT:
1840 /* XXX The funcid should be checked on some devices */
1841 apme_mask = WUC_APME;
1842 eeprom_data = CSR_READ(sc, WMREG_WUC);
1843 break;
1844 }
1845
1846 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1847 if ((eeprom_data & apme_mask) != 0)
1848 sc->sc_flags |= WM_F_WOL;
1849 #ifdef WM_DEBUG
1850 if ((sc->sc_flags & WM_F_WOL) != 0)
1851 printf("WOL\n");
1852 #endif
1853
1854 /*
1855 	 * XXX need special handling for some multiple-port cards
1856 	 * to disable a particular port.
1857 */
1858
1859 if (sc->sc_type >= WM_T_82544) {
1860 pn = prop_dictionary_get(dict, "i82543-swdpin");
1861 if (pn != NULL) {
1862 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1863 swdpin = (uint16_t) prop_number_integer_value(pn);
1864 } else {
1865 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1866 aprint_error_dev(sc->sc_dev,
1867 "unable to read SWDPIN\n");
1868 return;
1869 }
1870 }
1871 }
1872
1873 if (cfg1 & EEPROM_CFG1_ILOS)
1874 sc->sc_ctrl |= CTRL_ILOS;
1875 if (sc->sc_type >= WM_T_82544) {
1876 sc->sc_ctrl |=
1877 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1878 CTRL_SWDPIO_SHIFT;
1879 sc->sc_ctrl |=
1880 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1881 CTRL_SWDPINS_SHIFT;
1882 } else {
1883 sc->sc_ctrl |=
1884 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1885 CTRL_SWDPIO_SHIFT;
1886 }
1887
1888 #if 0
1889 if (sc->sc_type >= WM_T_82544) {
1890 if (cfg1 & EEPROM_CFG1_IPS0)
1891 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1892 if (cfg1 & EEPROM_CFG1_IPS1)
1893 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1894 sc->sc_ctrl_ext |=
1895 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1896 CTRL_EXT_SWDPIO_SHIFT;
1897 sc->sc_ctrl_ext |=
1898 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1899 CTRL_EXT_SWDPINS_SHIFT;
1900 } else {
1901 sc->sc_ctrl_ext |=
1902 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1903 CTRL_EXT_SWDPIO_SHIFT;
1904 }
1905 #endif
1906
1907 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1908 #if 0
1909 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1910 #endif
1911
1912 /*
1913 * Set up some register offsets that are different between
1914 * the i82542 and the i82543 and later chips.
1915 */
1916 if (sc->sc_type < WM_T_82543) {
1917 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1918 sc->sc_tdt_reg = WMREG_OLD_TDT;
1919 } else {
1920 sc->sc_rdt_reg = WMREG_RDT;
1921 sc->sc_tdt_reg = WMREG_TDT;
1922 }
1923
1924 if (sc->sc_type == WM_T_PCH) {
1925 uint16_t val;
1926
1927 /* Save the NVM K1 bit setting */
1928 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1929
1930 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1931 sc->sc_nvm_k1_enabled = 1;
1932 else
1933 sc->sc_nvm_k1_enabled = 0;
1934 }
1935
1936 /*
1937 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
1938 * media structures accordingly.
1939 */
1940 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1941 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1942 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
1943 || sc->sc_type == WM_T_82573
1944 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1945 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1946 wm_gmii_mediainit(sc, wmp->wmp_product);
1947 } else if (sc->sc_type < WM_T_82543 ||
1948 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1949 if (wmp->wmp_flags & WMP_F_1000T)
1950 aprint_error_dev(sc->sc_dev,
1951 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1952 wm_tbi_mediainit(sc);
1953 } else {
1954 switch (sc->sc_type) {
1955 case WM_T_82575:
1956 case WM_T_82576:
1957 case WM_T_82580:
1958 case WM_T_82580ER:
1959 case WM_T_I350:
1960 case WM_T_I354:
1961 case WM_T_I210:
1962 case WM_T_I211:
1963 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1964 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1965 case CTRL_EXT_LINK_MODE_1000KX:
1966 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
1967 CSR_WRITE(sc, WMREG_CTRL_EXT,
1968 reg | CTRL_EXT_I2C_ENA);
1969 panic("not supported yet\n");
1970 break;
1971 case CTRL_EXT_LINK_MODE_SGMII:
1972 if (wm_sgmii_uses_mdio(sc)) {
1973 aprint_verbose_dev(sc->sc_dev,
1974 "SGMII(MDIO)\n");
1975 sc->sc_flags |= WM_F_SGMII;
1976 wm_gmii_mediainit(sc,
1977 wmp->wmp_product);
1978 break;
1979 }
1980 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
1981 /*FALLTHROUGH*/
1982 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1983 aprint_verbose_dev(sc->sc_dev, "SERDES\n");
1984 CSR_WRITE(sc, WMREG_CTRL_EXT,
1985 reg | CTRL_EXT_I2C_ENA);
1986 panic("not supported yet\n");
1987 break;
1988 case CTRL_EXT_LINK_MODE_GMII:
1989 default:
1990 CSR_WRITE(sc, WMREG_CTRL_EXT,
1991 reg & ~CTRL_EXT_I2C_ENA);
1992 wm_gmii_mediainit(sc, wmp->wmp_product);
1993 break;
1994 }
1995 break;
1996 default:
1997 if (wmp->wmp_flags & WMP_F_1000X)
1998 aprint_error_dev(sc->sc_dev,
1999 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2000 wm_gmii_mediainit(sc, wmp->wmp_product);
2001 }
2002 }
2003
2004 ifp = &sc->sc_ethercom.ec_if;
2005 xname = device_xname(sc->sc_dev);
2006 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2007 ifp->if_softc = sc;
2008 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2009 ifp->if_ioctl = wm_ioctl;
2010 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2011 ifp->if_start = wm_nq_start;
2012 else
2013 ifp->if_start = wm_start;
2014 ifp->if_watchdog = wm_watchdog;
2015 ifp->if_init = wm_init;
2016 ifp->if_stop = wm_stop;
2017 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2018 IFQ_SET_READY(&ifp->if_snd);
2019
2020 /* Check for jumbo frame */
2021 switch (sc->sc_type) {
2022 case WM_T_82573:
2023 /* XXX limited to 9234 if ASPM is disabled */
2024 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
2025 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
2026 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2027 break;
2028 case WM_T_82571:
2029 case WM_T_82572:
2030 case WM_T_82574:
2031 case WM_T_82575:
2032 case WM_T_82576:
2033 case WM_T_82580:
2034 case WM_T_82580ER:
2035 case WM_T_I350:
2036 case WM_T_I354: /* XXXX ok? */
2037 case WM_T_I210:
2038 case WM_T_I211:
2039 case WM_T_80003:
2040 case WM_T_ICH9:
2041 case WM_T_ICH10:
2042 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2043 case WM_T_PCH_LPT:
2044 /* XXX limited to 9234 */
2045 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2046 break;
2047 case WM_T_PCH:
2048 /* XXX limited to 4096 */
2049 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2050 break;
2051 case WM_T_82542_2_0:
2052 case WM_T_82542_2_1:
2053 case WM_T_82583:
2054 case WM_T_ICH8:
2055 /* No support for jumbo frame */
2056 break;
2057 default:
2058 /* ETHER_MAX_LEN_JUMBO */
2059 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2060 break;
2061 }
2062
2063 /*
2064 	 * If we're an i82543 or greater, we can support VLANs.
2065 */
2066 if (sc->sc_type >= WM_T_82543)
2067 sc->sc_ethercom.ec_capabilities |=
2068 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2069
2070 /*
2071 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2072 	 * on the i82543 and later.
2073 */
2074 if (sc->sc_type >= WM_T_82543) {
2075 ifp->if_capabilities |=
2076 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2077 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2078 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2079 IFCAP_CSUM_TCPv6_Tx |
2080 IFCAP_CSUM_UDPv6_Tx;
2081 }
2082
2083 /*
2084 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2085 *
2086 * 82541GI (8086:1076) ... no
2087 * 82572EI (8086:10b9) ... yes
2088 */
2089 if (sc->sc_type >= WM_T_82571) {
2090 ifp->if_capabilities |=
2091 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2092 }
2093
2094 /*
2095 	 * If we're an i82544 or greater (except i82547), we can do
2096 * TCP segmentation offload.
2097 */
2098 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2099 ifp->if_capabilities |= IFCAP_TSOv4;
2100 }
2101
2102 if (sc->sc_type >= WM_T_82571) {
2103 ifp->if_capabilities |= IFCAP_TSOv6;
2104 }
2105
2106 #ifdef WM_MPSAFE
2107 sc->sc_txrx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2108 #else
2109 sc->sc_txrx_lock = NULL;
2110 #endif
2111
2112 /*
2113 * Attach the interface.
2114 */
2115 if_attach(ifp);
2116 ether_ifattach(ifp, enaddr);
2117 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2118 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2119
2120 #ifdef WM_EVENT_COUNTERS
2121 /* Attach event counters. */
2122 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2123 NULL, xname, "txsstall");
2124 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2125 NULL, xname, "txdstall");
2126 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2127 NULL, xname, "txfifo_stall");
2128 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2129 NULL, xname, "txdw");
2130 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2131 NULL, xname, "txqe");
2132 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2133 NULL, xname, "rxintr");
2134 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2135 NULL, xname, "linkintr");
2136
2137 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2138 NULL, xname, "rxipsum");
2139 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2140 NULL, xname, "rxtusum");
2141 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2142 NULL, xname, "txipsum");
2143 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2144 NULL, xname, "txtusum");
2145 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2146 NULL, xname, "txtusum6");
2147
2148 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2149 NULL, xname, "txtso");
2150 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2151 NULL, xname, "txtso6");
2152 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2153 NULL, xname, "txtsopain");
2154
2155 for (i = 0; i < WM_NTXSEGS; i++) {
2156 snprintf(wm_txseg_evcnt_names[i],
2157 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2158 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2159 NULL, xname, wm_txseg_evcnt_names[i]);
2160 }
2161
2162 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2163 NULL, xname, "txdrop");
2164
2165 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2166 NULL, xname, "tu");
2167
2168 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2169 NULL, xname, "tx_xoff");
2170 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2171 NULL, xname, "tx_xon");
2172 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2173 NULL, xname, "rx_xoff");
2174 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2175 NULL, xname, "rx_xon");
2176 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2177 NULL, xname, "rx_macctl");
2178 #endif /* WM_EVENT_COUNTERS */
2179
2180 if (pmf_device_register(self, wm_suspend, wm_resume))
2181 pmf_class_network_register(self, ifp);
2182 else
2183 aprint_error_dev(self, "couldn't establish power handler\n");
2184
2185 return;
2186
2187 /*
2188 * Free any resources we've allocated during the failed attach
2189 * attempt. Do this in reverse order and fall through.
2190 */
2191 fail_5:
2192 for (i = 0; i < WM_NRXDESC; i++) {
2193 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2194 bus_dmamap_destroy(sc->sc_dmat,
2195 sc->sc_rxsoft[i].rxs_dmamap);
2196 }
2197 fail_4:
2198 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2199 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2200 bus_dmamap_destroy(sc->sc_dmat,
2201 sc->sc_txsoft[i].txs_dmamap);
2202 }
2203 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2204 fail_3:
2205 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2206 fail_2:
2207 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2208 sc->sc_cd_size);
2209 fail_1:
2210 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2211 fail_0:
2212 return;
2213 }
2214
2215 static int
2216 wm_detach(device_t self, int flags __unused)
2217 {
2218 struct wm_softc *sc = device_private(self);
2219 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2220 int i;
2221 #ifndef WM_MPSAFE
2222 int s;
2223
2224 s = splnet();
2225 #endif
2226 /* Stop the interface. Callouts are stopped in it. */
2227 wm_stop(ifp, 1);
2228
2229 #ifndef WM_MPSAFE
2230 splx(s);
2231 #endif
2232
2233 pmf_device_deregister(self);
2234
2235 /* Tell the firmware about the release */
2236 WM_LOCK(sc);
2237 wm_release_manageability(sc);
2238 wm_release_hw_control(sc);
2239 WM_UNLOCK(sc);
2240
2241 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2242
2243 /* Delete all remaining media. */
2244 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2245
2246 ether_ifdetach(ifp);
2247 if_detach(ifp);
2248
2249
2250 /* Unload RX dmamaps and free mbufs */
2251 WM_LOCK(sc);
2252 wm_rxdrain(sc);
2253 WM_UNLOCK(sc);
2254 /* Must unlock here */
2255
2256 /* Free dmamap. It's the same as the end of the wm_attach() function */
2257 for (i = 0; i < WM_NRXDESC; i++) {
2258 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2259 bus_dmamap_destroy(sc->sc_dmat,
2260 sc->sc_rxsoft[i].rxs_dmamap);
2261 }
2262 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2263 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2264 bus_dmamap_destroy(sc->sc_dmat,
2265 sc->sc_txsoft[i].txs_dmamap);
2266 }
2267 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2268 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2269 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2270 sc->sc_cd_size);
2271 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2272
2273 /* Disestablish the interrupt handler */
2274 if (sc->sc_ih != NULL) {
2275 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2276 sc->sc_ih = NULL;
2277 }
2278
2279 /* Unmap the registers */
2280 if (sc->sc_ss) {
2281 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2282 sc->sc_ss = 0;
2283 }
2284
2285 if (sc->sc_ios) {
2286 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2287 sc->sc_ios = 0;
2288 }
2289
2290 if (sc->sc_txrx_lock)
2291 mutex_obj_free(sc->sc_txrx_lock);
2292
2293 return 0;
2294 }
2295
2296 /*
2297 * wm_tx_offload:
2298 *
2299 * Set up TCP/IP checksumming parameters for the
2300 * specified packet.
2301 */
2302 static int
2303 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2304 uint8_t *fieldsp)
2305 {
2306 struct mbuf *m0 = txs->txs_mbuf;
2307 struct livengood_tcpip_ctxdesc *t;
2308 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2309 uint32_t ipcse;
2310 struct ether_header *eh;
2311 int offset, iphl;
2312 uint8_t fields;
2313
2314 /*
2315 * XXX It would be nice if the mbuf pkthdr had offset
2316 * fields for the protocol headers.
2317 */
2318
2319 eh = mtod(m0, struct ether_header *);
2320 	switch (ntohs(eh->ether_type)) {
2321 case ETHERTYPE_IP:
2322 case ETHERTYPE_IPV6:
2323 offset = ETHER_HDR_LEN;
2324 break;
2325
2326 case ETHERTYPE_VLAN:
2327 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2328 break;
2329
2330 default:
2331 /*
2332 * Don't support this protocol or encapsulation.
2333 */
2334 *fieldsp = 0;
2335 *cmdp = 0;
2336 return 0;
2337 }
2338
2339 if ((m0->m_pkthdr.csum_flags &
2340 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2341 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2342 } else {
2343 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2344 }
2345 ipcse = offset + iphl - 1;
2346
2347 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2348 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2349 seg = 0;
2350 fields = 0;
2351
2352 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2353 int hlen = offset + iphl;
2354 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2355
2356 if (__predict_false(m0->m_len <
2357 (hlen + sizeof(struct tcphdr)))) {
2358 /*
2359 * TCP/IP headers are not in the first mbuf; we need
2360 * to do this the slow and painful way. Let's just
2361 * hope this doesn't happen very often.
2362 */
2363 struct tcphdr th;
2364
2365 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2366
2367 m_copydata(m0, hlen, sizeof(th), &th);
2368 if (v4) {
2369 struct ip ip;
2370
2371 m_copydata(m0, offset, sizeof(ip), &ip);
2372 ip.ip_len = 0;
2373 m_copyback(m0,
2374 offset + offsetof(struct ip, ip_len),
2375 sizeof(ip.ip_len), &ip.ip_len);
2376 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2377 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2378 } else {
2379 struct ip6_hdr ip6;
2380
2381 m_copydata(m0, offset, sizeof(ip6), &ip6);
2382 ip6.ip6_plen = 0;
2383 m_copyback(m0,
2384 offset + offsetof(struct ip6_hdr, ip6_plen),
2385 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2386 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2387 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2388 }
2389 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2390 sizeof(th.th_sum), &th.th_sum);
2391
2392 hlen += th.th_off << 2;
2393 } else {
2394 /*
2395 * TCP/IP headers are in the first mbuf; we can do
2396 * this the easy way.
2397 */
2398 struct tcphdr *th;
2399
2400 if (v4) {
2401 struct ip *ip =
2402 (void *)(mtod(m0, char *) + offset);
2403 th = (void *)(mtod(m0, char *) + hlen);
2404
2405 ip->ip_len = 0;
2406 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2407 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2408 } else {
2409 struct ip6_hdr *ip6 =
2410 (void *)(mtod(m0, char *) + offset);
2411 th = (void *)(mtod(m0, char *) + hlen);
2412
2413 ip6->ip6_plen = 0;
2414 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2415 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2416 }
2417 hlen += th->th_off << 2;
2418 }
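		/*
		 * In both paths above, th_sum now holds the checksum of
		 * the IP pseudo-header computed with a zero length; the
		 * hardware expects this seed and folds in the length of
		 * each segment it generates.
		 */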
2419
2420 if (v4) {
2421 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2422 cmdlen |= WTX_TCPIP_CMD_IP;
2423 } else {
2424 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2425 ipcse = 0;
2426 }
2427 cmd |= WTX_TCPIP_CMD_TSE;
2428 cmdlen |= WTX_TCPIP_CMD_TSE |
2429 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2430 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2431 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2432 }
2433
2434 /*
2435 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2436 * offload feature, if we load the context descriptor, we
2437 * MUST provide valid values for IPCSS and TUCSS fields.
2438 */
2439
2440 ipcs = WTX_TCPIP_IPCSS(offset) |
2441 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2442 WTX_TCPIP_IPCSE(ipcse);
2443 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2444 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2445 fields |= WTX_IXSM;
2446 }
2447
2448 offset += iphl;
2449
2450 if (m0->m_pkthdr.csum_flags &
2451 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2452 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2453 fields |= WTX_TXSM;
2454 tucs = WTX_TCPIP_TUCSS(offset) |
2455 WTX_TCPIP_TUCSO(offset +
2456 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2457 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2458 } else if ((m0->m_pkthdr.csum_flags &
2459 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2460 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2461 fields |= WTX_TXSM;
2462 tucs = WTX_TCPIP_TUCSS(offset) |
2463 WTX_TCPIP_TUCSO(offset +
2464 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2465 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2466 } else {
2467 /* Just initialize it to a valid TCP context. */
2468 tucs = WTX_TCPIP_TUCSS(offset) |
2469 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2470 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2471 }
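	/*
	 * Example: for an untagged IPv4/TCP frame with a 20-byte IP
	 * header, the context programmed here is IPCSS 14, IPCSO 24
	 * (14 + offsetof(struct ip, ip_sum)), IPCSE 33, TUCSS 34 and
	 * TUCSO 50 (34 + offsetof(struct tcphdr, th_sum)).
	 */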
2472
2473 /* Fill in the context descriptor. */
2474 t = (struct livengood_tcpip_ctxdesc *)
2475 &sc->sc_txdescs[sc->sc_txnext];
2476 t->tcpip_ipcs = htole32(ipcs);
2477 t->tcpip_tucs = htole32(tucs);
2478 t->tcpip_cmdlen = htole32(cmdlen);
2479 t->tcpip_seg = htole32(seg);
2480 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2481
2482 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2483 txs->txs_ndesc++;
2484
2485 *cmdp = cmd;
2486 *fieldsp = fields;
2487
2488 return 0;
2489 }
2490
2491 static void
2492 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2493 {
2494 struct mbuf *m;
2495 int i;
2496
2497 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2498 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2499 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2500 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2501 m->m_data, m->m_len, m->m_flags);
2502 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2503 i, i == 1 ? "" : "s");
2504 }
2505
2506 /*
2507 * wm_82547_txfifo_stall:
2508 *
2509 * Callout used to wait for the 82547 Tx FIFO to drain,
2510 * reset the FIFO pointers, and restart packet transmission.
2511 */
2512 static void
2513 wm_82547_txfifo_stall(void *arg)
2514 {
2515 struct wm_softc *sc = arg;
2516 #ifndef WM_MPSAFE
2517 int s;
2518
2519 s = splnet();
2520 #endif
2521 WM_LOCK(sc);
2522
2523 if (sc->sc_stopping)
2524 goto out;
2525
2526 if (sc->sc_txfifo_stall) {
2527 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2528 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2529 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2530 /*
2531 * Packets have drained. Stop transmitter, reset
2532 * FIFO pointers, restart transmitter, and kick
2533 * the packet queue.
2534 */
2535 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2536 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2537 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2538 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2539 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2540 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2541 CSR_WRITE(sc, WMREG_TCTL, tctl);
2542 CSR_WRITE_FLUSH(sc);
2543
2544 sc->sc_txfifo_head = 0;
2545 sc->sc_txfifo_stall = 0;
2546 wm_start_locked(&sc->sc_ethercom.ec_if);
2547 } else {
2548 /*
2549 * Still waiting for packets to drain; try again in
2550 * another tick.
2551 */
2552 callout_schedule(&sc->sc_txfifo_ch, 1);
2553 }
2554 }
2555
2556 out:
2557 WM_UNLOCK(sc);
2558 #ifndef WM_MPSAFE
2559 splx(s);
2560 #endif
2561 }
2562
2563 static void
2564 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2565 {
2566 uint32_t reg;
2567
2568 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2569
2570 if (on != 0)
2571 reg |= EXTCNFCTR_GATE_PHY_CFG;
2572 else
2573 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2574
2575 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2576 }
2577
2578 /*
2579 * wm_82547_txfifo_bugchk:
2580 *
2581 * Check for bug condition in the 82547 Tx FIFO. We need to
2582 * prevent enqueueing a packet that would wrap around the end
2583  * of the Tx FIFO ring buffer; otherwise the chip will croak.
2584 *
2585 * We do this by checking the amount of space before the end
2586 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2587 * the Tx FIFO, wait for all remaining packets to drain, reset
2588 * the internal FIFO pointers to the beginning, and restart
2589 * transmission on the interface.
2590 */
2591 #define WM_FIFO_HDR 0x10
2592 #define WM_82547_PAD_LEN 0x3e0
2593 static int
2594 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2595 {
2596 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2597 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
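	/*
	 * Example: a 1514-byte frame rounds up to a len of 1536
	 * including the FIFO header, so with WM_82547_PAD_LEN
	 * (0x3e0 = 992) the test below stalls the FIFO once fewer
	 * than 545 bytes of space remain before the end.
	 */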
2598
2599 /* Just return if already stalled. */
2600 if (sc->sc_txfifo_stall)
2601 return 1;
2602
2603 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2604 /* Stall only occurs in half-duplex mode. */
2605 goto send_packet;
2606 }
2607
2608 if (len >= WM_82547_PAD_LEN + space) {
2609 sc->sc_txfifo_stall = 1;
2610 callout_schedule(&sc->sc_txfifo_ch, 1);
2611 return 1;
2612 }
2613
2614 send_packet:
2615 sc->sc_txfifo_head += len;
2616 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2617 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2618
2619 return 0;
2620 }
2621
2622 /*
2623 * wm_start: [ifnet interface function]
2624 *
2625 * Start packet transmission on the interface.
2626 */
2627 static void
2628 wm_start(struct ifnet *ifp)
2629 {
2630 struct wm_softc *sc = ifp->if_softc;
2631
2632 WM_LOCK(sc);
2633 if (!sc->sc_stopping)
2634 wm_start_locked(ifp);
2635 WM_UNLOCK(sc);
2636 }
2637
2638 static void
2639 wm_start_locked(struct ifnet *ifp)
2640 {
2641 struct wm_softc *sc = ifp->if_softc;
2642 struct mbuf *m0;
2643 struct m_tag *mtag;
2644 struct wm_txsoft *txs;
2645 bus_dmamap_t dmamap;
2646 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2647 bus_addr_t curaddr;
2648 bus_size_t seglen, curlen;
2649 uint32_t cksumcmd;
2650 uint8_t cksumfields;
2651
2652 KASSERT(WM_LOCKED(sc));
2653
2654 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2655 return;
2656
2657 /*
2658 * Remember the previous number of free descriptors.
2659 */
2660 ofree = sc->sc_txfree;
2661
2662 /*
2663 * Loop through the send queue, setting up transmit descriptors
2664 * until we drain the queue, or use up all available transmit
2665 * descriptors.
2666 */
2667 for (;;) {
2668 m0 = NULL;
2669
2670 /* Get a work queue entry. */
2671 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2672 wm_txintr(sc);
2673 if (sc->sc_txsfree == 0) {
2674 DPRINTF(WM_DEBUG_TX,
2675 ("%s: TX: no free job descriptors\n",
2676 device_xname(sc->sc_dev)));
2677 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2678 break;
2679 }
2680 }
2681
2682 /* Grab a packet off the queue. */
2683 IFQ_DEQUEUE(&ifp->if_snd, m0);
2684 if (m0 == NULL)
2685 break;
2686
2687 DPRINTF(WM_DEBUG_TX,
2688 ("%s: TX: have packet to transmit: %p\n",
2689 device_xname(sc->sc_dev), m0));
2690
2691 txs = &sc->sc_txsoft[sc->sc_txsnext];
2692 dmamap = txs->txs_dmamap;
2693
2694 use_tso = (m0->m_pkthdr.csum_flags &
2695 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2696
2697 /*
2698 * So says the Linux driver:
2699 * The controller does a simple calculation to make sure
2700 * there is enough room in the FIFO before initiating the
2701 * DMA for each buffer. The calc is:
2702 * 4 = ceil(buffer len / MSS)
2703 * To make sure we don't overrun the FIFO, adjust the max
2704 * buffer len if the MSS drops.
2705 */
2706 dmamap->dm_maxsegsz =
2707 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2708 ? m0->m_pkthdr.segsz << 2
2709 : WTX_MAX_LEN;
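		/*
		 * Example: with the common 1460-byte MSS this caps each
		 * DMA segment at 5840 bytes (or at WTX_MAX_LEN, if that
		 * is smaller).
		 */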
2710
2711 /*
2712 * Load the DMA map. If this fails, the packet either
2713 * didn't fit in the allotted number of segments, or we
2714 * were short on resources. For the too-many-segments
2715 * case, we simply report an error and drop the packet,
2716 * since we can't sanely copy a jumbo packet to a single
2717 * buffer.
2718 */
2719 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2720 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2721 if (error) {
2722 if (error == EFBIG) {
2723 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2724 log(LOG_ERR, "%s: Tx packet consumes too many "
2725 "DMA segments, dropping...\n",
2726 device_xname(sc->sc_dev));
2727 wm_dump_mbuf_chain(sc, m0);
2728 m_freem(m0);
2729 continue;
2730 }
2731 /*
2732 * Short on resources, just stop for now.
2733 */
2734 DPRINTF(WM_DEBUG_TX,
2735 ("%s: TX: dmamap load failed: %d\n",
2736 device_xname(sc->sc_dev), error));
2737 break;
2738 }
2739
2740 segs_needed = dmamap->dm_nsegs;
2741 if (use_tso) {
2742 /* For sentinel descriptor; see below. */
2743 segs_needed++;
2744 }
2745
2746 /*
2747 * Ensure we have enough descriptors free to describe
2748 * the packet. Note, we always reserve one descriptor
2749 * at the end of the ring due to the semantics of the
2750 * TDT register, plus one more in the event we need
2751 * to load offload context.
2752 */
2753 if (segs_needed > sc->sc_txfree - 2) {
2754 /*
2755 * Not enough free descriptors to transmit this
2756 * packet. We haven't committed anything yet,
2757 * so just unload the DMA map, put the packet
2758 			 * back on the queue, and punt. Notify the upper
2759 * layer that there are no more slots left.
2760 */
2761 DPRINTF(WM_DEBUG_TX,
2762 ("%s: TX: need %d (%d) descriptors, have %d\n",
2763 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2764 segs_needed, sc->sc_txfree - 1));
2765 ifp->if_flags |= IFF_OACTIVE;
2766 bus_dmamap_unload(sc->sc_dmat, dmamap);
2767 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2768 break;
2769 }
2770
2771 /*
2772 * Check for 82547 Tx FIFO bug. We need to do this
2773 * once we know we can transmit the packet, since we
2774 * do some internal FIFO space accounting here.
2775 */
2776 if (sc->sc_type == WM_T_82547 &&
2777 wm_82547_txfifo_bugchk(sc, m0)) {
2778 DPRINTF(WM_DEBUG_TX,
2779 ("%s: TX: 82547 Tx FIFO bug detected\n",
2780 device_xname(sc->sc_dev)));
2781 ifp->if_flags |= IFF_OACTIVE;
2782 bus_dmamap_unload(sc->sc_dmat, dmamap);
2783 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2784 break;
2785 }
2786
2787 /*
2788 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2789 */
2790
2791 DPRINTF(WM_DEBUG_TX,
2792 ("%s: TX: packet has %d (%d) DMA segments\n",
2793 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2794
2795 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2796
2797 /*
2798 * Store a pointer to the packet so that we can free it
2799 * later.
2800 *
2801 * Initially, we consider the number of descriptors the
2802 		 * packet uses to be the number of DMA segments. This may be
2803 * incremented by 1 if we do checksum offload (a descriptor
2804 * is used to set the checksum context).
2805 */
2806 txs->txs_mbuf = m0;
2807 txs->txs_firstdesc = sc->sc_txnext;
2808 txs->txs_ndesc = segs_needed;
2809
2810 /* Set up offload parameters for this packet. */
2811 if (m0->m_pkthdr.csum_flags &
2812 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2813 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2814 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2815 if (wm_tx_offload(sc, txs, &cksumcmd,
2816 &cksumfields) != 0) {
2817 /* Error message already displayed. */
2818 bus_dmamap_unload(sc->sc_dmat, dmamap);
2819 continue;
2820 }
2821 } else {
2822 cksumcmd = 0;
2823 cksumfields = 0;
2824 }
2825
2826 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2827
2828 /* Sync the DMA map. */
2829 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2830 BUS_DMASYNC_PREWRITE);
2831
2832 /*
2833 * Initialize the transmit descriptor.
2834 */
2835 for (nexttx = sc->sc_txnext, seg = 0;
2836 seg < dmamap->dm_nsegs; seg++) {
2837 for (seglen = dmamap->dm_segs[seg].ds_len,
2838 curaddr = dmamap->dm_segs[seg].ds_addr;
2839 seglen != 0;
2840 curaddr += curlen, seglen -= curlen,
2841 nexttx = WM_NEXTTX(sc, nexttx)) {
2842 curlen = seglen;
2843
2844 /*
2845 * So says the Linux driver:
2846 * Work around for premature descriptor
2847 * write-backs in TSO mode. Append a
2848 * 4-byte sentinel descriptor.
2849 */
2850 if (use_tso &&
2851 seg == dmamap->dm_nsegs - 1 &&
2852 curlen > 8)
2853 curlen -= 4;
2854
2855 wm_set_dma_addr(
2856 &sc->sc_txdescs[nexttx].wtx_addr,
2857 curaddr);
2858 sc->sc_txdescs[nexttx].wtx_cmdlen =
2859 htole32(cksumcmd | curlen);
2860 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2861 0;
2862 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2863 cksumfields;
2864 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2865 lasttx = nexttx;
2866
2867 DPRINTF(WM_DEBUG_TX,
2868 ("%s: TX: desc %d: low %#" PRIx64 ", "
2869 "len %#04zx\n",
2870 device_xname(sc->sc_dev), nexttx,
2871 (uint64_t)curaddr, curlen));
2872 }
2873 }
2874
2875 KASSERT(lasttx != -1);
2876
2877 /*
2878 * Set up the command byte on the last descriptor of
2879 * the packet. If we're in the interrupt delay window,
2880 * delay the interrupt.
2881 */
2882 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2883 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2884
2885 /*
2886 * If VLANs are enabled and the packet has a VLAN tag, set
2887 * up the descriptor to encapsulate the packet for us.
2888 *
2889 * This is only valid on the last descriptor of the packet.
2890 */
2891 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2892 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2893 htole32(WTX_CMD_VLE);
2894 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2895 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2896 }
2897
2898 txs->txs_lastdesc = lasttx;
2899
2900 DPRINTF(WM_DEBUG_TX,
2901 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2902 device_xname(sc->sc_dev),
2903 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2904
2905 /* Sync the descriptors we're using. */
2906 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2907 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2908
2909 /* Give the packet to the chip. */
2910 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2911
2912 DPRINTF(WM_DEBUG_TX,
2913 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2914
2915 DPRINTF(WM_DEBUG_TX,
2916 ("%s: TX: finished transmitting packet, job %d\n",
2917 device_xname(sc->sc_dev), sc->sc_txsnext));
2918
2919 /* Advance the tx pointer. */
2920 sc->sc_txfree -= txs->txs_ndesc;
2921 sc->sc_txnext = nexttx;
2922
2923 sc->sc_txsfree--;
2924 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2925
2926 /* Pass the packet to any BPF listeners. */
2927 bpf_mtap(ifp, m0);
2928 }
2929
2930 if (m0 != NULL) {
2931 ifp->if_flags |= IFF_OACTIVE;
2932 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2933 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
2934 m_freem(m0);
2935 }
2936
2937 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2938 /* No more slots; notify upper layer. */
2939 ifp->if_flags |= IFF_OACTIVE;
2940 }
2941
2942 if (sc->sc_txfree != ofree) {
2943 /* Set a watchdog timer in case the chip flakes out. */
2944 ifp->if_timer = 5;
2945 }
2946 }
2947
2948 /*
2949 * wm_nq_tx_offload:
2950 *
2951 * Set up TCP/IP checksumming parameters for the
2952 * specified packet, for NEWQUEUE devices
2953 */
2954 static int
2955 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2956 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2957 {
2958 struct mbuf *m0 = txs->txs_mbuf;
2959 struct m_tag *mtag;
2960 uint32_t vl_len, mssidx, cmdc;
2961 struct ether_header *eh;
2962 int offset, iphl;
2963
2964 /*
2965 * XXX It would be nice if the mbuf pkthdr had offset
2966 * fields for the protocol headers.
2967 */
2968 *cmdlenp = 0;
2969 *fieldsp = 0;
2970
2971 eh = mtod(m0, struct ether_header *);
2972 	switch (ntohs(eh->ether_type)) {
2973 case ETHERTYPE_IP:
2974 case ETHERTYPE_IPV6:
2975 offset = ETHER_HDR_LEN;
2976 break;
2977
2978 case ETHERTYPE_VLAN:
2979 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2980 break;
2981
2982 default:
2983 /*
2984 * Don't support this protocol or encapsulation.
2985 */
2986 *do_csum = false;
2987 return 0;
2988 }
2989 *do_csum = true;
2990 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2991 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2992
2993 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2994 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2995
2996 if ((m0->m_pkthdr.csum_flags &
2997 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2998 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2999 } else {
3000 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
3001 }
3002 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
3003 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
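	/*
	 * Example: an untagged IPv4 frame with a 20-byte header encodes
	 * MACLEN 14 and IPLEN 20 into vl_len here; a VLAN tag, if any,
	 * is merged in below.
	 */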
3004
3005 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
3006 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
3007 << NQTXC_VLLEN_VLAN_SHIFT);
3008 *cmdlenp |= NQTX_CMD_VLE;
3009 }
3010
3011 mssidx = 0;
3012
3013 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
3014 int hlen = offset + iphl;
3015 int tcp_hlen;
3016 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
3017
3018 if (__predict_false(m0->m_len <
3019 (hlen + sizeof(struct tcphdr)))) {
3020 /*
3021 * TCP/IP headers are not in the first mbuf; we need
3022 * to do this the slow and painful way. Let's just
3023 * hope this doesn't happen very often.
3024 */
3025 struct tcphdr th;
3026
3027 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
3028
3029 m_copydata(m0, hlen, sizeof(th), &th);
3030 if (v4) {
3031 struct ip ip;
3032
3033 m_copydata(m0, offset, sizeof(ip), &ip);
3034 ip.ip_len = 0;
3035 m_copyback(m0,
3036 offset + offsetof(struct ip, ip_len),
3037 sizeof(ip.ip_len), &ip.ip_len);
3038 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
3039 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
3040 } else {
3041 struct ip6_hdr ip6;
3042
3043 m_copydata(m0, offset, sizeof(ip6), &ip6);
3044 ip6.ip6_plen = 0;
3045 m_copyback(m0,
3046 offset + offsetof(struct ip6_hdr, ip6_plen),
3047 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
3048 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
3049 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
3050 }
3051 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
3052 sizeof(th.th_sum), &th.th_sum);
3053
3054 tcp_hlen = th.th_off << 2;
3055 } else {
3056 /*
3057 * TCP/IP headers are in the first mbuf; we can do
3058 * this the easy way.
3059 */
3060 struct tcphdr *th;
3061
3062 if (v4) {
3063 struct ip *ip =
3064 (void *)(mtod(m0, char *) + offset);
3065 th = (void *)(mtod(m0, char *) + hlen);
3066
3067 ip->ip_len = 0;
3068 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
3069 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3070 } else {
3071 struct ip6_hdr *ip6 =
3072 (void *)(mtod(m0, char *) + offset);
3073 th = (void *)(mtod(m0, char *) + hlen);
3074
3075 ip6->ip6_plen = 0;
3076 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
3077 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
3078 }
3079 tcp_hlen = th->th_off << 2;
3080 }
3081 hlen += tcp_hlen;
3082 *cmdlenp |= NQTX_CMD_TSE;
3083
3084 if (v4) {
3085 WM_EVCNT_INCR(&sc->sc_ev_txtso);
3086 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
3087 } else {
3088 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
3089 *fieldsp |= NQTXD_FIELDS_TUXSM;
3090 }
3091 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
3092 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
3093 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
3094 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
3095 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
3096 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
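		/*
		 * Example: an MSS of 1460 with a 20-byte TCP header packs
		 * 1460 into the MSS field and 20 into the L4LEN field of
		 * mssidx.
		 */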
3097 } else {
3098 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
3099 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
3100 }
3101
3102 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3103 *fieldsp |= NQTXD_FIELDS_IXSM;
3104 cmdc |= NQTXC_CMD_IP4;
3105 }
3106
3107 if (m0->m_pkthdr.csum_flags &
3108 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3109 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
3110 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3111 cmdc |= NQTXC_CMD_TCP;
3112 } else {
3113 cmdc |= NQTXC_CMD_UDP;
3114 }
3115 cmdc |= NQTXC_CMD_IP4;
3116 *fieldsp |= NQTXD_FIELDS_TUXSM;
3117 }
3118 if (m0->m_pkthdr.csum_flags &
3119 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3120 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
3121 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3122 cmdc |= NQTXC_CMD_TCP;
3123 } else {
3124 cmdc |= NQTXC_CMD_UDP;
3125 }
3126 cmdc |= NQTXC_CMD_IP6;
3127 *fieldsp |= NQTXD_FIELDS_TUXSM;
3128 }
3129
3130 /* Fill in the context descriptor. */
3131 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3132 htole32(vl_len);
3133 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3134 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3135 htole32(cmdc);
3136 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3137 htole32(mssidx);
3138 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3139 DPRINTF(WM_DEBUG_TX,
3140 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3141 sc->sc_txnext, 0, vl_len));
3142 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3143 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3144 txs->txs_ndesc++;
3145 return 0;
3146 }
3147
3148 /*
3149 * wm_nq_start: [ifnet interface function]
3150 *
3151  *	Start packet transmission on the interface for NEWQUEUE devices.
3152 */
3153 static void
3154 wm_nq_start(struct ifnet *ifp)
3155 {
3156 struct wm_softc *sc = ifp->if_softc;
3157
3158 WM_LOCK(sc);
3159 if (!sc->sc_stopping)
3160 wm_nq_start_locked(ifp);
3161 WM_UNLOCK(sc);
3162 }
3163
3164 static void
3165 wm_nq_start_locked(struct ifnet *ifp)
3166 {
3167 struct wm_softc *sc = ifp->if_softc;
3168 struct mbuf *m0;
3169 struct m_tag *mtag;
3170 struct wm_txsoft *txs;
3171 bus_dmamap_t dmamap;
3172 int error, nexttx, lasttx = -1, seg, segs_needed;
3173 bool do_csum, sent;
3174
3175 KASSERT(WM_LOCKED(sc));
3176
3177 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3178 return;
3179
3180 sent = false;
3181
3182 /*
3183 * Loop through the send queue, setting up transmit descriptors
3184 * until we drain the queue, or use up all available transmit
3185 * descriptors.
3186 */
3187 for (;;) {
3188 m0 = NULL;
3189
3190 /* Get a work queue entry. */
3191 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3192 wm_txintr(sc);
3193 if (sc->sc_txsfree == 0) {
3194 DPRINTF(WM_DEBUG_TX,
3195 ("%s: TX: no free job descriptors\n",
3196 device_xname(sc->sc_dev)));
3197 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3198 break;
3199 }
3200 }
3201
3202 /* Grab a packet off the queue. */
3203 IFQ_DEQUEUE(&ifp->if_snd, m0);
3204 if (m0 == NULL)
3205 break;
3206
3207 DPRINTF(WM_DEBUG_TX,
3208 ("%s: TX: have packet to transmit: %p\n",
3209 device_xname(sc->sc_dev), m0));
3210
3211 txs = &sc->sc_txsoft[sc->sc_txsnext];
3212 dmamap = txs->txs_dmamap;
3213
3214 /*
3215 * Load the DMA map. If this fails, the packet either
3216 * didn't fit in the allotted number of segments, or we
3217 * were short on resources. For the too-many-segments
3218 * case, we simply report an error and drop the packet,
3219 * since we can't sanely copy a jumbo packet to a single
3220 * buffer.
3221 */
3222 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3223 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3224 if (error) {
3225 if (error == EFBIG) {
3226 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3227 log(LOG_ERR, "%s: Tx packet consumes too many "
3228 "DMA segments, dropping...\n",
3229 device_xname(sc->sc_dev));
3230 wm_dump_mbuf_chain(sc, m0);
3231 m_freem(m0);
3232 continue;
3233 }
3234 /*
3235 * Short on resources, just stop for now.
3236 */
3237 DPRINTF(WM_DEBUG_TX,
3238 ("%s: TX: dmamap load failed: %d\n",
3239 device_xname(sc->sc_dev), error));
3240 break;
3241 }
3242
3243 segs_needed = dmamap->dm_nsegs;
3244
3245 /*
3246 * Ensure we have enough descriptors free to describe
3247 * the packet. Note, we always reserve one descriptor
3248 * at the end of the ring due to the semantics of the
3249 * TDT register, plus one more in the event we need
3250 * to load offload context.
3251 */
3252 if (segs_needed > sc->sc_txfree - 2) {
3253 /*
3254 * Not enough free descriptors to transmit this
3255 * packet. We haven't committed anything yet,
3256 * so just unload the DMA map, put the packet
3257 			 * back on the queue, and punt. Notify the upper
3258 * layer that there are no more slots left.
3259 */
3260 DPRINTF(WM_DEBUG_TX,
3261 ("%s: TX: need %d (%d) descriptors, have %d\n",
3262 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3263 segs_needed, sc->sc_txfree - 1));
3264 ifp->if_flags |= IFF_OACTIVE;
3265 bus_dmamap_unload(sc->sc_dmat, dmamap);
3266 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3267 break;
3268 }
3269
3270 /*
3271 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3272 */
3273
3274 DPRINTF(WM_DEBUG_TX,
3275 ("%s: TX: packet has %d (%d) DMA segments\n",
3276 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3277
3278 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3279
3280 /*
3281 * Store a pointer to the packet so that we can free it
3282 * later.
3283 *
3284 * Initially, we consider the number of descriptors the
3285 		 * packet uses to be the number of DMA segments. This may be
3286 * incremented by 1 if we do checksum offload (a descriptor
3287 * is used to set the checksum context).
3288 */
3289 txs->txs_mbuf = m0;
3290 txs->txs_firstdesc = sc->sc_txnext;
3291 txs->txs_ndesc = segs_needed;
3292
3293 /* Set up offload parameters for this packet. */
3294 uint32_t cmdlen, fields, dcmdlen;
3295 if (m0->m_pkthdr.csum_flags &
3296 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3297 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3298 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3299 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3300 &do_csum) != 0) {
3301 /* Error message already displayed. */
3302 bus_dmamap_unload(sc->sc_dmat, dmamap);
3303 continue;
3304 }
3305 } else {
3306 do_csum = false;
3307 cmdlen = 0;
3308 fields = 0;
3309 }
3310
3311 /* Sync the DMA map. */
3312 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3313 BUS_DMASYNC_PREWRITE);
3314
3315 /*
3316 * Initialize the first transmit descriptor.
3317 */
3318 nexttx = sc->sc_txnext;
3319 if (!do_csum) {
3320 /* setup a legacy descriptor */
3321 wm_set_dma_addr(
3322 &sc->sc_txdescs[nexttx].wtx_addr,
3323 dmamap->dm_segs[0].ds_addr);
3324 sc->sc_txdescs[nexttx].wtx_cmdlen =
3325 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3326 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3327 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3328 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3329 NULL) {
3330 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3331 htole32(WTX_CMD_VLE);
3332 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3333 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3334 } else {
3335 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3336 }
3337 dcmdlen = 0;
3338 } else {
3339 /* setup an advanced data descriptor */
3340 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3341 htole64(dmamap->dm_segs[0].ds_addr);
3342 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3343 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3344 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3345 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3346 htole32(fields);
3347 DPRINTF(WM_DEBUG_TX,
3348 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3349 device_xname(sc->sc_dev), nexttx,
3350 (uint64_t)dmamap->dm_segs[0].ds_addr));
3351 DPRINTF(WM_DEBUG_TX,
3352 ("\t 0x%08x%08x\n", fields,
3353 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3354 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3355 }
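		/*
		 * dcmdlen carries the descriptor-type bits for the
		 * remaining segments: zero keeps them in legacy format,
		 * while NQTX_DTYP_D | NQTX_CMD_DEXT marks them advanced,
		 * so the continuation loop below serves both cases.
		 */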
3356
3357 lasttx = nexttx;
3358 nexttx = WM_NEXTTX(sc, nexttx);
3359 /*
3360 		 * Fill in the next descriptors; the legacy and advanced
3361 		 * formats are the same here.
3362 */
3363 for (seg = 1; seg < dmamap->dm_nsegs;
3364 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3365 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3366 htole64(dmamap->dm_segs[seg].ds_addr);
3367 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3368 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3369 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3370 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3371 lasttx = nexttx;
3372
3373 DPRINTF(WM_DEBUG_TX,
3374 ("%s: TX: desc %d: %#" PRIx64 ", "
3375 "len %#04zx\n",
3376 device_xname(sc->sc_dev), nexttx,
3377 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3378 dmamap->dm_segs[seg].ds_len));
3379 }
3380
3381 KASSERT(lasttx != -1);
3382
3383 /*
3384 * Set up the command byte on the last descriptor of
3385 * the packet. If we're in the interrupt delay window,
3386 * delay the interrupt.
3387 */
3388 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3389 (NQTX_CMD_EOP | NQTX_CMD_RS));
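		/*
		 * The KASSERT above is what makes the legacy-format store
		 * below safe even when the ring holds advanced
		 * descriptors: the EOP and RS bits sit at the same
		 * positions in both layouts.
		 */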
3390 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3391 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3392
3393 txs->txs_lastdesc = lasttx;
3394
3395 DPRINTF(WM_DEBUG_TX,
3396 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3397 device_xname(sc->sc_dev),
3398 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3399
3400 /* Sync the descriptors we're using. */
3401 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3402 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3403
3404 /* Give the packet to the chip. */
3405 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3406 sent = true;
3407
3408 DPRINTF(WM_DEBUG_TX,
3409 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3410
3411 DPRINTF(WM_DEBUG_TX,
3412 ("%s: TX: finished transmitting packet, job %d\n",
3413 device_xname(sc->sc_dev), sc->sc_txsnext));
3414
3415 /* Advance the tx pointer. */
3416 sc->sc_txfree -= txs->txs_ndesc;
3417 sc->sc_txnext = nexttx;
3418
3419 sc->sc_txsfree--;
3420 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3421
3422 /* Pass the packet to any BPF listeners. */
3423 bpf_mtap(ifp, m0);
3424 }
3425
3426 if (m0 != NULL) {
3427 ifp->if_flags |= IFF_OACTIVE;
3428 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3429 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
3430 m_freem(m0);
3431 }
3432
3433 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3434 /* No more slots; notify upper layer. */
3435 ifp->if_flags |= IFF_OACTIVE;
3436 }
3437
3438 if (sent) {
3439 /* Set a watchdog timer in case the chip flakes out. */
3440 ifp->if_timer = 5;
3441 }
3442 }
3443
3444 /*
3445 * wm_watchdog: [ifnet interface function]
3446 *
3447 * Watchdog timer handler.
3448 */
3449 static void
3450 wm_watchdog(struct ifnet *ifp)
3451 {
3452 struct wm_softc *sc = ifp->if_softc;
3453
3454 /*
3455 * Since we're using delayed interrupts, sweep up
3456 * before we report an error.
3457 */
3458 WM_LOCK(sc);
3459 wm_txintr(sc);
3460 WM_UNLOCK(sc);
3461
3462 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3463 #ifdef WM_DEBUG
3464 int i, j;
3465 struct wm_txsoft *txs;
3466 #endif
3467 log(LOG_ERR,
3468 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3469 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3470 sc->sc_txnext);
3471 ifp->if_oerrors++;
3472 #ifdef WM_DEBUG
3473 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3474 i = WM_NEXTTXS(sc, i)) {
3475 txs = &sc->sc_txsoft[i];
3476 printf("txs %d tx %d -> %d\n",
3477 i, txs->txs_firstdesc, txs->txs_lastdesc);
3478 for (j = txs->txs_firstdesc; ;
3479 j = WM_NEXTTX(sc, j)) {
3480 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3481 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3482 printf("\t %#08x%08x\n",
3483 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3484 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3485 if (j == txs->txs_lastdesc)
3486 break;
3487 }
3488 }
3489 #endif
3490 /* Reset the interface. */
3491 (void) wm_init(ifp);
3492 }
3493
3494 /* Try to get more packets going. */
3495 ifp->if_start(ifp);
3496 }
3497
3498 static int
3499 wm_ifflags_cb(struct ethercom *ec)
3500 {
3501 struct ifnet *ifp = &ec->ec_if;
3502 struct wm_softc *sc = ifp->if_softc;
3503 int change = ifp->if_flags ^ sc->sc_if_flags;
3504 int rc = 0;
3505
3506 WM_LOCK(sc);
3507
3508 if (change != 0)
3509 sc->sc_if_flags = ifp->if_flags;
3510
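	/*
	 * A change to any flag other than the always-changeable
	 * and debug bits requires a full re-init; return ENETRESET
	 * so the caller reinitializes the interface.
	 */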
3511 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
3512 rc = ENETRESET;
3513 goto out;
3514 }
3515
3516 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3517 wm_set_filter(sc);
3518
3519 wm_set_vlan(sc);
3520
3521 out:
3522 WM_UNLOCK(sc);
3523
3524 return rc;
3525 }
3526
3527 /*
3528 * wm_ioctl: [ifnet interface function]
3529 *
3530 * Handle control requests from the operator.
3531 */
3532 static int
3533 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3534 {
3535 struct wm_softc *sc = ifp->if_softc;
3536 struct ifreq *ifr = (struct ifreq *) data;
3537 struct ifaddr *ifa = (struct ifaddr *)data;
3538 struct sockaddr_dl *sdl;
3539 int s, error;
3540
3541 #ifndef WM_MPSAFE
3542 s = splnet();
3543 #endif
3544 WM_LOCK(sc);
3545
3546 switch (cmd) {
3547 case SIOCSIFMEDIA:
3548 case SIOCGIFMEDIA:
3549 /* Flow control requires full-duplex mode. */
3550 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3551 (ifr->ifr_media & IFM_FDX) == 0)
3552 ifr->ifr_media &= ~IFM_ETH_FMASK;
3553 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3554 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3555 /* We can do both TXPAUSE and RXPAUSE. */
3556 ifr->ifr_media |=
3557 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3558 }
3559 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3560 }
3561 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3562 break;
3563 case SIOCINITIFADDR:
3564 if (ifa->ifa_addr->sa_family == AF_LINK) {
3565 sdl = satosdl(ifp->if_dl->ifa_addr);
3566 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3567 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3568 /* unicast address is first multicast entry */
3569 wm_set_filter(sc);
3570 error = 0;
3571 break;
3572 }
3573 /*FALLTHROUGH*/
3574 default:
3575 WM_UNLOCK(sc);
3576 #ifdef WM_MPSAFE
3577 s = splnet();
3578 #endif
3579 		/* ether_ioctl() may call wm_start(), so unlock here */
3580 error = ether_ioctl(ifp, cmd, data);
3581 #ifdef WM_MPSAFE
3582 splx(s);
3583 #endif
3584 WM_LOCK(sc);
3585
3586 if (error != ENETRESET)
3587 break;
3588
3589 error = 0;
3590
3591 if (cmd == SIOCSIFCAP) {
3592 WM_UNLOCK(sc);
3593 error = (*ifp->if_init)(ifp);
3594 WM_LOCK(sc);
3595 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3596 ;
3597 else if (ifp->if_flags & IFF_RUNNING) {
3598 /*
3599 * Multicast list has changed; set the hardware filter
3600 * accordingly.
3601 */
3602 wm_set_filter(sc);
3603 }
3604 break;
3605 }
3606
3607 WM_UNLOCK(sc);
3608
3609 /* Try to get more packets going. */
3610 ifp->if_start(ifp);
3611
3612 #ifndef WM_MPSAFE
3613 splx(s);
3614 #endif
3615 return error;
3616 }
3617
3618 /*
3619 * wm_intr:
3620 *
3621 * Interrupt service routine.
3622 */
3623 static int
3624 wm_intr(void *arg)
3625 {
3626 struct wm_softc *sc = arg;
3627 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3628 uint32_t icr;
3629 int handled = 0;
3630
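	/*
	 * ICR is clear-on-read, so each pass handles the interrupt
	 * causes latched at the time of the read; loop until no
	 * cause we care about remains asserted.
	 */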
3631 while (1 /* CONSTCOND */) {
3632 icr = CSR_READ(sc, WMREG_ICR);
3633 if ((icr & sc->sc_icr) == 0)
3634 break;
3635 rnd_add_uint32(&sc->rnd_source, icr);
3636
3637 WM_LOCK(sc);
3638
3639 if (sc->sc_stopping) {
3640 WM_UNLOCK(sc);
3641 break;
3642 }
3643
3644 handled = 1;
3645
3646 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3647 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3648 DPRINTF(WM_DEBUG_RX,
3649 ("%s: RX: got Rx intr 0x%08x\n",
3650 device_xname(sc->sc_dev),
3651 icr & (ICR_RXDMT0|ICR_RXT0)));
3652 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3653 }
3654 #endif
3655 wm_rxintr(sc);
3656
3657 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3658 if (icr & ICR_TXDW) {
3659 DPRINTF(WM_DEBUG_TX,
3660 ("%s: TX: got TXDW interrupt\n",
3661 device_xname(sc->sc_dev)));
3662 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3663 }
3664 #endif
3665 wm_txintr(sc);
3666
3667 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3668 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3669 wm_linkintr(sc, icr);
3670 }
3671
3672 WM_UNLOCK(sc);
3673
3674 if (icr & ICR_RXO) {
3675 #if defined(WM_DEBUG)
3676 log(LOG_WARNING, "%s: Receive overrun\n",
3677 device_xname(sc->sc_dev));
3678 #endif /* defined(WM_DEBUG) */
3679 }
3680 }
3681
3682 if (handled) {
3683 /* Try to get more packets going. */
3684 ifp->if_start(ifp);
3685 }
3686
3687 return handled;
3688 }
3689
3690 /*
3691 * wm_txintr:
3692 *
3693 * Helper; handle transmit interrupts.
3694 */
3695 static void
3696 wm_txintr(struct wm_softc *sc)
3697 {
3698 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3699 struct wm_txsoft *txs;
3700 uint8_t status;
3701 int i;
3702
3703 if (sc->sc_stopping)
3704 return;
3705
3706 ifp->if_flags &= ~IFF_OACTIVE;
3707
3708 /*
3709 * Go through the Tx list and free mbufs for those
3710 * frames which have been transmitted.
3711 */
3712 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3713 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3714 txs = &sc->sc_txsoft[i];
3715
3716 DPRINTF(WM_DEBUG_TX,
3717 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3718
3719 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3720 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3721
3722 status =
3723 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3724 if ((status & WTX_ST_DD) == 0) {
3725 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3726 BUS_DMASYNC_PREREAD);
3727 break;
3728 }
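		/*
		 * The descriptor-done bit is set, so the whole job has
		 * been written back; jobs complete in order, so all of
		 * this job's descriptors and its mbuf can be reclaimed.
		 */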
3729
3730 DPRINTF(WM_DEBUG_TX,
3731 ("%s: TX: job %d done: descs %d..%d\n",
3732 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3733 txs->txs_lastdesc));
3734
3735 /*
3736 * XXX We should probably be using the statistics
3737 * XXX registers, but I don't know if they exist
3738 * XXX on chips before the i82544.
3739 */
3740
3741 #ifdef WM_EVENT_COUNTERS
3742 if (status & WTX_ST_TU)
3743 WM_EVCNT_INCR(&sc->sc_ev_tu);
3744 #endif /* WM_EVENT_COUNTERS */
3745
3746 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3747 ifp->if_oerrors++;
3748 if (status & WTX_ST_LC)
3749 log(LOG_WARNING, "%s: late collision\n",
3750 device_xname(sc->sc_dev));
3751 else if (status & WTX_ST_EC) {
3752 ifp->if_collisions += 16;
3753 log(LOG_WARNING, "%s: excessive collisions\n",
3754 device_xname(sc->sc_dev));
3755 }
3756 } else
3757 ifp->if_opackets++;
3758
3759 sc->sc_txfree += txs->txs_ndesc;
3760 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3761 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3762 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3763 m_freem(txs->txs_mbuf);
3764 txs->txs_mbuf = NULL;
3765 }
3766
3767 /* Update the dirty transmit buffer pointer. */
3768 sc->sc_txsdirty = i;
3769 DPRINTF(WM_DEBUG_TX,
3770 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3771
3772 /*
3773 * If there are no more pending transmissions, cancel the watchdog
3774 * timer.
3775 */
3776 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3777 ifp->if_timer = 0;
3778 }
3779
3780 /*
3781 * wm_rxintr:
3782 *
3783 * Helper; handle receive interrupts.
3784 */
3785 static void
3786 wm_rxintr(struct wm_softc *sc)
3787 {
3788 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3789 struct wm_rxsoft *rxs;
3790 struct mbuf *m;
3791 int i, len;
3792 uint8_t status, errors;
3793 uint16_t vlantag;
3794
3795 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3796 rxs = &sc->sc_rxsoft[i];
3797
3798 DPRINTF(WM_DEBUG_RX,
3799 ("%s: RX: checking descriptor %d\n",
3800 device_xname(sc->sc_dev), i));
3801
3802 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3803
3804 status = sc->sc_rxdescs[i].wrx_status;
3805 errors = sc->sc_rxdescs[i].wrx_errors;
3806 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3807 vlantag = sc->sc_rxdescs[i].wrx_special;
3808
3809 if ((status & WRX_ST_DD) == 0) {
3810 /*
3811 * We have processed all of the receive descriptors.
3812 */
3813 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3814 break;
3815 }
3816
3817 if (__predict_false(sc->sc_rxdiscard)) {
3818 DPRINTF(WM_DEBUG_RX,
3819 ("%s: RX: discarding contents of descriptor %d\n",
3820 device_xname(sc->sc_dev), i));
3821 WM_INIT_RXDESC(sc, i);
3822 if (status & WRX_ST_EOP) {
3823 /* Reset our state. */
3824 DPRINTF(WM_DEBUG_RX,
3825 ("%s: RX: resetting rxdiscard -> 0\n",
3826 device_xname(sc->sc_dev)));
3827 sc->sc_rxdiscard = 0;
3828 }
3829 continue;
3830 }
3831
3832 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3833 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3834
3835 m = rxs->rxs_mbuf;
3836
3837 /*
3838 * Add a new receive buffer to the ring, unless of
3839 * course the length is zero. Treat the latter as a
3840 * failed mapping.
3841 */
3842 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3843 /*
3844 * Failed, throw away what we've done so
3845 * far, and discard the rest of the packet.
3846 */
3847 ifp->if_ierrors++;
3848 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3849 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3850 WM_INIT_RXDESC(sc, i);
3851 if ((status & WRX_ST_EOP) == 0)
3852 sc->sc_rxdiscard = 1;
3853 if (sc->sc_rxhead != NULL)
3854 m_freem(sc->sc_rxhead);
3855 WM_RXCHAIN_RESET(sc);
3856 DPRINTF(WM_DEBUG_RX,
3857 ("%s: RX: Rx buffer allocation failed, "
3858 "dropping packet%s\n", device_xname(sc->sc_dev),
3859 sc->sc_rxdiscard ? " (discard)" : ""));
3860 continue;
3861 }
3862
3863 m->m_len = len;
3864 sc->sc_rxlen += len;
3865 DPRINTF(WM_DEBUG_RX,
3866 ("%s: RX: buffer at %p len %d\n",
3867 device_xname(sc->sc_dev), m->m_data, len));
3868
3869 /*
3870 * If this is not the end of the packet, keep
3871 * looking.
3872 */
3873 if ((status & WRX_ST_EOP) == 0) {
3874 WM_RXCHAIN_LINK(sc, m);
3875 DPRINTF(WM_DEBUG_RX,
3876 ("%s: RX: not yet EOP, rxlen -> %d\n",
3877 device_xname(sc->sc_dev), sc->sc_rxlen));
3878 continue;
3879 }
3880
3881 /*
3882 		 * Okay, we have the entire packet now. The chip
3883 		 * includes the FCS except on the I350, I354 and
3884 		 * I21[01] (not all chips can be configured to strip
3885 		 * it), so we need to trim it. We may also need to
3886 		 * adjust the length of the previous mbuf in the
3887 		 * chain if the current mbuf is too short. Due to an
3888 		 * erratum, the RCTL_SECRC bit in RCTL is always set
3889 		 * on the I350, so the CRC is already stripped there.
3890 */
3891 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
3892 && (sc->sc_type != WM_T_I210)
3893 && (sc->sc_type != WM_T_I211)) {
3894 if (m->m_len < ETHER_CRC_LEN) {
3895 sc->sc_rxtail->m_len
3896 -= (ETHER_CRC_LEN - m->m_len);
3897 m->m_len = 0;
3898 } else
3899 m->m_len -= ETHER_CRC_LEN;
3900 len = sc->sc_rxlen - ETHER_CRC_LEN;
3901 } else
3902 len = sc->sc_rxlen;
3903
3904 WM_RXCHAIN_LINK(sc, m);
3905
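		/*
		 * NUL-terminate the mbuf chain, take the assembled
		 * packet, and reset the chain state for the next one.
		 */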
3906 *sc->sc_rxtailp = NULL;
3907 m = sc->sc_rxhead;
3908
3909 WM_RXCHAIN_RESET(sc);
3910
3911 DPRINTF(WM_DEBUG_RX,
3912 ("%s: RX: have entire packet, len -> %d\n",
3913 device_xname(sc->sc_dev), len));
3914
3915 /*
3916 * If an error occurred, update stats and drop the packet.
3917 */
3918 if (errors &
3919 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3920 if (errors & WRX_ER_SE)
3921 log(LOG_WARNING, "%s: symbol error\n",
3922 device_xname(sc->sc_dev));
3923 else if (errors & WRX_ER_SEQ)
3924 log(LOG_WARNING, "%s: receive sequence error\n",
3925 device_xname(sc->sc_dev));
3926 else if (errors & WRX_ER_CE)
3927 log(LOG_WARNING, "%s: CRC error\n",
3928 device_xname(sc->sc_dev));
3929 m_freem(m);
3930 continue;
3931 }
3932
3933 /*
3934 * No errors. Receive the packet.
3935 */
3936 m->m_pkthdr.rcvif = ifp;
3937 m->m_pkthdr.len = len;
3938
3939 /*
3940 * If VLANs are enabled, VLAN packets have been unwrapped
3941 * for us. Associate the tag with the packet.
3942 */
3943 /* XXXX should check for i350 and i354 */
3944 if ((status & WRX_ST_VP) != 0) {
3945 VLAN_INPUT_TAG(ifp, m,
3946 le16toh(vlantag),
3947 continue);
3948 }
3949
3950 /*
3951 * Set up checksum info for this packet.
3952 */
3953 if ((status & WRX_ST_IXSM) == 0) {
3954 if (status & WRX_ST_IPCS) {
3955 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3956 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3957 if (errors & WRX_ER_IPE)
3958 m->m_pkthdr.csum_flags |=
3959 M_CSUM_IPv4_BAD;
3960 }
3961 if (status & WRX_ST_TCPCS) {
3962 /*
3963 * Note: we don't know if this was TCP or UDP,
3964 * so we just set both bits, and expect the
3965 * upper layers to deal.
3966 */
3967 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3968 m->m_pkthdr.csum_flags |=
3969 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3970 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3971 if (errors & WRX_ER_TCPE)
3972 m->m_pkthdr.csum_flags |=
3973 M_CSUM_TCP_UDP_BAD;
3974 }
3975 }
3976
3977 ifp->if_ipackets++;
3978
3979 WM_UNLOCK(sc);
3980
3981 /* Pass this up to any BPF listeners. */
3982 bpf_mtap(ifp, m);
3983
3984 /* Pass it on. */
3985 (*ifp->if_input)(ifp, m);
3986
3987 WM_LOCK(sc);
3988
3989 if (sc->sc_stopping)
3990 break;
3991 }
3992
3993 /* Update the receive pointer. */
3994 sc->sc_rxptr = i;
3995
3996 DPRINTF(WM_DEBUG_RX,
3997 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3998 }
3999
4000 /*
4001 * wm_linkintr_gmii:
4002 *
4003 * Helper; handle link interrupts for GMII.
4004 */
4005 static void
4006 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
4007 {
4008
4009 KASSERT(WM_LOCKED(sc));
4010
4011 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
4012 __func__));
4013
4014 if (icr & ICR_LSC) {
4015 DPRINTF(WM_DEBUG_LINK,
4016 ("%s: LINK: LSC -> mii_pollstat\n",
4017 device_xname(sc->sc_dev)));
4018 mii_pollstat(&sc->sc_mii);
4019 if (sc->sc_type == WM_T_82543) {
4020 int miistatus, active;
4021
4022 /*
4023 * With 82543, we need to force speed and
4024 * duplex on the MAC equal to what the PHY
4025 * speed and duplex configuration is.
4026 */
4027 miistatus = sc->sc_mii.mii_media_status;
4028
4029 if (miistatus & IFM_ACTIVE) {
4030 active = sc->sc_mii.mii_media_active;
4031 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4032 switch (IFM_SUBTYPE(active)) {
4033 case IFM_10_T:
4034 sc->sc_ctrl |= CTRL_SPEED_10;
4035 break;
4036 case IFM_100_TX:
4037 sc->sc_ctrl |= CTRL_SPEED_100;
4038 break;
4039 case IFM_1000_T:
4040 sc->sc_ctrl |= CTRL_SPEED_1000;
4041 break;
4042 default:
4043 /*
4044 * fiber?
4045 					 * Should not get here.
4046 */
4047 printf("unknown media (%x)\n",
4048 active);
4049 break;
4050 }
4051 if (active & IFM_FDX)
4052 sc->sc_ctrl |= CTRL_FD;
4053 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4054 }
4055 } else if ((sc->sc_type == WM_T_ICH8)
4056 && (sc->sc_phytype == WMPHY_IGP_3)) {
4057 wm_kmrn_lock_loss_workaround_ich8lan(sc);
4058 } else if (sc->sc_type == WM_T_PCH) {
4059 wm_k1_gig_workaround_hv(sc,
4060 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
4061 }
4062
4063 if ((sc->sc_phytype == WMPHY_82578)
4064 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
4065 == IFM_1000_T)) {
4066
4067 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
4068 delay(200*1000); /* XXX too big */
4069
4070 /* Link stall fix for link up */
4071 wm_gmii_hv_writereg(sc->sc_dev, 1,
4072 HV_MUX_DATA_CTRL,
4073 HV_MUX_DATA_CTRL_GEN_TO_MAC
4074 | HV_MUX_DATA_CTRL_FORCE_SPEED);
4075 wm_gmii_hv_writereg(sc->sc_dev, 1,
4076 HV_MUX_DATA_CTRL,
4077 HV_MUX_DATA_CTRL_GEN_TO_MAC);
4078 }
4079 }
4080 } else if (icr & ICR_RXSEQ) {
4081 DPRINTF(WM_DEBUG_LINK,
4082 ("%s: LINK Receive sequence error\n",
4083 device_xname(sc->sc_dev)));
4084 }
4085 }
4086
4087 /*
4088 * wm_linkintr_tbi:
4089 *
4090 * Helper; handle link interrupts for TBI mode.
4091 */
4092 static void
4093 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
4094 {
4095 uint32_t status;
4096
4097 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
4098 __func__));
4099
4100 status = CSR_READ(sc, WMREG_STATUS);
4101 if (icr & ICR_LSC) {
4102 if (status & STATUS_LU) {
4103 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
4104 device_xname(sc->sc_dev),
4105 (status & STATUS_FD) ? "FDX" : "HDX"));
4106 /*
4107 			 * NOTE: the hardware updates TFCE and RFCE in
4108 			 * CTRL automatically, so re-read it into sc->sc_ctrl.
4109 */
4110
4111 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4112 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4113 sc->sc_fcrtl &= ~FCRTL_XONE;
4114 if (status & STATUS_FD)
4115 sc->sc_tctl |=
4116 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4117 else
4118 sc->sc_tctl |=
4119 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4120 if (sc->sc_ctrl & CTRL_TFCE)
4121 sc->sc_fcrtl |= FCRTL_XONE;
4122 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4123 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4124 WMREG_OLD_FCRTL : WMREG_FCRTL,
4125 sc->sc_fcrtl);
4126 sc->sc_tbi_linkup = 1;
4127 } else {
4128 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
4129 device_xname(sc->sc_dev)));
4130 sc->sc_tbi_linkup = 0;
4131 }
4132 wm_tbi_set_linkled(sc);
4133 } else if (icr & ICR_RXCFG) {
4134 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
4135 device_xname(sc->sc_dev)));
4136 sc->sc_tbi_nrxcfg++;
4137 wm_check_for_link(sc);
4138 } else if (icr & ICR_RXSEQ) {
4139 DPRINTF(WM_DEBUG_LINK,
4140 ("%s: LINK: Receive sequence error\n",
4141 device_xname(sc->sc_dev)));
4142 }
4143 }
4144
4145 /*
4146 * wm_linkintr:
4147 *
4148 * Helper; handle link interrupts.
4149 */
4150 static void
4151 wm_linkintr(struct wm_softc *sc, uint32_t icr)
4152 {
4153
4154 if (sc->sc_flags & WM_F_HAS_MII)
4155 wm_linkintr_gmii(sc, icr);
4156 else
4157 wm_linkintr_tbi(sc, icr);
4158 }
4159
4160 /*
4161 * wm_tick:
4162 *
4163 * One second timer, used to check link status, sweep up
4164 * completed transmit jobs, etc.
4165 */
4166 static void
4167 wm_tick(void *arg)
4168 {
4169 struct wm_softc *sc = arg;
4170 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4171 #ifndef WM_MPSAFE
4172 int s;
4173
4174 s = splnet();
4175 #endif
4176
4177 WM_LOCK(sc);
4178
4179 if (sc->sc_stopping)
4180 goto out;
4181
4182 if (sc->sc_type >= WM_T_82542_2_1) {
4183 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
4184 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
4185 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
4186 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
4187 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
4188 }
4189
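	/*
	 * These MAC statistics registers are clear-on-read, so
	 * adding the current values on each tick accumulates
	 * running totals without double counting.
	 */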
4190 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4191 	ifp->if_ierrors += 0ULL /* ensure quad_t */
4192 + CSR_READ(sc, WMREG_CRCERRS)
4193 + CSR_READ(sc, WMREG_ALGNERRC)
4194 + CSR_READ(sc, WMREG_SYMERRC)
4195 + CSR_READ(sc, WMREG_RXERRC)
4196 + CSR_READ(sc, WMREG_SEC)
4197 + CSR_READ(sc, WMREG_CEXTERR)
4198 + CSR_READ(sc, WMREG_RLEC);
4199 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4200
4201 if (sc->sc_flags & WM_F_HAS_MII)
4202 mii_tick(&sc->sc_mii);
4203 else
4204 wm_tbi_check_link(sc);
4205
4206 out:
4207 WM_UNLOCK(sc);
4208 #ifndef WM_MPSAFE
4209 splx(s);
4210 #endif
4211
4212 if (!sc->sc_stopping)
4213 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4214 }
4215
4216 /*
4217 * wm_reset:
4218 *
4219 * Reset the i82542 chip.
4220 */
4221 static void
4222 wm_reset(struct wm_softc *sc)
4223 {
4224 int phy_reset = 0;
4225 int error = 0;
4226 uint32_t reg, mask;
4227
4228 /*
4229 * Allocate on-chip memory according to the MTU size.
4230 * The Packet Buffer Allocation register must be written
4231 * before the chip is reset.
4232 */
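	/*
	 * PBA determines how the on-chip packet buffer is split
	 * between the receive and transmit FIFOs; on the 82547 the
	 * space above sc_pba is used as the Tx FIFO, whose geometry
	 * is recorded here for the Tx FIFO stall workaround.
	 */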
4233 switch (sc->sc_type) {
4234 case WM_T_82547:
4235 case WM_T_82547_2:
4236 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4237 PBA_22K : PBA_30K;
4238 sc->sc_txfifo_head = 0;
4239 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4240 sc->sc_txfifo_size =
4241 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4242 sc->sc_txfifo_stall = 0;
4243 break;
4244 case WM_T_82571:
4245 case WM_T_82572:
4246 	case WM_T_82575: /* XXX need special handling for jumbo frames */
4247 case WM_T_I350:
4248 case WM_T_I354:
4249 case WM_T_80003:
4250 sc->sc_pba = PBA_32K;
4251 break;
4252 case WM_T_82580:
4253 case WM_T_82580ER:
4254 sc->sc_pba = PBA_35K;
4255 break;
4256 case WM_T_I210:
4257 case WM_T_I211:
4258 sc->sc_pba = PBA_34K;
4259 break;
4260 case WM_T_82576:
4261 sc->sc_pba = PBA_64K;
4262 break;
4263 case WM_T_82573:
4264 sc->sc_pba = PBA_12K;
4265 break;
4266 case WM_T_82574:
4267 case WM_T_82583:
4268 sc->sc_pba = PBA_20K;
4269 break;
4270 case WM_T_ICH8:
4271 sc->sc_pba = PBA_8K;
4272 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4273 break;
4274 case WM_T_ICH9:
4275 case WM_T_ICH10:
4276 sc->sc_pba = PBA_10K;
4277 break;
4278 case WM_T_PCH:
4279 case WM_T_PCH2:
4280 case WM_T_PCH_LPT:
4281 sc->sc_pba = PBA_26K;
4282 break;
4283 default:
4284 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4285 PBA_40K : PBA_48K;
4286 break;
4287 }
4288 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4289
4290 	/* Prevent the PCI-E bus from sticking: disable and drain GIO mastering */
4291 if (sc->sc_flags & WM_F_PCIE) {
4292 int timeout = 800;
4293
4294 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4295 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4296
4297 while (timeout--) {
4298 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4299 == 0)
4300 break;
4301 delay(100);
4302 }
4303 }
4304
4305 /* Set the completion timeout for interface */
4306 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4307 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4308 wm_set_pcie_completion_timeout(sc);
4309
4310 /* Clear interrupt */
4311 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4312
4313 /* Stop the transmit and receive processes. */
4314 CSR_WRITE(sc, WMREG_RCTL, 0);
4315 sc->sc_rctl &= ~RCTL_EN;
4316 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4317 CSR_WRITE_FLUSH(sc);
4318
4319 /* XXX set_tbi_sbp_82543() */
4320
4321 delay(10*1000);
4322
4323 /* Must acquire the MDIO ownership before MAC reset */
4324 switch (sc->sc_type) {
4325 case WM_T_82573:
4326 case WM_T_82574:
4327 case WM_T_82583:
4328 error = wm_get_hw_semaphore_82573(sc);
4329 break;
4330 default:
4331 break;
4332 }
4333
4334 /*
4335 * 82541 Errata 29? & 82547 Errata 28?
4336 * See also the description about PHY_RST bit in CTRL register
4337 * in 8254x_GBe_SDM.pdf.
4338 */
4339 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4340 CSR_WRITE(sc, WMREG_CTRL,
4341 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4342 CSR_WRITE_FLUSH(sc);
4343 delay(5000);
4344 }
4345
4346 switch (sc->sc_type) {
4347 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4348 case WM_T_82541:
4349 case WM_T_82541_2:
4350 case WM_T_82547:
4351 case WM_T_82547_2:
4352 /*
4353 * On some chipsets, a reset through a memory-mapped write
4354 * cycle can cause the chip to reset before completing the
4355 * write cycle. This causes major headache that can be
4356 * avoided by issuing the reset via indirect register writes
4357 * through I/O space.
4358 *
4359 * So, if we successfully mapped the I/O BAR at attach time,
4360 * use that. Otherwise, try our luck with a memory-mapped
4361 * reset.
4362 */
4363 if (sc->sc_flags & WM_F_IOH_VALID)
4364 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4365 else
4366 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4367 break;
4368 case WM_T_82545_3:
4369 case WM_T_82546_3:
4370 /* Use the shadow control register on these chips. */
4371 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4372 break;
4373 case WM_T_80003:
4374 mask = swfwphysem[sc->sc_funcid];
4375 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4376 wm_get_swfw_semaphore(sc, mask);
4377 CSR_WRITE(sc, WMREG_CTRL, reg);
4378 wm_put_swfw_semaphore(sc, mask);
4379 break;
4380 case WM_T_ICH8:
4381 case WM_T_ICH9:
4382 case WM_T_ICH10:
4383 case WM_T_PCH:
4384 case WM_T_PCH2:
4385 case WM_T_PCH_LPT:
4386 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4387 if (wm_check_reset_block(sc) == 0) {
4388 /*
4389 * Gate automatic PHY configuration by hardware on
4390 * non-managed 82579
4391 */
4392 if ((sc->sc_type == WM_T_PCH2)
4393 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4394 != 0))
4395 wm_gate_hw_phy_config_ich8lan(sc, 1);
4396 
4398 reg |= CTRL_PHY_RESET;
4399 phy_reset = 1;
4400 }
4401 wm_get_swfwhw_semaphore(sc);
4402 CSR_WRITE(sc, WMREG_CTRL, reg);
4403 		/* Don't insert a completion barrier during reset */
4404 delay(20*1000);
4405 wm_put_swfwhw_semaphore(sc);
4406 break;
4407 case WM_T_82542_2_0:
4408 case WM_T_82542_2_1:
4409 case WM_T_82543:
4410 case WM_T_82540:
4411 case WM_T_82545:
4412 case WM_T_82546:
4413 case WM_T_82571:
4414 case WM_T_82572:
4415 case WM_T_82573:
4416 case WM_T_82574:
4417 case WM_T_82575:
4418 case WM_T_82576:
4419 case WM_T_82580:
4420 case WM_T_82580ER:
4421 case WM_T_82583:
4422 case WM_T_I350:
4423 case WM_T_I354:
4424 case WM_T_I210:
4425 case WM_T_I211:
4426 default:
4427 /* Everything else can safely use the documented method. */
4428 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4429 break;
4430 }
4431
4432 /* Must release the MDIO ownership after MAC reset */
4433 switch (sc->sc_type) {
4434 case WM_T_82573:
4435 case WM_T_82574:
4436 case WM_T_82583:
4437 if (error == 0)
4438 wm_put_hw_semaphore_82573(sc);
4439 break;
4440 default:
4441 break;
4442 }
4443
4444 if (phy_reset != 0)
4445 wm_get_cfg_done(sc);
4446
4447 /* reload EEPROM */
4448 switch (sc->sc_type) {
4449 case WM_T_82542_2_0:
4450 case WM_T_82542_2_1:
4451 case WM_T_82543:
4452 case WM_T_82544:
4453 delay(10);
4454 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4455 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4456 CSR_WRITE_FLUSH(sc);
4457 delay(2000);
4458 break;
4459 case WM_T_82540:
4460 case WM_T_82545:
4461 case WM_T_82545_3:
4462 case WM_T_82546:
4463 case WM_T_82546_3:
4464 delay(5*1000);
4465 /* XXX Disable HW ARPs on ASF enabled adapters */
4466 break;
4467 case WM_T_82541:
4468 case WM_T_82541_2:
4469 case WM_T_82547:
4470 case WM_T_82547_2:
4471 delay(20000);
4472 /* XXX Disable HW ARPs on ASF enabled adapters */
4473 break;
4474 case WM_T_82571:
4475 case WM_T_82572:
4476 case WM_T_82573:
4477 case WM_T_82574:
4478 case WM_T_82583:
4479 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4480 delay(10);
4481 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4482 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4483 CSR_WRITE_FLUSH(sc);
4484 }
4485 /* check EECD_EE_AUTORD */
4486 wm_get_auto_rd_done(sc);
4487 /*
4488 		 * PHY configuration from the NVM starts only after
4489 		 * EECD_AUTO_RD is set.
4490 */
4491 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4492 || (sc->sc_type == WM_T_82583))
4493 delay(25*1000);
4494 break;
4495 case WM_T_82575:
4496 case WM_T_82576:
4497 case WM_T_82580:
4498 case WM_T_82580ER:
4499 case WM_T_I350:
4500 case WM_T_I354:
4501 case WM_T_I210:
4502 case WM_T_I211:
4503 case WM_T_80003:
4504 /* check EECD_EE_AUTORD */
4505 wm_get_auto_rd_done(sc);
4506 break;
4507 case WM_T_ICH8:
4508 case WM_T_ICH9:
4509 case WM_T_ICH10:
4510 case WM_T_PCH:
4511 case WM_T_PCH2:
4512 case WM_T_PCH_LPT:
4513 break;
4514 default:
4515 panic("%s: unknown type\n", __func__);
4516 }
4517
4518 /* Check whether EEPROM is present or not */
4519 switch (sc->sc_type) {
4520 case WM_T_82575:
4521 case WM_T_82576:
4522 #if 0 /* XXX */
4523 case WM_T_82580:
4524 case WM_T_82580ER:
4525 #endif
4526 case WM_T_I350:
4527 case WM_T_I354:
4528 case WM_T_ICH8:
4529 case WM_T_ICH9:
4530 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4531 /* Not found */
4532 sc->sc_flags |= WM_F_EEPROM_INVALID;
4533 if ((sc->sc_type == WM_T_82575)
4534 || (sc->sc_type == WM_T_82576)
4535 || (sc->sc_type == WM_T_82580)
4536 || (sc->sc_type == WM_T_82580ER)
4537 || (sc->sc_type == WM_T_I350)
4538 || (sc->sc_type == WM_T_I354))
4539 wm_reset_init_script_82575(sc);
4540 }
4541 break;
4542 default:
4543 break;
4544 }
4545
4546 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4547 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4548 /* clear global device reset status bit */
4549 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4550 }
4551
4552 /* Clear any pending interrupt events. */
4553 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4554 reg = CSR_READ(sc, WMREG_ICR);
4555
4556 /* reload sc_ctrl */
4557 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4558
4559 if (sc->sc_type == WM_T_I350)
4560 wm_set_eee_i350(sc);
4561
4562 /* dummy read from WUC */
4563 if (sc->sc_type == WM_T_PCH)
4564 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4565 /*
4566 * For PCH, this write will make sure that any noise will be detected
4567 * as a CRC error and be dropped rather than show up as a bad packet
4568 * to the DMA engine
4569 */
4570 if (sc->sc_type == WM_T_PCH)
4571 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4572
4573 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4574 CSR_WRITE(sc, WMREG_WUC, 0);
4575
4576 /* XXX need special handling for 82580 */
4577 }
4578
4579 static void
4580 wm_set_vlan(struct wm_softc *sc)
4581 {
4582 /* Deal with VLAN enables. */
4583 if (VLAN_ATTACHED(&sc->sc_ethercom))
4584 sc->sc_ctrl |= CTRL_VME;
4585 else
4586 sc->sc_ctrl &= ~CTRL_VME;
4587
4588 /* Write the control registers. */
4589 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4590 }
4591
4592 /*
4593 * wm_init: [ifnet interface function]
4594 *
4595 * Initialize the interface.
4596 */
4597 static int
4598 wm_init(struct ifnet *ifp)
4599 {
4600 struct wm_softc *sc = ifp->if_softc;
4601 int ret;
4602
4603 WM_LOCK(sc);
4604 ret = wm_init_locked(ifp);
4605 WM_UNLOCK(sc);
4606
4607 return ret;
4608 }
4609
4610 static int
4611 wm_init_locked(struct ifnet *ifp)
4612 {
4613 struct wm_softc *sc = ifp->if_softc;
4614 struct wm_rxsoft *rxs;
4615 int i, j, trynum, error = 0;
4616 uint32_t reg;
4617
4618 KASSERT(WM_LOCKED(sc));
4619 /*
4620 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4621 	 * There is a small but measurable benefit to avoiding the adjustment
4622 * of the descriptor so that the headers are aligned, for normal mtu,
4623 * on such platforms. One possibility is that the DMA itself is
4624 * slightly more efficient if the front of the entire packet (instead
4625 * of the front of the headers) is aligned.
4626 *
4627 * Note we must always set align_tweak to 0 if we are using
4628 * jumbo frames.
4629 */
4630 #ifdef __NO_STRICT_ALIGNMENT
4631 sc->sc_align_tweak = 0;
4632 #else
4633 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4634 sc->sc_align_tweak = 0;
4635 else
4636 sc->sc_align_tweak = 2;
4637 #endif /* __NO_STRICT_ALIGNMENT */
4638
4639 /* Cancel any pending I/O. */
4640 wm_stop_locked(ifp, 0);
4641
4642 /* update statistics before reset */
4643 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4644 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4645
4646 /* Reset the chip to a known state. */
4647 wm_reset(sc);
4648
4649 switch (sc->sc_type) {
4650 case WM_T_82571:
4651 case WM_T_82572:
4652 case WM_T_82573:
4653 case WM_T_82574:
4654 case WM_T_82583:
4655 case WM_T_80003:
4656 case WM_T_ICH8:
4657 case WM_T_ICH9:
4658 case WM_T_ICH10:
4659 case WM_T_PCH:
4660 case WM_T_PCH2:
4661 case WM_T_PCH_LPT:
4662 if (wm_check_mng_mode(sc) != 0)
4663 wm_get_hw_control(sc);
4664 break;
4665 default:
4666 break;
4667 }
4668
4669 /* Reset the PHY. */
4670 if (sc->sc_flags & WM_F_HAS_MII)
4671 wm_gmii_reset(sc);
4672
4673 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4674 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4675 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
4676 || (sc->sc_type == WM_T_PCH_LPT))
4677 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4678
4679 /* Initialize the transmit descriptor ring. */
4680 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4681 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4682 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4683 sc->sc_txfree = WM_NTXDESC(sc);
4684 sc->sc_txnext = 0;
4685
4686 if (sc->sc_type < WM_T_82543) {
4687 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4688 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4689 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4690 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4691 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4692 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4693 } else {
4694 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4695 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4696 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4697 CSR_WRITE(sc, WMREG_TDH, 0);
4698 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4699 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4700
4701 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4702 /*
4703 * Don't write TDT before TCTL.EN is set.
4704 * See the document.
4705 */
4706 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4707 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4708 | TXDCTL_WTHRESH(0));
4709 else {
4710 CSR_WRITE(sc, WMREG_TDT, 0);
4711 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4712 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4713 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4714 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4715 }
4716 }
4717 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4718 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4719
4720 /* Initialize the transmit job descriptors. */
4721 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4722 sc->sc_txsoft[i].txs_mbuf = NULL;
4723 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4724 sc->sc_txsnext = 0;
4725 sc->sc_txsdirty = 0;
4726
4727 /*
4728 * Initialize the receive descriptor and receive job
4729 * descriptor rings.
4730 */
4731 if (sc->sc_type < WM_T_82543) {
4732 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4733 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4734 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4735 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4736 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4737 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4738
4739 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4740 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4741 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4742 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4743 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4744 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4745 } else {
4746 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4747 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4748 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4749 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4750 CSR_WRITE(sc, WMREG_EITR(0), 450);
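			/*
			 * SRRCTL expresses the receive buffer size in
			 * units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
			 * so MCLBYTES must be a multiple of that unit.
			 */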
4751 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4752 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4753 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4754 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4755 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4756 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4757 | RXDCTL_WTHRESH(1));
4758 } else {
4759 CSR_WRITE(sc, WMREG_RDH, 0);
4760 CSR_WRITE(sc, WMREG_RDT, 0);
4761 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4762 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4763 }
4764 }
4765 for (i = 0; i < WM_NRXDESC; i++) {
4766 rxs = &sc->sc_rxsoft[i];
4767 if (rxs->rxs_mbuf == NULL) {
4768 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4769 log(LOG_ERR, "%s: unable to allocate or map "
4770 "rx buffer %d, error = %d\n",
4771 device_xname(sc->sc_dev), i, error);
4772 /*
4773 * XXX Should attempt to run with fewer receive
4774 * XXX buffers instead of just failing.
4775 */
4776 wm_rxdrain(sc);
4777 goto out;
4778 }
4779 } else {
4780 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4781 WM_INIT_RXDESC(sc, i);
4782 /*
4783 			 * On 82575 and newer devices, the RX descriptors
4784 			 * must be initialized after RCTL.EN is set in
4785 			 * wm_set_filter().
4786 */
4787 }
4788 }
4789 sc->sc_rxptr = 0;
4790 sc->sc_rxdiscard = 0;
4791 WM_RXCHAIN_RESET(sc);
4792
4793 /*
4794 * Clear out the VLAN table -- we don't use it (yet).
4795 */
4796 CSR_WRITE(sc, WMREG_VET, 0);
4797 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4798 trynum = 10; /* Due to hw errata */
4799 else
4800 trynum = 1;
4801 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4802 for (j = 0; j < trynum; j++)
4803 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4804
4805 /*
4806 * Set up flow-control parameters.
4807 *
4808 * XXX Values could probably stand some tuning.
4809 */
4810 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4811 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4812 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4813 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4814 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4815 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4816 }
4817
4818 sc->sc_fcrtl = FCRTL_DFLT;
4819 if (sc->sc_type < WM_T_82543) {
4820 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4821 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4822 } else {
4823 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4824 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4825 }
4826
4827 if (sc->sc_type == WM_T_80003)
4828 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4829 else
4830 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4831
4832 /* Writes the control register. */
4833 wm_set_vlan(sc);
4834
4835 if (sc->sc_flags & WM_F_HAS_MII) {
4836 int val;
4837
4838 switch (sc->sc_type) {
4839 case WM_T_80003:
4840 case WM_T_ICH8:
4841 case WM_T_ICH9:
4842 case WM_T_ICH10:
4843 case WM_T_PCH:
4844 case WM_T_PCH2:
4845 case WM_T_PCH_LPT:
4846 /*
4847 			 * Set the MAC to wait the maximum time between each
4848 			 * iteration and to increase the max iterations when
4849 			 * polling the PHY; this fixes erroneous timeouts at
4850 * 10Mbps.
4851 */
4852 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4853 0xFFFF);
4854 val = wm_kmrn_readreg(sc,
4855 KUMCTRLSTA_OFFSET_INB_PARAM);
4856 val |= 0x3F;
4857 wm_kmrn_writereg(sc,
4858 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4859 break;
4860 default:
4861 break;
4862 }
4863
4864 if (sc->sc_type == WM_T_80003) {
4865 val = CSR_READ(sc, WMREG_CTRL_EXT);
4866 val &= ~CTRL_EXT_LINK_MODE_MASK;
4867 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4868
4869 /* Bypass RX and TX FIFO's */
4870 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4871 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4872 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4873 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4874 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4875 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4876 }
4877 }
4878 #if 0
4879 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4880 #endif
4881
4882 /*
4883 * Set up checksum offload parameters.
4884 */
4885 reg = CSR_READ(sc, WMREG_RXCSUM);
4886 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4887 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4888 reg |= RXCSUM_IPOFL;
4889 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4890 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4891 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4892 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4893 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4894
4895 /* Reset TBI's RXCFG count */
4896 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4897
4898 /*
4899 * Set up the interrupt registers.
4900 */
4901 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4902 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4903 ICR_RXO | ICR_RXT0;
4904 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4905 sc->sc_icr |= ICR_RXCFG;
4906 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4907
4908 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4909 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4910 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4911 reg = CSR_READ(sc, WMREG_KABGTXD);
4912 reg |= KABGTXD_BGSQLBIAS;
4913 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4914 }
4915
4916 /* Set up the inter-packet gap. */
4917 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4918
4919 if (sc->sc_type >= WM_T_82543) {
4920 /*
4921 * Set up the interrupt throttling register (units of 256ns)
4922 * Note that a footnote in Intel's documentation says this
4923 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4924 		 * or 10Mbit mode. Empirically, the same appears to be
4925 		 * true for the 1024ns units of the other
4926 * interrupt-related timer registers -- so, really, we ought
4927 * to divide this value by 4 when the link speed is low.
4928 *
4929 * XXX implement this division at link speed change!
4930 */
4931
4932 /*
4933 * For N interrupts/sec, set this value to:
4934 * 1000000000 / (N * 256). Note that we set the
4935 * absolute and packet timer values to this value
4936 * divided by 4 to get "simple timer" behavior.
4937 */
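		/*
		 * For example, sc_itr = 1500 gives an interval of
		 * 1500 * 256ns = 384us, i.e. 1000000000 / (1500 * 256)
		 * ~= 2604 interrupts/sec, matching the value below.
		 */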
4938
4939 sc->sc_itr = 1500; /* 2604 ints/sec */
4940 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4941 }
4942
4943 /* Set the VLAN ethernetype. */
4944 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4945
4946 /*
4947 * Set up the transmit control register; we start out with
4948 	 * a collision distance suitable for FDX, but update it when
4949 * we resolve the media type.
4950 */
4951 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4952 | TCTL_CT(TX_COLLISION_THRESHOLD)
4953 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4954 if (sc->sc_type >= WM_T_82571)
4955 sc->sc_tctl |= TCTL_MULR;
4956 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4957
4958 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4959 /*
4960 * Write TDT after TCTL.EN is set.
4961 * See the document.
4962 */
4963 CSR_WRITE(sc, WMREG_TDT, 0);
4964 }
4965
4966 if (sc->sc_type == WM_T_80003) {
4967 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4968 reg &= ~TCTL_EXT_GCEX_MASK;
4969 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4970 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4971 }
4972
4973 /* Set the media. */
4974 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4975 goto out;
4976
4977 /* Configure for OS presence */
4978 wm_init_manageability(sc);
4979
4980 /*
4981 * Set up the receive control register; we actually program
4982 * the register when we set the receive filter. Use multicast
4983 * address offset type 0.
4984 *
4985 * Only the i82544 has the ability to strip the incoming
4986 * CRC, so we don't enable that feature.
4987 */
4988 sc->sc_mchash_type = 0;
4989 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4990 | RCTL_MO(sc->sc_mchash_type);
4991
4992 /*
4993 * The I350 has a bug where it always strips the CRC whether
4994 * asked to or not. So ask for stripped CRC here and cope in rxeof
4995 */
4996 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4997 || (sc->sc_type == WM_T_I210))
4998 sc->sc_rctl |= RCTL_SECRC;
4999
5000 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5001 && (ifp->if_mtu > ETHERMTU)) {
5002 sc->sc_rctl |= RCTL_LPE;
5003 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5004 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5005 }
5006
5007 if (MCLBYTES == 2048) {
5008 sc->sc_rctl |= RCTL_2k;
5009 } else {
5010 if (sc->sc_type >= WM_T_82543) {
5011 switch (MCLBYTES) {
5012 case 4096:
5013 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5014 break;
5015 case 8192:
5016 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5017 break;
5018 case 16384:
5019 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5020 break;
5021 default:
5022 panic("wm_init: MCLBYTES %d unsupported",
5023 MCLBYTES);
5024 break;
5025 }
5026 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
5027 }
5028
5029 /* Set the receive filter. */
5030 wm_set_filter(sc);
5031
5032 /* Enable ECC */
5033 switch (sc->sc_type) {
5034 case WM_T_82571:
5035 reg = CSR_READ(sc, WMREG_PBA_ECC);
5036 reg |= PBA_ECC_CORR_EN;
5037 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5038 break;
5039 case WM_T_PCH_LPT:
5040 reg = CSR_READ(sc, WMREG_PBECCSTS);
5041 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5042 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5043
5044 reg = CSR_READ(sc, WMREG_CTRL);
5045 reg |= CTRL_MEHE;
5046 CSR_WRITE(sc, WMREG_CTRL, reg);
5047 break;
5048 default:
5049 break;
5050 }
5051
5052 /* On 575 and later set RDT only if RX enabled */
5053 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5054 for (i = 0; i < WM_NRXDESC; i++)
5055 WM_INIT_RXDESC(sc, i);
5056
5057 sc->sc_stopping = false;
5058
5059 /* Start the one second link check clock. */
5060 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5061
5062 /* ...all done! */
5063 ifp->if_flags |= IFF_RUNNING;
5064 ifp->if_flags &= ~IFF_OACTIVE;
5065
5066 out:
5067 sc->sc_if_flags = ifp->if_flags;
5068 if (error)
5069 log(LOG_ERR, "%s: interface not running\n",
5070 device_xname(sc->sc_dev));
5071 return error;
5072 }
5073
5074 /*
5075 * wm_rxdrain:
5076 *
5077 * Drain the receive queue.
5078 */
5079 static void
5080 wm_rxdrain(struct wm_softc *sc)
5081 {
5082 struct wm_rxsoft *rxs;
5083 int i;
5084
5085 KASSERT(WM_LOCKED(sc));
5086
5087 for (i = 0; i < WM_NRXDESC; i++) {
5088 rxs = &sc->sc_rxsoft[i];
5089 if (rxs->rxs_mbuf != NULL) {
5090 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5091 m_freem(rxs->rxs_mbuf);
5092 rxs->rxs_mbuf = NULL;
5093 }
5094 }
5095 }
5096
5097 /*
5098 * wm_stop: [ifnet interface function]
5099 *
5100 * Stop transmission on the interface.
5101 */
5102 static void
5103 wm_stop(struct ifnet *ifp, int disable)
5104 {
5105 struct wm_softc *sc = ifp->if_softc;
5106
5107 WM_LOCK(sc);
5108 wm_stop_locked(ifp, disable);
5109 WM_UNLOCK(sc);
5110 }
5111
5112 static void
5113 wm_stop_locked(struct ifnet *ifp, int disable)
5114 {
5115 struct wm_softc *sc = ifp->if_softc;
5116 struct wm_txsoft *txs;
5117 int i;
5118
5119 KASSERT(WM_LOCKED(sc));
5120
5121 sc->sc_stopping = true;
5122
5123 /* Stop the one second clock. */
5124 callout_stop(&sc->sc_tick_ch);
5125
5126 /* Stop the 82547 Tx FIFO stall check timer. */
5127 if (sc->sc_type == WM_T_82547)
5128 callout_stop(&sc->sc_txfifo_ch);
5129
5130 if (sc->sc_flags & WM_F_HAS_MII) {
5131 /* Down the MII. */
5132 mii_down(&sc->sc_mii);
5133 } else {
5134 #if 0
5135 /* Should we clear PHY's status properly? */
5136 wm_reset(sc);
5137 #endif
5138 }
5139
5140 /* Stop the transmit and receive processes. */
5141 CSR_WRITE(sc, WMREG_TCTL, 0);
5142 CSR_WRITE(sc, WMREG_RCTL, 0);
5143 sc->sc_rctl &= ~RCTL_EN;
5144
5145 /*
5146 * Clear the interrupt mask to ensure the device cannot assert its
5147 * interrupt line.
5148 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
5149 * any currently pending or shared interrupt.
5150 */
5151 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5152 sc->sc_icr = 0;
5153
5154 /* Release any queued transmit buffers. */
5155 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
5156 txs = &sc->sc_txsoft[i];
5157 if (txs->txs_mbuf != NULL) {
5158 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5159 m_freem(txs->txs_mbuf);
5160 txs->txs_mbuf = NULL;
5161 }
5162 }
5163
5164 /* Mark the interface as down and cancel the watchdog timer. */
5165 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5166 ifp->if_timer = 0;
5167
5168 if (disable)
5169 wm_rxdrain(sc);
5170
5171 #if 0 /* notyet */
5172 if (sc->sc_type >= WM_T_82544)
5173 CSR_WRITE(sc, WMREG_WUC, 0);
5174 #endif
5175 }
5176
5177 void
5178 wm_get_auto_rd_done(struct wm_softc *sc)
5179 {
5180 int i;
5181
5182 /* wait for eeprom to reload */
5183 switch (sc->sc_type) {
5184 case WM_T_82571:
5185 case WM_T_82572:
5186 case WM_T_82573:
5187 case WM_T_82574:
5188 case WM_T_82583:
5189 case WM_T_82575:
5190 case WM_T_82576:
5191 case WM_T_82580:
5192 case WM_T_82580ER:
5193 case WM_T_I350:
5194 case WM_T_I354:
5195 case WM_T_I210:
5196 case WM_T_I211:
5197 case WM_T_80003:
5198 case WM_T_ICH8:
5199 case WM_T_ICH9:
5200 for (i = 0; i < 10; i++) {
5201 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
5202 break;
5203 delay(1000);
5204 }
5205 if (i == 10) {
5206 log(LOG_ERR, "%s: auto read from eeprom failed to "
5207 "complete\n", device_xname(sc->sc_dev));
5208 }
5209 break;
5210 default:
5211 break;
5212 }
5213 }
5214
5215 void
5216 wm_lan_init_done(struct wm_softc *sc)
5217 {
5218 uint32_t reg = 0;
5219 int i;
5220
5221 /* wait for eeprom to reload */
5222 switch (sc->sc_type) {
5223 case WM_T_ICH10:
5224 case WM_T_PCH:
5225 case WM_T_PCH2:
5226 case WM_T_PCH_LPT:
5227 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
5228 reg = CSR_READ(sc, WMREG_STATUS);
5229 if ((reg & STATUS_LAN_INIT_DONE) != 0)
5230 break;
5231 delay(100);
5232 }
5233 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
5234 log(LOG_ERR, "%s: %s: lan_init_done failed to "
5235 "complete\n", device_xname(sc->sc_dev), __func__);
5236 }
5237 break;
5238 default:
5239 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5240 __func__);
5241 break;
5242 }
5243
5244 reg &= ~STATUS_LAN_INIT_DONE;
5245 CSR_WRITE(sc, WMREG_STATUS, reg);
5246 }
5247
5248 void
5249 wm_get_cfg_done(struct wm_softc *sc)
5250 {
5251 int mask;
5252 uint32_t reg;
5253 int i;
5254
5255 /* wait for eeprom to reload */
5256 switch (sc->sc_type) {
5257 case WM_T_82542_2_0:
5258 case WM_T_82542_2_1:
5259 /* null */
5260 break;
5261 case WM_T_82543:
5262 case WM_T_82544:
5263 case WM_T_82540:
5264 case WM_T_82545:
5265 case WM_T_82545_3:
5266 case WM_T_82546:
5267 case WM_T_82546_3:
5268 case WM_T_82541:
5269 case WM_T_82541_2:
5270 case WM_T_82547:
5271 case WM_T_82547_2:
5272 case WM_T_82573:
5273 case WM_T_82574:
5274 case WM_T_82583:
5275 /* generic */
5276 delay(10*1000);
5277 break;
5278 case WM_T_80003:
5279 case WM_T_82571:
5280 case WM_T_82572:
5281 case WM_T_82575:
5282 case WM_T_82576:
5283 case WM_T_82580:
5284 case WM_T_82580ER:
5285 case WM_T_I350:
5286 case WM_T_I354:
5287 case WM_T_I210:
5288 case WM_T_I211:
5289 if (sc->sc_type == WM_T_82571) {
5290 /* Only 82571 shares port 0 */
5291 mask = EEMNGCTL_CFGDONE_0;
5292 } else
5293 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5294 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5295 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5296 break;
5297 delay(1000);
5298 }
5299 if (i >= WM_PHY_CFG_TIMEOUT) {
5300 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5301 device_xname(sc->sc_dev), __func__));
5302 }
5303 break;
5304 case WM_T_ICH8:
5305 case WM_T_ICH9:
5306 case WM_T_ICH10:
5307 case WM_T_PCH:
5308 case WM_T_PCH2:
5309 case WM_T_PCH_LPT:
5310 delay(10*1000);
5311 if (sc->sc_type >= WM_T_ICH10)
5312 wm_lan_init_done(sc);
5313 else
5314 wm_get_auto_rd_done(sc);
5315
5316 reg = CSR_READ(sc, WMREG_STATUS);
5317 if ((reg & STATUS_PHYRA) != 0)
5318 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
5319 break;
5320 default:
5321 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5322 __func__);
5323 break;
5324 }
5325 }
5326
5327 /*
5328 * wm_acquire_eeprom:
5329 *
5330 * Perform the EEPROM handshake required on some chips.
5331 */
5332 static int
5333 wm_acquire_eeprom(struct wm_softc *sc)
5334 {
5335 uint32_t reg;
5336 int x;
5337 int ret = 0;
5338
5339 /* always success */
5340 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5341 return 0;
5342
5343 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
5344 ret = wm_get_swfwhw_semaphore(sc);
5345 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
5346 /* this will also do wm_get_swsm_semaphore() if needed */
5347 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5348 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
5349 ret = wm_get_swsm_semaphore(sc);
5350 }
5351
5352 if (ret) {
5353 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5354 __func__);
5355 return 1;
5356 }
5357
5358 if (sc->sc_flags & WM_F_LOCK_EECD) {
5359 reg = CSR_READ(sc, WMREG_EECD);
5360
5361 /* Request EEPROM access. */
5362 reg |= EECD_EE_REQ;
5363 CSR_WRITE(sc, WMREG_EECD, reg);
5364
5365 /* ..and wait for it to be granted. */
5366 for (x = 0; x < 1000; x++) {
5367 reg = CSR_READ(sc, WMREG_EECD);
5368 if (reg & EECD_EE_GNT)
5369 break;
5370 delay(5);
5371 }
5372 if ((reg & EECD_EE_GNT) == 0) {
5373 aprint_error_dev(sc->sc_dev,
5374 "could not acquire EEPROM GNT\n");
5375 reg &= ~EECD_EE_REQ;
5376 CSR_WRITE(sc, WMREG_EECD, reg);
5377 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
5378 wm_put_swfwhw_semaphore(sc);
5379 if (sc->sc_flags & WM_F_LOCK_SWFW)
5380 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5381 else if (sc->sc_flags & WM_F_LOCK_SWSM)
5382 wm_put_swsm_semaphore(sc);
5383 return 1;
5384 }
5385 }
5386
5387 return 0;
5388 }
5389
5390 /*
5391 * wm_release_eeprom:
5392 *
5393 * Release the EEPROM mutex.
5394 */
5395 static void
5396 wm_release_eeprom(struct wm_softc *sc)
5397 {
5398 uint32_t reg;
5399
5400 /* always success */
5401 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5402 return;
5403
5404 if (sc->sc_flags & WM_F_LOCK_EECD) {
5405 reg = CSR_READ(sc, WMREG_EECD);
5406 reg &= ~EECD_EE_REQ;
5407 CSR_WRITE(sc, WMREG_EECD, reg);
5408 }
5409
5410 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
5411 wm_put_swfwhw_semaphore(sc);
5412 if (sc->sc_flags & WM_F_LOCK_SWFW)
5413 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5414 else if (sc->sc_flags & WM_F_LOCK_SWSM)
5415 wm_put_swsm_semaphore(sc);
5416 }
5417
5418 /*
5419 * wm_eeprom_sendbits:
5420 *
5421 * Send a series of bits to the EEPROM.
5422 */
5423 static void
5424 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5425 {
5426 uint32_t reg;
5427 int x;
5428
5429 reg = CSR_READ(sc, WMREG_EECD);
5430
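	/*
	 * Clock the bits out MSB first: present each bit on DI,
	 * then pulse SK high and low, holding each phase for ~2us.
	 */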
5431 for (x = nbits; x > 0; x--) {
5432 if (bits & (1U << (x - 1)))
5433 reg |= EECD_DI;
5434 else
5435 reg &= ~EECD_DI;
5436 CSR_WRITE(sc, WMREG_EECD, reg);
5437 CSR_WRITE_FLUSH(sc);
5438 delay(2);
5439 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5440 CSR_WRITE_FLUSH(sc);
5441 delay(2);
5442 CSR_WRITE(sc, WMREG_EECD, reg);
5443 CSR_WRITE_FLUSH(sc);
5444 delay(2);
5445 }
5446 }
5447
5448 /*
5449 * wm_eeprom_recvbits:
5450 *
5451 * Receive a series of bits from the EEPROM.
5452 */
5453 static void
5454 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5455 {
5456 uint32_t reg, val;
5457 int x;
5458
5459 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5460
5461 val = 0;
5462 for (x = nbits; x > 0; x--) {
5463 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5464 CSR_WRITE_FLUSH(sc);
5465 delay(2);
5466 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5467 val |= (1U << (x - 1));
5468 CSR_WRITE(sc, WMREG_EECD, reg);
5469 CSR_WRITE_FLUSH(sc);
5470 delay(2);
5471 }
5472 *valp = val;
5473 }
5474
5475 /*
5476 * wm_read_eeprom_uwire:
5477 *
5478 * Read a word from the EEPROM using the MicroWire protocol.
5479 */
5480 static int
5481 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5482 {
5483 uint32_t reg, val;
5484 int i;
5485
5486 for (i = 0; i < wordcnt; i++) {
5487 /* Clear SK and DI. */
5488 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5489 CSR_WRITE(sc, WMREG_EECD, reg);
5490
5491 /*
5492 * XXX: workaround for a bug in qemu-0.12.x and prior
5493 * and Xen.
5494 *
5495 		 * We use this workaround only for the 82540 because
5496 		 * qemu's e1000 acts as an 82540.
5497 */
5498 if (sc->sc_type == WM_T_82540) {
5499 reg |= EECD_SK;
5500 CSR_WRITE(sc, WMREG_EECD, reg);
5501 reg &= ~EECD_SK;
5502 CSR_WRITE(sc, WMREG_EECD, reg);
5503 CSR_WRITE_FLUSH(sc);
5504 delay(2);
5505 }
5506 /* XXX: end of workaround */
5507
5508 /* Set CHIP SELECT. */
5509 reg |= EECD_CS;
5510 CSR_WRITE(sc, WMREG_EECD, reg);
5511 CSR_WRITE_FLUSH(sc);
5512 delay(2);
5513
5514 /* Shift in the READ command. */
5515 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5516
5517 /* Shift in address. */
5518 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5519
5520 /* Shift out the data. */
5521 wm_eeprom_recvbits(sc, &val, 16);
5522 data[i] = val & 0xffff;
5523
5524 /* Clear CHIP SELECT. */
5525 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5526 CSR_WRITE(sc, WMREG_EECD, reg);
5527 CSR_WRITE_FLUSH(sc);
5528 delay(2);
5529 }
5530
5531 return 0;
5532 }
5533
5534 /*
5535 * wm_spi_eeprom_ready:
5536 *
5537 * Wait for a SPI EEPROM to be ready for commands.
5538 */
5539 static int
5540 wm_spi_eeprom_ready(struct wm_softc *sc)
5541 {
5542 uint32_t val;
5543 int usec;
5544
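	/*
	 * Poll the status register (RDSR); the EEPROM is ready for
	 * a new command once the SPI_SR_RDY (busy) bit clears.
	 */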
5545 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5546 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5547 wm_eeprom_recvbits(sc, &val, 8);
5548 if ((val & SPI_SR_RDY) == 0)
5549 break;
5550 }
5551 if (usec >= SPI_MAX_RETRIES) {
5552 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5553 return 1;
5554 }
5555 return 0;
5556 }
5557
5558 /*
5559 * wm_read_eeprom_spi:
5560 *
5561  * Read a word from the EEPROM using the SPI protocol.
5562 */
5563 static int
5564 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5565 {
5566 uint32_t reg, val;
5567 int i;
5568 uint8_t opc;
5569
5570 /* Clear SK and CS. */
5571 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5572 CSR_WRITE(sc, WMREG_EECD, reg);
5573 CSR_WRITE_FLUSH(sc);
5574 delay(2);
5575
5576 if (wm_spi_eeprom_ready(sc))
5577 return 1;
5578
5579 /* Toggle CS to flush commands. */
5580 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5581 CSR_WRITE_FLUSH(sc);
5582 delay(2);
5583 CSR_WRITE(sc, WMREG_EECD, reg);
5584 CSR_WRITE_FLUSH(sc);
5585 delay(2);
5586
5587 opc = SPI_OPC_READ;
5588 if (sc->sc_ee_addrbits == 8 && word >= 128)
5589 opc |= SPI_OPC_A8;
5590
5591 wm_eeprom_sendbits(sc, opc, 8);
5592 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5593
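	/*
	 * Each 16-bit word arrives high byte first; swap the bytes
	 * into host order as we store them.
	 */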
5594 for (i = 0; i < wordcnt; i++) {
5595 wm_eeprom_recvbits(sc, &val, 16);
5596 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5597 }
5598
5599 /* Raise CS and clear SK. */
5600 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5601 CSR_WRITE(sc, WMREG_EECD, reg);
5602 CSR_WRITE_FLUSH(sc);
5603 delay(2);
5604
5605 return 0;
5606 }
5607
5608 #define NVM_CHECKSUM 0xBABA
5609 #define EEPROM_SIZE 0x0040
5610 #define NVM_COMPAT 0x0003
5611 #define NVM_COMPAT_VALID_CHECKSUM 0x0001
5612 #define NVM_FUTURE_INIT_WORD1 0x0019
5613 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
5614
5615 /*
5616 * wm_validate_eeprom_checksum
5617 *
5618  * The sum of the first 64 16-bit words must equal NVM_CHECKSUM (0xBABA).
5619 */
5620 static int
5621 wm_validate_eeprom_checksum(struct wm_softc *sc)
5622 {
5623 uint16_t checksum;
5624 uint16_t eeprom_data;
5625 #ifdef WM_DEBUG
5626 uint16_t csum_wordaddr, valid_checksum;
5627 #endif
5628 int i;
5629
5630 checksum = 0;
5631
5632 /* Don't check for I211 */
5633 if (sc->sc_type == WM_T_I211)
5634 return 0;
5635
5636 #ifdef WM_DEBUG
5637 if (sc->sc_type == WM_T_PCH_LPT) {
5638 csum_wordaddr = NVM_COMPAT;
5639 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5640 } else {
5641 csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5642 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5643 }
5644
5645 /* Dump EEPROM image for debug */
5646 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5647 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5648 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5649 wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5650 if ((eeprom_data & valid_checksum) == 0) {
5651 DPRINTF(WM_DEBUG_NVM,
5652 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
5653 device_xname(sc->sc_dev), eeprom_data,
5654 valid_checksum));
5655 }
5656 }
5657
5658 if ((wm_debug & WM_DEBUG_NVM) != 0) {
5659 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5660 for (i = 0; i < EEPROM_SIZE; i++) {
5661 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5662 printf("XX ");
5663 else
5664 printf("%04x ", eeprom_data);
5665 if (i % 8 == 7)
5666 printf("\n");
5667 }
5668 }
5669
5670 #endif /* WM_DEBUG */
5671
5672 for (i = 0; i < EEPROM_SIZE; i++) {
5673 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5674 return 1;
5675 checksum += eeprom_data;
5676 }
5677
5678 if (checksum != (uint16_t) NVM_CHECKSUM) {
5679 #ifdef WM_DEBUG
5680 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5681 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5682 #endif
5683 }
5684
5685 return 0;
5686 }
5687
5688 /*
5689 * wm_read_eeprom:
5690 *
5691 * Read data from the serial EEPROM.
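 *
 *	The access method (ICH8 flash, EERD register, SPI, or Microwire)
 *	is chosen from the chip type and sc_flags.  A typical caller reads
 *	a small block of words, as wm_read_mac_addr() does:
 *
 *		uint16_t myea[3];
 *		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR, 3, myea) != 0)
 *			goto bad;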
5692 */
5693 static int
5694 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5695 {
5696 int rv;
5697
5698 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5699 return 1;
5700
5701 if (wm_acquire_eeprom(sc))
5702 return 1;
5703
5704 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5705 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5706 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5707 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5708 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5709 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5710 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5711 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5712 else
5713 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5714
5715 wm_release_eeprom(sc);
5716 return rv;
5717 }
5718
5719 static int
5720 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5721 uint16_t *data)
5722 {
5723 int i, eerd = 0;
5724 int error = 0;
5725
5726 for (i = 0; i < wordcnt; i++) {
5727 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5728
5729 CSR_WRITE(sc, WMREG_EERD, eerd);
5730 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5731 if (error != 0)
5732 break;
5733
5734 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5735 }
5736
5737 return error;
5738 }
5739
5740 static int
5741 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5742 {
5743 uint32_t attempts = 100000;
5744 uint32_t i, reg = 0;
5745 int32_t done = -1;
5746
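	/* Poll the DONE bit, waiting 5us between reads (up to ~500ms). */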
5747 for (i = 0; i < attempts; i++) {
5748 reg = CSR_READ(sc, rw);
5749
5750 if (reg & EERD_DONE) {
5751 done = 0;
5752 break;
5753 }
5754 delay(5);
5755 }
5756
5757 return done;
5758 }
5759
5760 static int
5761 wm_check_alt_mac_addr(struct wm_softc *sc)
5762 {
5763 uint16_t myea[ETHER_ADDR_LEN / 2];
5764 uint16_t offset = EEPROM_OFF_MACADDR;
5765
5766 /* Try to read alternative MAC address pointer */
5767 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5768 return -1;
5769
5770 /* Check pointer */
5771 if (offset == 0xffff)
5772 return -1;
5773
5774 /*
5775 	 * Check whether the alternative MAC address is valid.  Some
5776 	 * cards have a non-0xffff pointer but don't actually use an
5777 	 * alternative MAC address.
5778 	 *
5779 	 * A valid address is unicast, so its multicast (group) bit is clear.
5780 */
5781 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5782 if (((myea[0] & 0xff) & 0x01) == 0)
5783 return 0; /* found! */
5784
5785 /* not found */
5786 return -1;
5787 }
5788
5789 static int
5790 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5791 {
5792 uint16_t myea[ETHER_ADDR_LEN / 2];
5793 uint16_t offset = EEPROM_OFF_MACADDR;
5794 int do_invert = 0;
5795
5796 switch (sc->sc_type) {
5797 case WM_T_82580:
5798 case WM_T_82580ER:
5799 case WM_T_I350:
5800 case WM_T_I354:
5801 switch (sc->sc_funcid) {
5802 case 0:
5803 /* default value (== EEPROM_OFF_MACADDR) */
5804 break;
5805 case 1:
5806 offset = EEPROM_OFF_LAN1;
5807 break;
5808 case 2:
5809 offset = EEPROM_OFF_LAN2;
5810 break;
5811 case 3:
5812 offset = EEPROM_OFF_LAN3;
5813 break;
5814 default:
5815 goto bad;
5816 /* NOTREACHED */
5817 break;
5818 }
5819 break;
5820 case WM_T_82571:
5821 case WM_T_82575:
5822 case WM_T_82576:
5823 case WM_T_80003:
5824 case WM_T_I210:
5825 case WM_T_I211:
5826 if (wm_check_alt_mac_addr(sc) != 0) {
5827 /* reset the offset to LAN0 */
5828 offset = EEPROM_OFF_MACADDR;
5829 if ((sc->sc_funcid & 0x01) == 1)
5830 do_invert = 1;
5831 goto do_read;
5832 }
5833 switch (sc->sc_funcid) {
5834 case 0:
5835 /*
5836 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5837 * itself.
5838 */
5839 break;
5840 case 1:
5841 offset += EEPROM_OFF_MACADDR_LAN1;
5842 break;
5843 case 2:
5844 offset += EEPROM_OFF_MACADDR_LAN2;
5845 break;
5846 case 3:
5847 offset += EEPROM_OFF_MACADDR_LAN3;
5848 break;
5849 default:
5850 goto bad;
5851 /* NOTREACHED */
5852 break;
5853 }
5854 break;
5855 default:
5856 if ((sc->sc_funcid & 0x01) == 1)
5857 do_invert = 1;
5858 break;
5859 }
5860
5861 do_read:
5862 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5863 myea) != 0) {
5864 goto bad;
5865 }
5866
5867 enaddr[0] = myea[0] & 0xff;
5868 enaddr[1] = myea[0] >> 8;
5869 enaddr[2] = myea[1] & 0xff;
5870 enaddr[3] = myea[1] >> 8;
5871 enaddr[4] = myea[2] & 0xff;
5872 enaddr[5] = myea[2] >> 8;
5873
5874 /*
5875 * Toggle the LSB of the MAC address on the second port
5876 * of some dual port cards.
5877 */
5878 if (do_invert != 0)
5879 enaddr[5] ^= 1;
5880
5881 return 0;
5882
5883 bad:
5884 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5885
5886 return -1;
5887 }
5888
5889 /*
5890 * wm_add_rxbuf:
5891 *
5892  *	Add a receive buffer to the indicated descriptor.
5893 */
5894 static int
5895 wm_add_rxbuf(struct wm_softc *sc, int idx)
5896 {
5897 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5898 struct mbuf *m;
5899 int error;
5900
5901 KASSERT(WM_LOCKED(sc));
5902
5903 MGETHDR(m, M_DONTWAIT, MT_DATA);
5904 if (m == NULL)
5905 return ENOBUFS;
5906
5907 MCLGET(m, M_DONTWAIT);
5908 if ((m->m_flags & M_EXT) == 0) {
5909 m_freem(m);
5910 return ENOBUFS;
5911 }
5912
5913 if (rxs->rxs_mbuf != NULL)
5914 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5915
5916 rxs->rxs_mbuf = m;
5917
5918 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5919 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5920 BUS_DMA_READ|BUS_DMA_NOWAIT);
5921 if (error) {
5922 /* XXX XXX XXX */
5923 aprint_error_dev(sc->sc_dev,
5924 "unable to load rx DMA map %d, error = %d\n",
5925 idx, error);
5926 panic("wm_add_rxbuf");
5927 }
5928
5929 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5930 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5931
5932 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5933 if ((sc->sc_rctl & RCTL_EN) != 0)
5934 WM_INIT_RXDESC(sc, idx);
5935 } else
5936 WM_INIT_RXDESC(sc, idx);
5937
5938 return 0;
5939 }
5940
5941 /*
5942 * wm_set_ral:
5943 *
5944  *	Set an entry in the receive address list.
5945 */
5946 static void
5947 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5948 {
5949 uint32_t ral_lo, ral_hi;
5950
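	/*
	 * RAL_LO carries the first four octets of the address and RAL_HI
	 * the remaining two, plus the Address Valid bit.
	 */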
5951 if (enaddr != NULL) {
5952 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5953 (enaddr[3] << 24);
5954 ral_hi = enaddr[4] | (enaddr[5] << 8);
5955 ral_hi |= RAL_AV;
5956 } else {
5957 ral_lo = 0;
5958 ral_hi = 0;
5959 }
5960
5961 if (sc->sc_type >= WM_T_82544) {
5962 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5963 ral_lo);
5964 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5965 ral_hi);
5966 } else {
5967 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5968 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5969 }
5970 }
5971
5972 /*
5973 * wm_mchash:
5974 *
5975 * Compute the hash of the multicast address for the 4096-bit
5976 * multicast filter.
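 *
 *	Only the two high-order octets of the address are used; the
 *	sc_mchash_type setting selects how they are combined into a
 *	10-bit (ICH/PCH) or 12-bit hash.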
5977 */
5978 static uint32_t
5979 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5980 {
5981 static const int lo_shift[4] = { 4, 3, 2, 0 };
5982 static const int hi_shift[4] = { 4, 5, 6, 8 };
5983 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5984 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5985 uint32_t hash;
5986
5987 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5988 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5989 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5990 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5991 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5992 return (hash & 0x3ff);
5993 }
5994 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5995 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5996
5997 return (hash & 0xfff);
5998 }
5999
6000 /*
6001 * wm_set_filter:
6002 *
6003 * Set up the receive filter.
6004 */
6005 static void
6006 wm_set_filter(struct wm_softc *sc)
6007 {
6008 struct ethercom *ec = &sc->sc_ethercom;
6009 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6010 struct ether_multi *enm;
6011 struct ether_multistep step;
6012 bus_addr_t mta_reg;
6013 uint32_t hash, reg, bit;
6014 int i, size;
6015
6016 if (sc->sc_type >= WM_T_82544)
6017 mta_reg = WMREG_CORDOVA_MTA;
6018 else
6019 mta_reg = WMREG_MTA;
6020
6021 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
6022
6023 if (ifp->if_flags & IFF_BROADCAST)
6024 sc->sc_rctl |= RCTL_BAM;
6025 if (ifp->if_flags & IFF_PROMISC) {
6026 sc->sc_rctl |= RCTL_UPE;
6027 goto allmulti;
6028 }
6029
6030 /*
6031 * Set the station address in the first RAL slot, and
6032 * clear the remaining slots.
6033 */
6034 if (sc->sc_type == WM_T_ICH8)
6035 		size = WM_RAL_TABSIZE_ICH8 - 1;
6036 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
6037 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6038 || (sc->sc_type == WM_T_PCH_LPT))
6039 size = WM_RAL_TABSIZE_ICH8;
6040 else if (sc->sc_type == WM_T_82575)
6041 size = WM_RAL_TABSIZE_82575;
6042 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
6043 size = WM_RAL_TABSIZE_82576;
6044 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6045 size = WM_RAL_TABSIZE_I350;
6046 else
6047 size = WM_RAL_TABSIZE;
6048 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
6049 for (i = 1; i < size; i++)
6050 wm_set_ral(sc, NULL, i);
6051
6052 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
6053 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
6054 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
6055 size = WM_ICH8_MC_TABSIZE;
6056 else
6057 size = WM_MC_TABSIZE;
6058 /* Clear out the multicast table. */
6059 for (i = 0; i < size; i++)
6060 CSR_WRITE(sc, mta_reg + (i << 2), 0);
6061
6062 ETHER_FIRST_MULTI(step, ec, enm);
6063 while (enm != NULL) {
6064 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
6065 /*
6066 * We must listen to a range of multicast addresses.
6067 * For now, just accept all multicasts, rather than
6068 * trying to set only those filter bits needed to match
6069 * the range. (At this time, the only use of address
6070 * ranges is for IP multicast routing, for which the
6071 * range is big enough to require all bits set.)
6072 */
6073 goto allmulti;
6074 }
6075
6076 hash = wm_mchash(sc, enm->enm_addrlo);
6077
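		/*
		 * The upper bits of the hash select one of the 32-bit MTA
		 * registers (masked to the table size below); the low 5
		 * bits select the bit within that register.
		 */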
6078 reg = (hash >> 5);
6079 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
6080 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
6081 || (sc->sc_type == WM_T_PCH2)
6082 || (sc->sc_type == WM_T_PCH_LPT))
6083 reg &= 0x1f;
6084 else
6085 reg &= 0x7f;
6086 bit = hash & 0x1f;
6087
6088 hash = CSR_READ(sc, mta_reg + (reg << 2));
6089 hash |= 1U << bit;
6090
6091 /* XXX Hardware bug?? */
6092 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
6093 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
6094 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
6095 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
6096 } else
6097 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
6098
6099 ETHER_NEXT_MULTI(step, enm);
6100 }
6101
6102 ifp->if_flags &= ~IFF_ALLMULTI;
6103 goto setit;
6104
6105 allmulti:
6106 ifp->if_flags |= IFF_ALLMULTI;
6107 sc->sc_rctl |= RCTL_MPE;
6108
6109 setit:
6110 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
6111 }
6112
6113 /*
6114 * wm_tbi_mediainit:
6115 *
6116 * Initialize media for use on 1000BASE-X devices.
6117 */
6118 static void
6119 wm_tbi_mediainit(struct wm_softc *sc)
6120 {
6121 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6122 const char *sep = "";
6123
6124 if (sc->sc_type < WM_T_82543)
6125 sc->sc_tipg = TIPG_WM_DFLT;
6126 else
6127 sc->sc_tipg = TIPG_LG_DFLT;
6128
6129 sc->sc_tbi_anegticks = 5;
6130
6131 /* Initialize our media structures */
6132 sc->sc_mii.mii_ifp = ifp;
6133
6134 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6135 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
6136 wm_tbi_mediastatus);
6137
6138 /*
6139 * SWD Pins:
6140 *
6141 * 0 = Link LED (output)
6142 * 1 = Loss Of Signal (input)
6143 */
6144 sc->sc_ctrl |= CTRL_SWDPIO(0);
6145 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
6146
6147 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6148
6149 #define ADD(ss, mm, dd) \
6150 do { \
6151 aprint_normal("%s%s", sep, ss); \
6152 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
6153 sep = ", "; \
6154 } while (/*CONSTCOND*/0)
6155
6156 aprint_normal_dev(sc->sc_dev, "");
6157 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
6158 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
6159 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
6160 aprint_normal("\n");
6161
6162 #undef ADD
6163
6164 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
6165 }
6166
6167 /*
6168 * wm_tbi_mediastatus: [ifmedia interface function]
6169 *
6170 * Get the current interface media status on a 1000BASE-X device.
6171 */
6172 static void
6173 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6174 {
6175 struct wm_softc *sc = ifp->if_softc;
6176 uint32_t ctrl, status;
6177
6178 ifmr->ifm_status = IFM_AVALID;
6179 ifmr->ifm_active = IFM_ETHER;
6180
6181 status = CSR_READ(sc, WMREG_STATUS);
6182 if ((status & STATUS_LU) == 0) {
6183 ifmr->ifm_active |= IFM_NONE;
6184 return;
6185 }
6186
6187 ifmr->ifm_status |= IFM_ACTIVE;
6188 ifmr->ifm_active |= IFM_1000_SX;
6189 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
6190 ifmr->ifm_active |= IFM_FDX;
6191 else
6192 ifmr->ifm_active |= IFM_HDX;
6193 ctrl = CSR_READ(sc, WMREG_CTRL);
6194 if (ctrl & CTRL_RFCE)
6195 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
6196 if (ctrl & CTRL_TFCE)
6197 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
6198 }
6199
6200 /*
6201 * wm_tbi_mediachange: [ifmedia interface function]
6202 *
6203 * Set hardware to newly-selected media on a 1000BASE-X device.
6204 */
6205 static int
6206 wm_tbi_mediachange(struct ifnet *ifp)
6207 {
6208 struct wm_softc *sc = ifp->if_softc;
6209 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6210 uint32_t status;
6211 int i;
6212
6213 sc->sc_txcw = 0;
6214 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
6215 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
6216 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
6217 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6218 sc->sc_txcw |= TXCW_ANE;
6219 } else {
6220 /*
6221 * If autonegotiation is turned off, force link up and turn on
6222 * full duplex
6223 */
6224 sc->sc_txcw &= ~TXCW_ANE;
6225 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
6226 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6227 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6228 CSR_WRITE_FLUSH(sc);
6229 delay(1000);
6230 }
6231
6232 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
6233 device_xname(sc->sc_dev),sc->sc_txcw));
6234 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6235 CSR_WRITE_FLUSH(sc);
6236 delay(10000);
6237
6238 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
6239 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
6240
6241 /*
6242  * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the optics
6243  * detect a signal; on the 82544 and earlier it is clear when they do.
6244 */
6245 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
6246 /* Have signal; wait for the link to come up. */
6247
6248 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6249 /*
6250 * Reset the link, and let autonegotiation do its thing
6251 */
6252 sc->sc_ctrl |= CTRL_LRST;
6253 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6254 CSR_WRITE_FLUSH(sc);
6255 delay(1000);
6256 sc->sc_ctrl &= ~CTRL_LRST;
6257 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6258 CSR_WRITE_FLUSH(sc);
6259 delay(1000);
6260 }
6261
6262 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
6263 delay(10000);
6264 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
6265 break;
6266 }
6267
6268 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
6269 device_xname(sc->sc_dev),i));
6270
6271 status = CSR_READ(sc, WMREG_STATUS);
6272 DPRINTF(WM_DEBUG_LINK,
6273 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
6274 device_xname(sc->sc_dev),status, STATUS_LU));
6275 if (status & STATUS_LU) {
6276 /* Link is up. */
6277 DPRINTF(WM_DEBUG_LINK,
6278 ("%s: LINK: set media -> link up %s\n",
6279 device_xname(sc->sc_dev),
6280 (status & STATUS_FD) ? "FDX" : "HDX"));
6281
6282 /*
6283 * NOTE: CTRL will update TFCE and RFCE automatically,
6284 * so we should update sc->sc_ctrl
6285 */
6286 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6287 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6288 sc->sc_fcrtl &= ~FCRTL_XONE;
6289 if (status & STATUS_FD)
6290 sc->sc_tctl |=
6291 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6292 else
6293 sc->sc_tctl |=
6294 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6295 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6296 sc->sc_fcrtl |= FCRTL_XONE;
6297 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6298 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6299 WMREG_OLD_FCRTL : WMREG_FCRTL,
6300 sc->sc_fcrtl);
6301 sc->sc_tbi_linkup = 1;
6302 } else {
6303 if (i == WM_LINKUP_TIMEOUT)
6304 wm_check_for_link(sc);
6305 /* Link is down. */
6306 DPRINTF(WM_DEBUG_LINK,
6307 ("%s: LINK: set media -> link down\n",
6308 device_xname(sc->sc_dev)));
6309 sc->sc_tbi_linkup = 0;
6310 }
6311 } else {
6312 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6313 device_xname(sc->sc_dev)));
6314 sc->sc_tbi_linkup = 0;
6315 }
6316
6317 wm_tbi_set_linkled(sc);
6318
6319 return 0;
6320 }
6321
6322 /*
6323 * wm_tbi_set_linkled:
6324 *
6325 * Update the link LED on 1000BASE-X devices.
6326 */
6327 static void
6328 wm_tbi_set_linkled(struct wm_softc *sc)
6329 {
6330
6331 if (sc->sc_tbi_linkup)
6332 sc->sc_ctrl |= CTRL_SWDPIN(0);
6333 else
6334 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6335
6336 	/* On the 82540 and newer, the LED output is active-low */
6337 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6338
6339 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6340 }
6341
6342 /*
6343 * wm_tbi_check_link:
6344 *
6345 * Check the link on 1000BASE-X devices.
6346 */
6347 static void
6348 wm_tbi_check_link(struct wm_softc *sc)
6349 {
6350 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6351 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6352 uint32_t status;
6353
6354 KASSERT(WM_LOCKED(sc));
6355
6356 status = CSR_READ(sc, WMREG_STATUS);
6357
6358 /* XXX is this needed? */
6359 (void)CSR_READ(sc, WMREG_RXCW);
6360 (void)CSR_READ(sc, WMREG_CTRL);
6361
6362 /* set link status */
6363 if ((status & STATUS_LU) == 0) {
6364 DPRINTF(WM_DEBUG_LINK,
6365 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6366 sc->sc_tbi_linkup = 0;
6367 } else if (sc->sc_tbi_linkup == 0) {
6368 DPRINTF(WM_DEBUG_LINK,
6369 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6370 (status & STATUS_FD) ? "FDX" : "HDX"));
6371 sc->sc_tbi_linkup = 1;
6372 }
6373
6374 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6375 && ((status & STATUS_LU) == 0)) {
6376 sc->sc_tbi_linkup = 0;
6377 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6378 /* RXCFG storm! */
6379 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6380 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6381 wm_init_locked(ifp);
6382 WM_UNLOCK(sc);
6383 ifp->if_start(ifp);
6384 WM_LOCK(sc);
6385 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6386 /* If the timer expired, retry autonegotiation */
6387 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6388 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6389 sc->sc_tbi_ticks = 0;
6390 /*
6391 * Reset the link, and let autonegotiation do
6392 * its thing
6393 */
6394 sc->sc_ctrl |= CTRL_LRST;
6395 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6396 CSR_WRITE_FLUSH(sc);
6397 delay(1000);
6398 sc->sc_ctrl &= ~CTRL_LRST;
6399 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6400 CSR_WRITE_FLUSH(sc);
6401 delay(1000);
6402 CSR_WRITE(sc, WMREG_TXCW,
6403 sc->sc_txcw & ~TXCW_ANE);
6404 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6405 }
6406 }
6407 }
6408
6409 wm_tbi_set_linkled(sc);
6410 }
6411
6412 /*
6413 * wm_gmii_reset:
6414 *
6415 * Reset the PHY.
6416 */
6417 static void
6418 wm_gmii_reset(struct wm_softc *sc)
6419 {
6420 uint32_t reg;
6421 int rv;
6422
6423 /* get phy semaphore */
6424 switch (sc->sc_type) {
6425 case WM_T_82571:
6426 case WM_T_82572:
6427 case WM_T_82573:
6428 case WM_T_82574:
6429 case WM_T_82583:
6430 /* XXX should get sw semaphore, too */
6431 rv = wm_get_swsm_semaphore(sc);
6432 break;
6433 case WM_T_82575:
6434 case WM_T_82576:
6435 case WM_T_82580:
6436 case WM_T_82580ER:
6437 case WM_T_I350:
6438 case WM_T_I354:
6439 case WM_T_I210:
6440 case WM_T_I211:
6441 case WM_T_80003:
6442 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6443 break;
6444 case WM_T_ICH8:
6445 case WM_T_ICH9:
6446 case WM_T_ICH10:
6447 case WM_T_PCH:
6448 case WM_T_PCH2:
6449 case WM_T_PCH_LPT:
6450 rv = wm_get_swfwhw_semaphore(sc);
6451 break;
6452 default:
6453 		/* nothing to do */
6454 rv = 0;
6455 break;
6456 }
6457 if (rv != 0) {
6458 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6459 __func__);
6460 return;
6461 }
6462
6463 switch (sc->sc_type) {
6464 case WM_T_82542_2_0:
6465 case WM_T_82542_2_1:
6466 /* null */
6467 break;
6468 case WM_T_82543:
6469 /*
6470 * With 82543, we need to force speed and duplex on the MAC
6471 * equal to what the PHY speed and duplex configuration is.
6472 * In addition, we need to perform a hardware reset on the PHY
6473 * to take it out of reset.
6474 */
6475 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6476 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6477
6478 /* The PHY reset pin is active-low. */
6479 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6480 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6481 CTRL_EXT_SWDPIN(4));
6482 reg |= CTRL_EXT_SWDPIO(4);
6483
6484 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6485 CSR_WRITE_FLUSH(sc);
6486 delay(10*1000);
6487
6488 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6489 CSR_WRITE_FLUSH(sc);
6490 delay(150);
6491 #if 0
6492 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6493 #endif
6494 delay(20*1000); /* XXX extra delay to get PHY ID? */
6495 break;
6496 case WM_T_82544: /* reset 10000us */
6497 case WM_T_82540:
6498 case WM_T_82545:
6499 case WM_T_82545_3:
6500 case WM_T_82546:
6501 case WM_T_82546_3:
6502 case WM_T_82541:
6503 case WM_T_82541_2:
6504 case WM_T_82547:
6505 case WM_T_82547_2:
6506 case WM_T_82571: /* reset 100us */
6507 case WM_T_82572:
6508 case WM_T_82573:
6509 case WM_T_82574:
6510 case WM_T_82575:
6511 case WM_T_82576:
6512 case WM_T_82580:
6513 case WM_T_82580ER:
6514 case WM_T_I350:
6515 case WM_T_I354:
6516 case WM_T_I210:
6517 case WM_T_I211:
6518 case WM_T_82583:
6519 case WM_T_80003:
6520 /* generic reset */
6521 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6522 CSR_WRITE_FLUSH(sc);
6523 delay(20000);
6524 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6525 CSR_WRITE_FLUSH(sc);
6526 delay(20000);
6527
6528 if ((sc->sc_type == WM_T_82541)
6529 || (sc->sc_type == WM_T_82541_2)
6530 || (sc->sc_type == WM_T_82547)
6531 || (sc->sc_type == WM_T_82547_2)) {
6532 			/* workarounds for igp are done in igp_reset() */
6533 /* XXX add code to set LED after phy reset */
6534 }
6535 break;
6536 case WM_T_ICH8:
6537 case WM_T_ICH9:
6538 case WM_T_ICH10:
6539 case WM_T_PCH:
6540 case WM_T_PCH2:
6541 case WM_T_PCH_LPT:
6542 /* generic reset */
6543 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6544 CSR_WRITE_FLUSH(sc);
6545 delay(100);
6546 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6547 CSR_WRITE_FLUSH(sc);
6548 delay(150);
6549 break;
6550 default:
6551 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6552 __func__);
6553 break;
6554 }
6555
6556 /* release PHY semaphore */
6557 switch (sc->sc_type) {
6558 case WM_T_82571:
6559 case WM_T_82572:
6560 case WM_T_82573:
6561 case WM_T_82574:
6562 case WM_T_82583:
6563 /* XXX should put sw semaphore, too */
6564 wm_put_swsm_semaphore(sc);
6565 break;
6566 case WM_T_82575:
6567 case WM_T_82576:
6568 case WM_T_82580:
6569 case WM_T_82580ER:
6570 case WM_T_I350:
6571 case WM_T_I354:
6572 case WM_T_I210:
6573 case WM_T_I211:
6574 case WM_T_80003:
6575 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6576 break;
6577 case WM_T_ICH8:
6578 case WM_T_ICH9:
6579 case WM_T_ICH10:
6580 case WM_T_PCH:
6581 case WM_T_PCH2:
6582 case WM_T_PCH_LPT:
6583 wm_put_swfwhw_semaphore(sc);
6584 break;
6585 default:
6586 		/* nothing to do */
6587 rv = 0;
6588 break;
6589 }
6590
6591 /* get_cfg_done */
6592 wm_get_cfg_done(sc);
6593
6594 /* extra setup */
6595 switch (sc->sc_type) {
6596 case WM_T_82542_2_0:
6597 case WM_T_82542_2_1:
6598 case WM_T_82543:
6599 case WM_T_82544:
6600 case WM_T_82540:
6601 case WM_T_82545:
6602 case WM_T_82545_3:
6603 case WM_T_82546:
6604 case WM_T_82546_3:
6605 case WM_T_82541_2:
6606 case WM_T_82547_2:
6607 case WM_T_82571:
6608 case WM_T_82572:
6609 case WM_T_82573:
6610 case WM_T_82574:
6611 case WM_T_82575:
6612 case WM_T_82576:
6613 case WM_T_82580:
6614 case WM_T_82580ER:
6615 case WM_T_I350:
6616 case WM_T_I354:
6617 case WM_T_I210:
6618 case WM_T_I211:
6619 case WM_T_82583:
6620 case WM_T_80003:
6621 /* null */
6622 break;
6623 case WM_T_82541:
6624 case WM_T_82547:
6625 		/* XXX Actively configure the LED after PHY reset */
6626 break;
6627 case WM_T_ICH8:
6628 case WM_T_ICH9:
6629 case WM_T_ICH10:
6630 case WM_T_PCH:
6631 case WM_T_PCH2:
6632 case WM_T_PCH_LPT:
6633 		/* Allow time for h/w to get to a quiescent state after reset */
6634 delay(10*1000);
6635
6636 if (sc->sc_type == WM_T_PCH)
6637 wm_hv_phy_workaround_ich8lan(sc);
6638
6639 if (sc->sc_type == WM_T_PCH2)
6640 wm_lv_phy_workaround_ich8lan(sc);
6641
6642 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6643 /*
6644 * dummy read to clear the phy wakeup bit after lcd
6645 * reset
6646 */
6647 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6648 }
6649
6650 /*
6651 		 * XXX Configure the LCD with the extended configuration region
6652 * in NVM
6653 */
6654
6655 /* Configure the LCD with the OEM bits in NVM */
6656 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6657 || (sc->sc_type == WM_T_PCH_LPT)) {
6658 /*
6659 * Disable LPLU.
6660 * XXX It seems that 82567 has LPLU, too.
6661 */
6662 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6663 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6664 reg |= HV_OEM_BITS_ANEGNOW;
6665 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6666 }
6667 break;
6668 default:
6669 panic("%s: unknown type\n", __func__);
6670 break;
6671 }
6672 }
6673
6674 /*
6675 * wm_get_phy_id_82575:
6676 *
6677 * Return PHY ID. Return -1 if it failed.
6678 */
6679 static int
6680 wm_get_phy_id_82575(struct wm_softc *sc)
6681 {
6682 uint32_t reg;
6683 int phyid = -1;
6684
6685 /* XXX */
6686 if ((sc->sc_flags & WM_F_SGMII) == 0)
6687 return -1;
6688
6689 if (wm_sgmii_uses_mdio(sc)) {
6690 switch (sc->sc_type) {
6691 case WM_T_82575:
6692 case WM_T_82576:
6693 reg = CSR_READ(sc, WMREG_MDIC);
6694 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6695 break;
6696 case WM_T_82580:
6697 case WM_T_I350:
6698 case WM_T_I354:
6699 case WM_T_I210:
6700 case WM_T_I211:
6701 reg = CSR_READ(sc, WMREG_MDICNFG);
6702 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6703 break;
6704 default:
6705 return -1;
6706 }
6707 }
6708
6709 return phyid;
6710 }
6711
6712
6713 /*
6714 * wm_gmii_mediainit:
6715 *
6716 * Initialize media for use on 1000BASE-T devices.
6717 */
6718 static void
6719 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6720 {
6721 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6722 struct mii_data *mii = &sc->sc_mii;
6723
6724 /* We have MII. */
6725 sc->sc_flags |= WM_F_HAS_MII;
6726
6727 if (sc->sc_type == WM_T_80003)
6728 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6729 else
6730 sc->sc_tipg = TIPG_1000T_DFLT;
6731
6732 /*
6733 * Let the chip set speed/duplex on its own based on
6734 * signals from the PHY.
6735 * XXXbouyer - I'm not sure this is right for the 80003,
6736 * the em driver only sets CTRL_SLU here - but it seems to work.
6737 */
6738 sc->sc_ctrl |= CTRL_SLU;
6739 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6740
6741 /* Initialize our media structures and probe the GMII. */
6742 mii->mii_ifp = ifp;
6743
6744 /*
6745 * Determine the PHY access method.
6746 *
6747 * For SGMII, use SGMII specific method.
6748 *
6749 * For some devices, we can determine the PHY access method
6750 * from sc_type.
6751 *
6752  * For ICH8 variants, it's difficult to determine the PHY access
6753  * method from sc_type, so use the PCI product ID for some devices.
6754  * For other ICH8 variants, try igp's method first.  If the PHY
6755  * can't be detected that way, fall back to bm's method.
6756 */
6757 switch (prodid) {
6758 case PCI_PRODUCT_INTEL_PCH_M_LM:
6759 case PCI_PRODUCT_INTEL_PCH_M_LC:
6760 /* 82577 */
6761 sc->sc_phytype = WMPHY_82577;
6762 mii->mii_readreg = wm_gmii_hv_readreg;
6763 mii->mii_writereg = wm_gmii_hv_writereg;
6764 break;
6765 case PCI_PRODUCT_INTEL_PCH_D_DM:
6766 case PCI_PRODUCT_INTEL_PCH_D_DC:
6767 /* 82578 */
6768 sc->sc_phytype = WMPHY_82578;
6769 mii->mii_readreg = wm_gmii_hv_readreg;
6770 mii->mii_writereg = wm_gmii_hv_writereg;
6771 break;
6772 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6773 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6774 /* 82579 */
6775 sc->sc_phytype = WMPHY_82579;
6776 mii->mii_readreg = wm_gmii_hv_readreg;
6777 mii->mii_writereg = wm_gmii_hv_writereg;
6778 break;
6779 case PCI_PRODUCT_INTEL_I217_LM:
6780 case PCI_PRODUCT_INTEL_I217_V:
6781 case PCI_PRODUCT_INTEL_I218_LM:
6782 case PCI_PRODUCT_INTEL_I218_V:
6783 /* I21[78] */
6784 mii->mii_readreg = wm_gmii_hv_readreg;
6785 mii->mii_writereg = wm_gmii_hv_writereg;
6786 break;
6787 case PCI_PRODUCT_INTEL_82801I_BM:
6788 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6789 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6790 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6791 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6792 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6793 /* 82567 */
6794 sc->sc_phytype = WMPHY_BM;
6795 mii->mii_readreg = wm_gmii_bm_readreg;
6796 mii->mii_writereg = wm_gmii_bm_writereg;
6797 break;
6798 default:
6799 if (((sc->sc_flags & WM_F_SGMII) != 0)
6800 	    && !wm_sgmii_uses_mdio(sc)) {
6801 mii->mii_readreg = wm_sgmii_readreg;
6802 mii->mii_writereg = wm_sgmii_writereg;
6803 } else if (sc->sc_type >= WM_T_80003) {
6804 mii->mii_readreg = wm_gmii_i80003_readreg;
6805 mii->mii_writereg = wm_gmii_i80003_writereg;
6806 } else if (sc->sc_type >= WM_T_I210) {
6807 mii->mii_readreg = wm_gmii_i82544_readreg;
6808 mii->mii_writereg = wm_gmii_i82544_writereg;
6809 } else if (sc->sc_type >= WM_T_82580) {
6810 sc->sc_phytype = WMPHY_82580;
6811 mii->mii_readreg = wm_gmii_82580_readreg;
6812 mii->mii_writereg = wm_gmii_82580_writereg;
6813 } else if (sc->sc_type >= WM_T_82544) {
6814 mii->mii_readreg = wm_gmii_i82544_readreg;
6815 mii->mii_writereg = wm_gmii_i82544_writereg;
6816 } else {
6817 mii->mii_readreg = wm_gmii_i82543_readreg;
6818 mii->mii_writereg = wm_gmii_i82543_writereg;
6819 }
6820 break;
6821 }
6822 mii->mii_statchg = wm_gmii_statchg;
6823
6824 wm_gmii_reset(sc);
6825
6826 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6827 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6828 wm_gmii_mediastatus);
6829
6830 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6831 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6832 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6833 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6834 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6835 /* Attach only one port */
6836 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6837 MII_OFFSET_ANY, MIIF_DOPAUSE);
6838 } else {
6839 int i, id;
6840 uint32_t ctrl_ext;
6841
6842 id = wm_get_phy_id_82575(sc);
6843 if (id != -1) {
6844 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6845 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6846 }
6847 if ((id == -1)
6848 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6849 /* Power on sgmii phy if it is disabled */
6850 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6851 CSR_WRITE(sc, WMREG_CTRL_EXT,
6852 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6853 CSR_WRITE_FLUSH(sc);
6854 delay(300*1000); /* XXX too long */
6855
6856 				/* Try PHY addresses 1 through 7 */
6857 for (i = 1; i < 8; i++)
6858 mii_attach(sc->sc_dev, &sc->sc_mii,
6859 0xffffffff, i, MII_OFFSET_ANY,
6860 MIIF_DOPAUSE);
6861
6862 /* restore previous sfp cage power state */
6863 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6864 }
6865 }
6866 } else {
6867 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6868 MII_OFFSET_ANY, MIIF_DOPAUSE);
6869 }
6870
6871 /*
6872  * If the MAC is a PCH2 or PCH_LPT and no MII PHY was detected, apply
6873  * the wm_set_mdio_slow_mode_hv() workaround and retry.
6874 */
6875 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6876 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6877 wm_set_mdio_slow_mode_hv(sc);
6878 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6879 MII_OFFSET_ANY, MIIF_DOPAUSE);
6880 }
6881
6882 /*
6883 * (For ICH8 variants)
6884  * If PHY detection failed, use the BM read/write functions and retry.
6885 */
6886 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6887 /* if failed, retry with *_bm_* */
6888 mii->mii_readreg = wm_gmii_bm_readreg;
6889 mii->mii_writereg = wm_gmii_bm_writereg;
6890
6891 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6892 MII_OFFSET_ANY, MIIF_DOPAUSE);
6893 }
6894
6895 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6896 		/* No PHY was found */
6897 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6898 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6899 sc->sc_phytype = WMPHY_NONE;
6900 } else {
6901 /*
6902 * PHY Found!
6903 * Check PHY type.
6904 */
6905 uint32_t model;
6906 struct mii_softc *child;
6907
6908 child = LIST_FIRST(&mii->mii_phys);
6909 if (device_is_a(child->mii_dev, "igphy")) {
6910 struct igphy_softc *isc = (struct igphy_softc *)child;
6911
6912 model = isc->sc_mii.mii_mpd_model;
6913 if (model == MII_MODEL_yyINTEL_I82566)
6914 sc->sc_phytype = WMPHY_IGP_3;
6915 }
6916
6917 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6918 }
6919 }
6920
6921 /*
6922 * wm_gmii_mediastatus: [ifmedia interface function]
6923 *
6924 * Get the current interface media status on a 1000BASE-T device.
6925 */
6926 static void
6927 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6928 {
6929 struct wm_softc *sc = ifp->if_softc;
6930
6931 ether_mediastatus(ifp, ifmr);
6932 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6933 | sc->sc_flowflags;
6934 }
6935
6936 /*
6937 * wm_gmii_mediachange: [ifmedia interface function]
6938 *
6939 * Set hardware to newly-selected media on a 1000BASE-T device.
6940 */
6941 static int
6942 wm_gmii_mediachange(struct ifnet *ifp)
6943 {
6944 struct wm_softc *sc = ifp->if_softc;
6945 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6946 int rc;
6947
6948 if ((ifp->if_flags & IFF_UP) == 0)
6949 return 0;
6950
6951 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6952 sc->sc_ctrl |= CTRL_SLU;
6953 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6954 || (sc->sc_type > WM_T_82543)) {
6955 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6956 } else {
6957 sc->sc_ctrl &= ~CTRL_ASDE;
6958 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6959 if (ife->ifm_media & IFM_FDX)
6960 sc->sc_ctrl |= CTRL_FD;
6961 switch (IFM_SUBTYPE(ife->ifm_media)) {
6962 case IFM_10_T:
6963 sc->sc_ctrl |= CTRL_SPEED_10;
6964 break;
6965 case IFM_100_TX:
6966 sc->sc_ctrl |= CTRL_SPEED_100;
6967 break;
6968 case IFM_1000_T:
6969 sc->sc_ctrl |= CTRL_SPEED_1000;
6970 break;
6971 default:
6972 panic("wm_gmii_mediachange: bad media 0x%x",
6973 ife->ifm_media);
6974 }
6975 }
6976 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6977 if (sc->sc_type <= WM_T_82543)
6978 wm_gmii_reset(sc);
6979
6980 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6981 return 0;
6982 return rc;
6983 }
6984
6985 #define MDI_IO CTRL_SWDPIN(2)
6986 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6987 #define MDI_CLK CTRL_SWDPIN(3)
6988
6989 static void
6990 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6991 {
6992 uint32_t i, v;
6993
6994 v = CSR_READ(sc, WMREG_CTRL);
6995 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6996 v |= MDI_DIR | CTRL_SWDPIO(3);
6997
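	/*
	 * Clock the bits out MSB first: present each bit on MDI_IO, then
	 * pulse MDI_CLK high and low with 10us delays.
	 */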
6998 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6999 if (data & i)
7000 v |= MDI_IO;
7001 else
7002 v &= ~MDI_IO;
7003 CSR_WRITE(sc, WMREG_CTRL, v);
7004 CSR_WRITE_FLUSH(sc);
7005 delay(10);
7006 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7007 CSR_WRITE_FLUSH(sc);
7008 delay(10);
7009 CSR_WRITE(sc, WMREG_CTRL, v);
7010 CSR_WRITE_FLUSH(sc);
7011 delay(10);
7012 }
7013 }
7014
7015 static uint32_t
7016 i82543_mii_recvbits(struct wm_softc *sc)
7017 {
7018 uint32_t v, i, data = 0;
7019
7020 v = CSR_READ(sc, WMREG_CTRL);
7021 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7022 v |= CTRL_SWDPIO(3);
7023
7024 CSR_WRITE(sc, WMREG_CTRL, v);
7025 CSR_WRITE_FLUSH(sc);
7026 delay(10);
7027 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7028 CSR_WRITE_FLUSH(sc);
7029 delay(10);
7030 CSR_WRITE(sc, WMREG_CTRL, v);
7031 CSR_WRITE_FLUSH(sc);
7032 delay(10);
7033
7034 for (i = 0; i < 16; i++) {
7035 data <<= 1;
7036 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7037 CSR_WRITE_FLUSH(sc);
7038 delay(10);
7039 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
7040 data |= 1;
7041 CSR_WRITE(sc, WMREG_CTRL, v);
7042 CSR_WRITE_FLUSH(sc);
7043 delay(10);
7044 }
7045
7046 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7047 CSR_WRITE_FLUSH(sc);
7048 delay(10);
7049 CSR_WRITE(sc, WMREG_CTRL, v);
7050 CSR_WRITE_FLUSH(sc);
7051 delay(10);
7052
7053 return data;
7054 }
7055
7056 #undef MDI_IO
7057 #undef MDI_DIR
7058 #undef MDI_CLK
7059
7060 /*
7061 * wm_gmii_i82543_readreg: [mii interface function]
7062 *
7063 * Read a PHY register on the GMII (i82543 version).
7064 */
7065 static int
7066 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
7067 {
7068 struct wm_softc *sc = device_private(self);
7069 int rv;
7070
7071 i82543_mii_sendbits(sc, 0xffffffffU, 32);
7072 i82543_mii_sendbits(sc, reg | (phy << 5) |
7073 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
7074 rv = i82543_mii_recvbits(sc) & 0xffff;
7075
7076 DPRINTF(WM_DEBUG_GMII,
7077 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
7078 device_xname(sc->sc_dev), phy, reg, rv));
7079
7080 return rv;
7081 }
7082
7083 /*
7084 * wm_gmii_i82543_writereg: [mii interface function]
7085 *
7086 * Write a PHY register on the GMII (i82543 version).
7087 */
7088 static void
7089 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
7090 {
7091 struct wm_softc *sc = device_private(self);
7092
7093 i82543_mii_sendbits(sc, 0xffffffffU, 32);
7094 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
7095 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
7096 (MII_COMMAND_START << 30), 32);
7097 }
7098
7099 /*
7100 * wm_gmii_i82544_readreg: [mii interface function]
7101 *
7102 * Read a PHY register on the GMII.
7103 */
7104 static int
7105 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
7106 {
7107 struct wm_softc *sc = device_private(self);
7108 uint32_t mdic = 0;
7109 int i, rv;
7110
7111 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
7112 MDIC_REGADD(reg));
7113
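	/* Poll for the ready bit, checking every 50us. */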
7114 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7115 mdic = CSR_READ(sc, WMREG_MDIC);
7116 if (mdic & MDIC_READY)
7117 break;
7118 delay(50);
7119 }
7120
7121 if ((mdic & MDIC_READY) == 0) {
7122 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
7123 device_xname(sc->sc_dev), phy, reg);
7124 rv = 0;
7125 } else if (mdic & MDIC_E) {
7126 #if 0 /* This is normal if no PHY is present. */
7127 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
7128 device_xname(sc->sc_dev), phy, reg);
7129 #endif
7130 rv = 0;
7131 } else {
7132 rv = MDIC_DATA(mdic);
7133 if (rv == 0xffff)
7134 rv = 0;
7135 }
7136
7137 return rv;
7138 }
7139
7140 /*
7141 * wm_gmii_i82544_writereg: [mii interface function]
7142 *
7143 * Write a PHY register on the GMII.
7144 */
7145 static void
7146 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
7147 {
7148 struct wm_softc *sc = device_private(self);
7149 uint32_t mdic = 0;
7150 int i;
7151
7152 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
7153 MDIC_REGADD(reg) | MDIC_DATA(val));
7154
7155 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7156 mdic = CSR_READ(sc, WMREG_MDIC);
7157 if (mdic & MDIC_READY)
7158 break;
7159 delay(50);
7160 }
7161
7162 if ((mdic & MDIC_READY) == 0)
7163 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7164 device_xname(sc->sc_dev), phy, reg);
7165 else if (mdic & MDIC_E)
7166 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7167 device_xname(sc->sc_dev), phy, reg);
7168 }
7169
7170 /*
7171 * wm_gmii_i80003_readreg: [mii interface function]
7172 *
7173  *	Read a PHY register on the kumeran bus.
7174  *	This could be handled by the PHY layer if we didn't have to lock
7175  *	the resource ...
7176 */
7177 static int
7178 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7179 {
7180 struct wm_softc *sc = device_private(self);
7181 int sem;
7182 int rv;
7183
7184 if (phy != 1) /* only one PHY on kumeran bus */
7185 return 0;
7186
7187 sem = swfwphysem[sc->sc_funcid];
7188 if (wm_get_swfw_semaphore(sc, sem)) {
7189 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7190 __func__);
7191 return 0;
7192 }
7193
7194 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7195 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7196 reg >> GG82563_PAGE_SHIFT);
7197 } else {
7198 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7199 reg >> GG82563_PAGE_SHIFT);
7200 }
7201 	/* Wait an extra 200us to work around a ready-bit bug in the MDIC register */
7202 delay(200);
7203 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7204 delay(200);
7205
7206 wm_put_swfw_semaphore(sc, sem);
7207 return rv;
7208 }
7209
7210 /*
7211 * wm_gmii_i80003_writereg: [mii interface function]
7212 *
7213  *	Write a PHY register on the kumeran bus.
7214  *	This could be handled by the PHY layer if we didn't have to lock
7215  *	the resource ...
7216 */
7217 static void
7218 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7219 {
7220 struct wm_softc *sc = device_private(self);
7221 int sem;
7222
7223 if (phy != 1) /* only one PHY on kumeran bus */
7224 return;
7225
7226 sem = swfwphysem[sc->sc_funcid];
7227 if (wm_get_swfw_semaphore(sc, sem)) {
7228 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7229 __func__);
7230 return;
7231 }
7232
7233 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7234 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7235 reg >> GG82563_PAGE_SHIFT);
7236 } else {
7237 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7238 reg >> GG82563_PAGE_SHIFT);
7239 }
7240 	/* Wait an extra 200us to work around a ready-bit bug in the MDIC register */
7241 delay(200);
7242 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7243 delay(200);
7244
7245 wm_put_swfw_semaphore(sc, sem);
7246 }
7247
7248 /*
7249 * wm_gmii_bm_readreg: [mii interface function]
7250 *
7251  *	Read a PHY register on the BM PHYs (82567 variants).
7252  *	This could be handled by the PHY layer if we didn't have to lock
7253  *	the resource ...
7254 */
7255 static int
7256 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7257 {
7258 struct wm_softc *sc = device_private(self);
7259 int sem;
7260 int rv;
7261
7262 sem = swfwphysem[sc->sc_funcid];
7263 if (wm_get_swfw_semaphore(sc, sem)) {
7264 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7265 __func__);
7266 return 0;
7267 }
7268
7269 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7270 if (phy == 1)
7271 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7272 reg);
7273 else
7274 wm_gmii_i82544_writereg(self, phy,
7275 GG82563_PHY_PAGE_SELECT,
7276 reg >> GG82563_PAGE_SHIFT);
7277 }
7278
7279 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7280 wm_put_swfw_semaphore(sc, sem);
7281 return rv;
7282 }
7283
7284 /*
7285 * wm_gmii_bm_writereg: [mii interface function]
7286 *
7287  *	Write a PHY register on the BM PHYs (82567 variants).
7288  *	This could be handled by the PHY layer if we didn't have to lock
7289  *	the resource ...
7290 */
7291 static void
7292 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7293 {
7294 struct wm_softc *sc = device_private(self);
7295 int sem;
7296
7297 sem = swfwphysem[sc->sc_funcid];
7298 if (wm_get_swfw_semaphore(sc, sem)) {
7299 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7300 __func__);
7301 return;
7302 }
7303
7304 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7305 if (phy == 1)
7306 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7307 reg);
7308 else
7309 wm_gmii_i82544_writereg(self, phy,
7310 GG82563_PHY_PAGE_SELECT,
7311 reg >> GG82563_PAGE_SHIFT);
7312 }
7313
7314 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7315 wm_put_swfw_semaphore(sc, sem);
7316 }
7317
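/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register in page 800:
 *	enable wakeup-register access through the page 769 enable register,
 *	select page 800, write the target register number, transfer the
 *	data, then restore the page 769 enable register.
 */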
7318 static void
7319 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7320 {
7321 struct wm_softc *sc = device_private(self);
7322 uint16_t regnum = BM_PHY_REG_NUM(offset);
7323 uint16_t wuce;
7324
7325 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7326 if (sc->sc_type == WM_T_PCH) {
7327 		/* XXX The e1000 driver does nothing here... why? */
7328 }
7329
7330 /* Set page 769 */
7331 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7332 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7333
7334 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7335
7336 wuce &= ~BM_WUC_HOST_WU_BIT;
7337 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7338 wuce | BM_WUC_ENABLE_BIT);
7339
7340 /* Select page 800 */
7341 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7342 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7343
7344 	/* Write the target register number within page 800 */
7345 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7346
7347 if (rd)
7348 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7349 else
7350 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7351
7352 /* Set page 769 */
7353 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7354 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7355
7356 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7357 }
7358
7359 /*
7360 * wm_gmii_hv_readreg: [mii interface function]
7361 *
7362  *	Read a PHY register on the HV PHYs (82577/82578/82579, I217/I218).
7363  *	This could be handled by the PHY layer if we didn't have to lock
7364  *	the resource ...
7365 */
7366 static int
7367 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7368 {
7369 struct wm_softc *sc = device_private(self);
7370 uint16_t page = BM_PHY_REG_PAGE(reg);
7371 uint16_t regnum = BM_PHY_REG_NUM(reg);
7372 uint16_t val;
7373 int rv;
7374
7375 if (wm_get_swfwhw_semaphore(sc)) {
7376 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7377 __func__);
7378 return 0;
7379 }
7380
7381 /* XXX Workaround failure in MDIO access while cable is disconnected */
7382 if (sc->sc_phytype == WMPHY_82577) {
7383 /* XXX must write */
7384 }
7385
7386 	/* Page 800 works differently from the rest, so it has its own function */
7387 if (page == BM_WUC_PAGE) {
7388 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7389 		return val;
7390 }
7391
7392 /*
7393  * Pages below 768 work differently from the rest and would need their
7394  * own function; that access method isn't implemented, so just complain
7395 */
7396 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7397 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7398 		return 0;
7399 }
7400
7401 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7402 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7403 page << BME1000_PAGE_SHIFT);
7404 }
7405
7406 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7407 wm_put_swfwhw_semaphore(sc);
7408 return rv;
7409 }
7410
7411 /*
7412 * wm_gmii_hv_writereg: [mii interface function]
7413 *
7414  *	Write a PHY register on the HV PHYs (82577/82578/82579, I217/I218).
7415  *	This could be handled by the PHY layer if we didn't have to lock
7416  *	the resource ...
7417 */
7418 static void
7419 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7420 {
7421 struct wm_softc *sc = device_private(self);
7422 uint16_t page = BM_PHY_REG_PAGE(reg);
7423 uint16_t regnum = BM_PHY_REG_NUM(reg);
7424
7425 if (wm_get_swfwhw_semaphore(sc)) {
7426 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7427 __func__);
7428 return;
7429 }
7430
7431 /* XXX Workaround failure in MDIO access while cable is disconnected */
7432
7433 	/* Page 800 works differently from the rest, so it has its own function */
7434 if (page == BM_WUC_PAGE) {
7435 uint16_t tmp;
7436
7437 tmp = val;
7438 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7439 		return;
7440 }
7441
7442 /*
7443  * Pages below 768 work differently from the rest and would need their
7444  * own function; that access method isn't implemented, so just complain
7445 */
7446 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7447 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7448 		return;
7449 }
7450
7451 /*
7452 * XXX Workaround MDIO accesses being disabled after entering IEEE
7453 * Power Down (whenever bit 11 of the PHY control register is set)
7454 */
7455
7456 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7457 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7458 page << BME1000_PAGE_SHIFT);
7459 }
7460
7461 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7462 wm_put_swfwhw_semaphore(sc);
7463 }
7464
7465 /*
7466 * wm_sgmii_uses_mdio
7467 *
7468 * Check whether the transaction is to the internal PHY or the external
7469 * MDIO interface. Return true if it's MDIO.
7470 */
7471 static bool
7472 wm_sgmii_uses_mdio(struct wm_softc *sc)
7473 {
7474 uint32_t reg;
7475 bool ismdio = false;
7476
7477 switch (sc->sc_type) {
7478 case WM_T_82575:
7479 case WM_T_82576:
7480 reg = CSR_READ(sc, WMREG_MDIC);
7481 ismdio = ((reg & MDIC_DEST) != 0);
7482 break;
7483 case WM_T_82580:
7484 case WM_T_82580ER:
7485 case WM_T_I350:
7486 case WM_T_I354:
7487 case WM_T_I210:
7488 case WM_T_I211:
7489 reg = CSR_READ(sc, WMREG_MDICNFG);
7490 ismdio = ((reg & MDICNFG_DEST) != 0);
7491 break;
7492 default:
7493 break;
7494 }
7495
7496 return ismdio;
7497 }
7498
7499 /*
7500 * wm_sgmii_readreg: [mii interface function]
7501 *
7502  *	Read a PHY register on the SGMII, through the I2CCMD interface.
7503  *	This could be handled by the PHY layer if we didn't have to lock
7504  *	the resource ...
7505 */
7506 static int
7507 wm_sgmii_readreg(device_t self, int phy, int reg)
7508 {
7509 struct wm_softc *sc = device_private(self);
7510 uint32_t i2ccmd;
7511 int i, rv;
7512
7513 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7514 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7515 __func__);
7516 return 0;
7517 }
7518
7519 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7520 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7521 | I2CCMD_OPCODE_READ;
7522 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7523
7524 /* Poll the ready bit */
7525 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7526 delay(50);
7527 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7528 if (i2ccmd & I2CCMD_READY)
7529 break;
7530 }
7531 if ((i2ccmd & I2CCMD_READY) == 0)
7532 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7533 if ((i2ccmd & I2CCMD_ERROR) != 0)
7534 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7535
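	/* The two data bytes in I2CCMD come back swapped; restore them. */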
7536 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7537
7538 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7539 return rv;
7540 }
7541
7542 /*
7543 * wm_sgmii_writereg: [mii interface function]
7544 *
7545  *	Write a PHY register on the SGMII, through the I2CCMD interface.
7546  *	This could be handled by the PHY layer if we didn't have to lock
7547  *	the resource ...
7548 */
7549 static void
7550 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7551 {
7552 struct wm_softc *sc = device_private(self);
7553 uint32_t i2ccmd;
7554 int i;
7555
7556 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7557 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7558 __func__);
7559 return;
7560 }
7561
7562 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7563 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7564 | I2CCMD_OPCODE_WRITE;
7565 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7566
7567 /* Poll the ready bit */
7568 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7569 delay(50);
7570 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7571 if (i2ccmd & I2CCMD_READY)
7572 break;
7573 }
7574 if ((i2ccmd & I2CCMD_READY) == 0)
7575 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7576 if ((i2ccmd & I2CCMD_ERROR) != 0)
7577 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7578
7579 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);	/* release what we acquired */
7580 }
7581
7582 /*
7583 * wm_gmii_82580_readreg: [mii interface function]
7584 *
7585 * Read a PHY register on the 82580 and I350.
7586 * This could be handled by the PHY layer if we didn't have to lock the
7587  *	resource ...
7588 */
7589 static int
7590 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7591 {
7592 struct wm_softc *sc = device_private(self);
7593 int sem;
7594 int rv;
7595
7596 sem = swfwphysem[sc->sc_funcid];
7597 if (wm_get_swfw_semaphore(sc, sem)) {
7598 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7599 __func__);
7600 return 0;
7601 }
7602
7603 rv = wm_gmii_i82544_readreg(self, phy, reg);
7604
7605 wm_put_swfw_semaphore(sc, sem);
7606 return rv;
7607 }
7608
7609 /*
7610 * wm_gmii_82580_writereg: [mii interface function]
7611 *
7612 * Write a PHY register on the 82580 and I350.
7613 * This could be handled by the PHY layer if we didn't have to lock the
7614  *	resource ...
7615 */
7616 static void
7617 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7618 {
7619 struct wm_softc *sc = device_private(self);
7620 int sem;
7621
7622 sem = swfwphysem[sc->sc_funcid];
7623 if (wm_get_swfw_semaphore(sc, sem)) {
7624 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7625 __func__);
7626 return;
7627 }
7628
7629 wm_gmii_i82544_writereg(self, phy, reg, val);
7630
7631 wm_put_swfw_semaphore(sc, sem);
7632 }
7633
7634 /*
7635 * wm_gmii_statchg: [mii interface function]
7636 *
7637 * Callback from MII layer when media changes.
7638 */
7639 static void
7640 wm_gmii_statchg(struct ifnet *ifp)
7641 {
7642 struct wm_softc *sc = ifp->if_softc;
7643 struct mii_data *mii = &sc->sc_mii;
7644
7645 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7646 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7647 sc->sc_fcrtl &= ~FCRTL_XONE;
7648
7649 /*
7650 * Get flow control negotiation result.
7651 */
7652 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7653 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7654 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7655 mii->mii_media_active &= ~IFM_ETH_FMASK;
7656 }
7657
7658 if (sc->sc_flowflags & IFM_FLOW) {
7659 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7660 sc->sc_ctrl |= CTRL_TFCE;
7661 sc->sc_fcrtl |= FCRTL_XONE;
7662 }
7663 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7664 sc->sc_ctrl |= CTRL_RFCE;
7665 }
7666
7667 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7668 DPRINTF(WM_DEBUG_LINK,
7669 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7670 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7671 } else {
7672 DPRINTF(WM_DEBUG_LINK,
7673 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7674 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7675 }
7676
7677 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7678 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7679 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7680 : WMREG_FCRTL, sc->sc_fcrtl);
7681 if (sc->sc_type == WM_T_80003) {
7682 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7683 case IFM_1000_T:
7684 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7685 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7686 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7687 break;
7688 default:
7689 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7690 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7691 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7692 break;
7693 }
7694 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7695 }
7696 }
7697
7698 /*
7699 * wm_kmrn_readreg:
7700 *
7701 * Read a kumeran register
7702 */
7703 static int
7704 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7705 {
7706 int rv;
7707
7708 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
7709 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7710 aprint_error_dev(sc->sc_dev,
7711 "%s: failed to get semaphore\n", __func__);
7712 return 0;
7713 }
7714 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7715 if (wm_get_swfwhw_semaphore(sc)) {
7716 aprint_error_dev(sc->sc_dev,
7717 "%s: failed to get semaphore\n", __func__);
7718 return 0;
7719 }
7720 }
7721
7722 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7723 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7724 KUMCTRLSTA_REN);
7725 CSR_WRITE_FLUSH(sc);
7726 delay(2);
7727
7728 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7729
7730 	if (sc->sc_flags & WM_F_LOCK_SWFW)
7731 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7732 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
7733 wm_put_swfwhw_semaphore(sc);
7734
7735 return rv;
7736 }
7737
7738 /*
7739 * wm_kmrn_writereg:
7740 *
7741 * Write a kumeran register
7742 */
7743 static void
7744 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7745 {
7746
7747 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
7748 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7749 aprint_error_dev(sc->sc_dev,
7750 "%s: failed to get semaphore\n", __func__);
7751 return;
7752 }
7753 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7754 if (wm_get_swfwhw_semaphore(sc)) {
7755 aprint_error_dev(sc->sc_dev,
7756 "%s: failed to get semaphore\n", __func__);
7757 return;
7758 }
7759 }
7760
7761 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7762 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7763 (val & KUMCTRLSTA_MASK));
7764
7765 	if (sc->sc_flags & WM_F_LOCK_SWFW)
7766 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7767 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
7768 wm_put_swfwhw_semaphore(sc);
7769 }
7770
7771 static int
7772 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7773 {
7774 uint32_t eecd = 0;
7775
7776 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7777 || sc->sc_type == WM_T_82583) {
7778 eecd = CSR_READ(sc, WMREG_EECD);
7779
7780 /* Isolate bits 15 & 16 */
7781 eecd = ((eecd >> 15) & 0x03);
7782
7783 /* If both bits are set, device is Flash type */
7784 if (eecd == 0x03)
7785 return 0;
7786 }
7787 return 1;
7788 }
7789
7790 static int
7791 wm_get_swsm_semaphore(struct wm_softc *sc)
7792 {
7793 int32_t timeout;
7794 uint32_t swsm;
7795
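	/*
	 * Arbitration is two-stage: first wait for the host-side SMBI
	 * bit to clear, then claim SWESMBI by writing it and reading it
	 * back.  If the write does not stick, the semaphore is held
	 * elsewhere, so keep retrying until the timeout expires.
	 */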
7796 /* Get the SW semaphore. */
7797 timeout = 1000 + 1; /* XXX */
7798 while (timeout) {
7799 swsm = CSR_READ(sc, WMREG_SWSM);
7800
7801 if ((swsm & SWSM_SMBI) == 0)
7802 break;
7803
7804 delay(50);
7805 timeout--;
7806 }
7807
7808 if (timeout == 0) {
7809 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
7810 return 1;
7811 }
7812
7813 /* Get the FW semaphore. */
7814 timeout = 1000 + 1; /* XXX */
7815 while (timeout) {
7816 swsm = CSR_READ(sc, WMREG_SWSM);
7817 swsm |= SWSM_SWESMBI;
7818 CSR_WRITE(sc, WMREG_SWSM, swsm);
7819 		/* If we managed to set the bit, we got the semaphore. */
7820 swsm = CSR_READ(sc, WMREG_SWSM);
7821 if (swsm & SWSM_SWESMBI)
7822 break;
7823
7824 delay(50);
7825 timeout--;
7826 }
7827
7828 if (timeout == 0) {
7829 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
7830 /* Release semaphores */
7831 wm_put_swsm_semaphore(sc);
7832 return 1;
7833 }
7834 return 0;
7835 }
7836
7837 static void
7838 wm_put_swsm_semaphore(struct wm_softc *sc)
7839 {
7840 uint32_t swsm;
7841
7842 swsm = CSR_READ(sc, WMREG_SWSM);
7843 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
7844 CSR_WRITE(sc, WMREG_SWSM, swsm);
7845 }
7846
7847 static int
7848 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7849 {
7850 uint32_t swfw_sync;
7851 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7852 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7853 	int timeout;
7854
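	/*
	 * SW_FW_SYNC carries a software bit and a firmware bit for each
	 * resource; the resource is free only when both are clear, and
	 * we claim it by setting the software bit.  Access to
	 * SW_FW_SYNC itself is serialized through the SWSM semaphore
	 * when WM_F_LOCK_SWSM is set.
	 */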
7855 for (timeout = 0; timeout < 200; timeout++) {
7856 if (sc->sc_flags & WM_F_LOCK_SWSM) {
7857 if (wm_get_swsm_semaphore(sc)) {
7858 aprint_error_dev(sc->sc_dev,
7859 "%s: failed to get semaphore\n",
7860 __func__);
7861 return 1;
7862 }
7863 }
7864 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7865 if ((swfw_sync & (swmask | fwmask)) == 0) {
7866 swfw_sync |= swmask;
7867 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7868 if (sc->sc_flags & WM_F_LOCK_SWSM)
7869 wm_put_swsm_semaphore(sc);
7870 return 0;
7871 }
7872 if (sc->sc_flags & WM_F_LOCK_SWSM)
7873 wm_put_swsm_semaphore(sc);
7874 delay(5000);
7875 }
7876 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7877 device_xname(sc->sc_dev), mask, swfw_sync);
7878 return 1;
7879 }
7880
7881 static void
7882 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7883 {
7884 uint32_t swfw_sync;
7885
7886 if (sc->sc_flags & WM_F_LOCK_SWSM) {
7887 while (wm_get_swsm_semaphore(sc) != 0)
7888 continue;
7889 }
7890 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7891 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7892 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7893 if (sc->sc_flags & WM_F_LOCK_SWSM)
7894 wm_put_swsm_semaphore(sc);
7895 }
7896
7897 static int
7898 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7899 {
7900 uint32_t ext_ctrl;
7901 	int timeout;
7902
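	/*
	 * Set the SWFLAG ownership bit in EXTCNFCTR and read it back.
	 * If the bit sticks, we own the semaphore; otherwise another
	 * agent (e.g. firmware) still holds it, so retry until the
	 * timeout expires.
	 */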
7903 for (timeout = 0; timeout < 200; timeout++) {
7904 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7905 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7906 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7907
7908 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7909 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7910 return 0;
7911 delay(5000);
7912 }
7913 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7914 device_xname(sc->sc_dev), ext_ctrl);
7915 return 1;
7916 }
7917
7918 static void
7919 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7920 {
7921 uint32_t ext_ctrl;
7922 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7923 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7924 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7925 }
7926
7927 static int
7928 wm_get_hw_semaphore_82573(struct wm_softc *sc)
7929 {
7930 int i = 0;
7931 uint32_t reg;
7932
7933 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7934 do {
7935 CSR_WRITE(sc, WMREG_EXTCNFCTR,
7936 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
7937 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7938 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
7939 break;
7940 delay(2*1000);
7941 i++;
7942 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
7943
7944 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
7945 wm_put_hw_semaphore_82573(sc);
7946 log(LOG_ERR, "%s: Driver can't access the PHY\n",
7947 device_xname(sc->sc_dev));
7948 return -1;
7949 }
7950
7951 return 0;
7952 }
7953
7954 static void
7955 wm_put_hw_semaphore_82573(struct wm_softc *sc)
7956 {
7957 uint32_t reg;
7958
7959 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7960 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
7961 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
7962 }
7963
7964 static int
7965 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7966 {
7967 uint32_t eecd;
7968 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7969 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7970 uint8_t sig_byte = 0;
7971
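	/*
	 * act_offset is the byte offset of the high (signature) byte of
	 * the NVM signature word within bank 0; the corresponding byte
	 * in bank 1 lives one flash bank (in bytes) above it.
	 */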
7972 switch (sc->sc_type) {
7973 case WM_T_ICH8:
7974 case WM_T_ICH9:
7975 eecd = CSR_READ(sc, WMREG_EECD);
7976 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7977 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7978 return 0;
7979 }
7980 /* FALLTHROUGH */
7981 default:
7982 /* Default to 0 */
7983 *bank = 0;
7984
7985 /* Check bank 0 */
7986 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7987 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7988 *bank = 0;
7989 return 0;
7990 }
7991
7992 /* Check bank 1 */
7993 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7994 &sig_byte);
7995 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7996 *bank = 1;
7997 return 0;
7998 }
7999 }
8000
8001 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8002 device_xname(sc->sc_dev)));
8003 return -1;
8004 }
8005
8006 /******************************************************************************
8007 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8008 * register.
8009 *
8010 * sc - Struct containing variables accessed by shared code
8011 * offset - offset of word in the EEPROM to read
8012 * data - word read from the EEPROM
8013 * words - number of words to read
8014 *****************************************************************************/
8015 static int
8016 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8017 {
8018 int32_t error = 0;
8019 uint32_t flash_bank = 0;
8020 uint32_t act_offset = 0;
8021 uint32_t bank_offset = 0;
8022 uint16_t word = 0;
8023 uint16_t i = 0;
8024
8025 /* We need to know which is the valid flash bank. In the event
8026 * that we didn't allocate eeprom_shadow_ram, we may not be
8027 * managing flash_bank. So it cannot be trusted and needs
8028 * to be updated with each read.
8029 */
8030 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
8031 if (error) {
8032 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
8033 __func__);
8034 flash_bank = 0;
8035 }
8036
8037 /*
8038 * Adjust offset appropriately if we're on bank 1 - adjust for word
8039 * size
8040 */
8041 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8042
8043 error = wm_get_swfwhw_semaphore(sc);
8044 if (error) {
8045 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8046 __func__);
8047 return error;
8048 }
8049
8050 for (i = 0; i < words; i++) {
8051 /* The NVM part needs a byte offset, hence * 2 */
8052 act_offset = bank_offset + ((offset + i) * 2);
8053 error = wm_read_ich8_word(sc, act_offset, &word);
8054 if (error) {
8055 aprint_error_dev(sc->sc_dev,
8056 "%s: failed to read NVM\n", __func__);
8057 break;
8058 }
8059 data[i] = word;
8060 }
8061
8062 wm_put_swfwhw_semaphore(sc);
8063 return error;
8064 }
8065
8066 /******************************************************************************
8067 * This function does initial flash setup so that a new read/write/erase cycle
8068 * can be started.
8069 *
8070 * sc - The pointer to the hw structure
8071 ****************************************************************************/
8072 static int32_t
8073 wm_ich8_cycle_init(struct wm_softc *sc)
8074 {
8075 uint16_t hsfsts;
8076 int32_t error = 1;
8077 int32_t i = 0;
8078
8079 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8080
8081 	/* Check the Flash Descriptor Valid bit in HW status */
8082 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8083 return error;
8084 }
8085
8086 /* Clear FCERR in Hw status by writing 1 */
8087 /* Clear DAEL in Hw status by writing a 1 */
8088 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8089
8090 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8091
8092 	/*
8093 	 * Either we should have a hardware SPI cycle-in-progress bit to
8094 	 * check against in order to start a new cycle, or the FDONE bit
8095 	 * should read as 1 after a hardware reset, so that it can be used
8096 	 * to tell whether a cycle is in progress or has completed.  We
8097 	 * should also have a software semaphore mechanism guarding FDONE
8098 	 * and the cycle-in-progress bit, so that access to those bits by
8099 	 * two threads is serialized and two threads cannot start a cycle
8100 	 * at the same time.
8101 	 */
8102
8103 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8104 /*
8105 * There is no cycle running at present, so we can start a
8106 * cycle
8107 */
8108
8109 /* Begin by setting Flash Cycle Done. */
8110 hsfsts |= HSFSTS_DONE;
8111 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8112 error = 0;
8113 } else {
8114 		/*
8115 		 * Otherwise, poll for some time so the current cycle has a
8116 		 * chance to end before giving up.
8117 		 */
8118 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8119 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8120 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8121 error = 0;
8122 break;
8123 }
8124 delay(1);
8125 }
8126 if (error == 0) {
8127 			/*
8128 			 * The previous cycle completed within the timeout,
8129 			 * so now set the Flash Cycle Done bit.
8130 			 */
8131 hsfsts |= HSFSTS_DONE;
8132 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8133 }
8134 }
8135 return error;
8136 }
8137
8138 /******************************************************************************
8139 * This function starts a flash cycle and waits for its completion
8140 *
8141 * sc - The pointer to the hw structure
8142 ****************************************************************************/
8143 static int32_t
8144 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8145 {
8146 uint16_t hsflctl;
8147 uint16_t hsfsts;
8148 int32_t error = 1;
8149 uint32_t i = 0;
8150
8151 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8152 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8153 hsflctl |= HSFCTL_GO;
8154 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8155
8156 /* wait till FDONE bit is set to 1 */
8157 do {
8158 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8159 if (hsfsts & HSFSTS_DONE)
8160 break;
8161 delay(1);
8162 i++;
8163 } while (i < timeout);
8164 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
8165 error = 0;
8166
8167 return error;
8168 }
8169
8170 /******************************************************************************
8171 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8172 *
8173 * sc - The pointer to the hw structure
8174 * index - The index of the byte or word to read.
8175 * size - Size of data to read, 1=byte 2=word
8176 * data - Pointer to the word to store the value read.
8177 *****************************************************************************/
8178 static int32_t
8179 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8180     uint32_t size, uint16_t *data)
8181 {
8182 uint16_t hsfsts;
8183 uint16_t hsflctl;
8184 uint32_t flash_linear_address;
8185 uint32_t flash_data = 0;
8186 int32_t error = 1;
8187 int32_t count = 0;
8188
8189 	if (size < 1 || size > 2 || data == NULL ||
8190 index > ICH_FLASH_LINEAR_ADDR_MASK)
8191 return error;
8192
8193 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8194 sc->sc_ich8_flash_base;
8195
8196 do {
8197 delay(1);
8198 /* Steps */
8199 error = wm_ich8_cycle_init(sc);
8200 if (error)
8201 break;
8202
8203 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8204 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8205 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8206 & HSFCTL_BCOUNT_MASK;
8207 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8208 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8209
8210 /*
8211 * Write the last 24 bits of index into Flash Linear address
8212 * field in Flash Address
8213 */
8214 /* TODO: TBD maybe check the index against the size of flash */
8215
8216 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8217
8218 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8219
8220 		/*
8221 		 * If FCERR is set to 1, clear it and try the whole
8222 		 * sequence a few more times; otherwise read the result
8223 		 * from the Flash Data0 register, least significant byte
8224 		 * first.
8225 		 */
8226 if (error == 0) {
8227 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8228 if (size == 1)
8229 *data = (uint8_t)(flash_data & 0x000000FF);
8230 else if (size == 2)
8231 *data = (uint16_t)(flash_data & 0x0000FFFF);
8232 break;
8233 } else {
8234 /*
8235 * If we've gotten here, then things are probably
8236 * completely hosed, but if the error condition is
8237 * detected, it won't hurt to give it another try...
8238 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8239 */
8240 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8241 if (hsfsts & HSFSTS_ERR) {
8242 /* Repeat for some time before giving up. */
8243 continue;
8244 } else if ((hsfsts & HSFSTS_DONE) == 0)
8245 break;
8246 }
8247 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8248
8249 return error;
8250 }
8251
8252 /******************************************************************************
8253 * Reads a single byte from the NVM using the ICH8 flash access registers.
8254 *
8255 * sc - pointer to wm_hw structure
8256 * index - The index of the byte to read.
8257 * data - Pointer to a byte to store the value read.
8258 *****************************************************************************/
8259 static int32_t
8260 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
8261 {
8262 int32_t status;
8263 uint16_t word = 0;
8264
8265 status = wm_read_ich8_data(sc, index, 1, &word);
8266 if (status == 0)
8267 *data = (uint8_t)word;
8268 else
8269 *data = 0;
8270
8271 return status;
8272 }
8273
8274 /******************************************************************************
8275 * Reads a word from the NVM using the ICH8 flash access registers.
8276 *
8277 * sc - pointer to wm_hw structure
8278 * index - The starting byte index of the word to read.
8279 * data - Pointer to a word to store the value read.
8280 *****************************************************************************/
8281 static int32_t
8282 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8283 {
8284 int32_t status;
8285
8286 status = wm_read_ich8_data(sc, index, 2, data);
8287 return status;
8288 }
8289
8290 static int
8291 wm_check_mng_mode(struct wm_softc *sc)
8292 {
8293 int rv;
8294
8295 switch (sc->sc_type) {
8296 case WM_T_ICH8:
8297 case WM_T_ICH9:
8298 case WM_T_ICH10:
8299 case WM_T_PCH:
8300 case WM_T_PCH2:
8301 case WM_T_PCH_LPT:
8302 rv = wm_check_mng_mode_ich8lan(sc);
8303 break;
8304 case WM_T_82574:
8305 case WM_T_82583:
8306 rv = wm_check_mng_mode_82574(sc);
8307 break;
8308 case WM_T_82571:
8309 case WM_T_82572:
8310 case WM_T_82573:
8311 case WM_T_80003:
8312 rv = wm_check_mng_mode_generic(sc);
8313 break;
8314 default:
8315 		/* nothing to do */
8316 rv = 0;
8317 break;
8318 }
8319
8320 return rv;
8321 }
8322
8323 static int
8324 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8325 {
8326 uint32_t fwsm;
8327
8328 fwsm = CSR_READ(sc, WMREG_FWSM);
8329
8330 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8331 return 1;
8332
8333 return 0;
8334 }
8335
8336 static int
8337 wm_check_mng_mode_82574(struct wm_softc *sc)
8338 {
8339 uint16_t data;
8340
8341 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8342
8343 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8344 return 1;
8345
8346 return 0;
8347 }
8348
8349 static int
8350 wm_check_mng_mode_generic(struct wm_softc *sc)
8351 {
8352 uint32_t fwsm;
8353
8354 fwsm = CSR_READ(sc, WMREG_FWSM);
8355
8356 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8357 return 1;
8358
8359 return 0;
8360 }
8361
8362 static int
8363 wm_enable_mng_pass_thru(struct wm_softc *sc)
8364 {
8365 uint32_t manc, fwsm, factps;
8366
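	/*
	 * Pass-through is only usable when management firmware is
	 * present and the MAC passes received TCO packets to the host;
	 * beyond that, the exact check is chip-family specific.
	 */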
8367 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8368 return 0;
8369
8370 manc = CSR_READ(sc, WMREG_MANC);
8371
8372 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8373 device_xname(sc->sc_dev), manc));
8374 if ((manc & MANC_RECV_TCO_EN) == 0)
8375 return 0;
8376
8377 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8378 fwsm = CSR_READ(sc, WMREG_FWSM);
8379 factps = CSR_READ(sc, WMREG_FACTPS);
8380 if (((factps & FACTPS_MNGCG) == 0)
8381 && ((fwsm & FWSM_MODE_MASK)
8382 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8383 return 1;
8384 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8385 uint16_t data;
8386
8387 factps = CSR_READ(sc, WMREG_FACTPS);
8388 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8389 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8390 device_xname(sc->sc_dev), factps, data));
8391 if (((factps & FACTPS_MNGCG) == 0)
8392 && ((data & EEPROM_CFG2_MNGM_MASK)
8393 == (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8394 return 1;
8395 } else if (((manc & MANC_SMBUS_EN) != 0)
8396 && ((manc & MANC_ASF_EN) == 0))
8397 return 1;
8398
8399 return 0;
8400 }
8401
8402 static int
8403 wm_check_reset_block(struct wm_softc *sc)
8404 {
8405 uint32_t reg;
8406
8407 switch (sc->sc_type) {
8408 case WM_T_ICH8:
8409 case WM_T_ICH9:
8410 case WM_T_ICH10:
8411 case WM_T_PCH:
8412 case WM_T_PCH2:
8413 case WM_T_PCH_LPT:
8414 reg = CSR_READ(sc, WMREG_FWSM);
8415 if ((reg & FWSM_RSPCIPHY) != 0)
8416 return 0;
8417 else
8418 return -1;
8419 break;
8420 case WM_T_82571:
8421 case WM_T_82572:
8422 case WM_T_82573:
8423 case WM_T_82574:
8424 case WM_T_82583:
8425 case WM_T_80003:
8426 reg = CSR_READ(sc, WMREG_MANC);
8427 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8428 return -1;
8429 else
8430 return 0;
8431 break;
8432 default:
8433 /* no problem */
8434 break;
8435 }
8436
8437 return 0;
8438 }
8439
8440 static void
8441 wm_get_hw_control(struct wm_softc *sc)
8442 {
8443 uint32_t reg;
8444
8445 switch (sc->sc_type) {
8446 case WM_T_82573:
8447 reg = CSR_READ(sc, WMREG_SWSM);
8448 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8449 break;
8450 case WM_T_82571:
8451 case WM_T_82572:
8452 case WM_T_82574:
8453 case WM_T_82583:
8454 case WM_T_80003:
8455 case WM_T_ICH8:
8456 case WM_T_ICH9:
8457 case WM_T_ICH10:
8458 case WM_T_PCH:
8459 case WM_T_PCH2:
8460 case WM_T_PCH_LPT:
8461 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8462 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8463 break;
8464 default:
8465 break;
8466 }
8467 }
8468
8469 static void
8470 wm_release_hw_control(struct wm_softc *sc)
8471 {
8472 uint32_t reg;
8473
8474 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8475 return;
8476
8477 if (sc->sc_type == WM_T_82573) {
8478 reg = CSR_READ(sc, WMREG_SWSM);
8479 reg &= ~SWSM_DRV_LOAD;
8480 		CSR_WRITE(sc, WMREG_SWSM, reg);
8481 } else {
8482 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8483 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8484 }
8485 }
8486
8487 /* XXX Currently TBI only */
8488 static int
8489 wm_check_for_link(struct wm_softc *sc)
8490 {
8491 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8492 uint32_t rxcw;
8493 uint32_t ctrl;
8494 uint32_t status;
8495 uint32_t sig;
8496
8497 rxcw = CSR_READ(sc, WMREG_RXCW);
8498 ctrl = CSR_READ(sc, WMREG_CTRL);
8499 status = CSR_READ(sc, WMREG_STATUS);
8500
8501 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8502
8503 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8504 device_xname(sc->sc_dev), __func__,
8505 ((ctrl & CTRL_SWDPIN(1)) == sig),
8506 ((status & STATUS_LU) != 0),
8507 ((rxcw & RXCW_C) != 0)
8508 ));
8509
8510 /*
8511 * SWDPIN LU RXCW
8512 * 0 0 0
8513 * 0 0 1 (should not happen)
8514 * 0 1 0 (should not happen)
8515 * 0 1 1 (should not happen)
8516 * 1 0 0 Disable autonego and force linkup
8517 * 1 0 1 got /C/ but not linkup yet
8518 * 1 1 0 (linkup)
8519 * 1 1 1 If IFM_AUTO, back to autonego
8520 *
8521 */
8522 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8523 && ((status & STATUS_LU) == 0)
8524 && ((rxcw & RXCW_C) == 0)) {
8525 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8526 __func__));
8527 sc->sc_tbi_linkup = 0;
8528 /* Disable auto-negotiation in the TXCW register */
8529 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8530
8531 /*
8532 * Force link-up and also force full-duplex.
8533 *
8534 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
8535 		 * automatically, so we should keep sc->sc_ctrl in sync.
8536 */
8537 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8538 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8539 } else if (((status & STATUS_LU) != 0)
8540 && ((rxcw & RXCW_C) != 0)
8541 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8542 sc->sc_tbi_linkup = 1;
8543 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8544 __func__));
8545 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8546 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8547 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8548 && ((rxcw & RXCW_C) != 0)) {
8549 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8550 } else {
8551 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8552 status));
8553 }
8554
8555 return 0;
8556 }
8557
8558 /* Work-around for 82566 Kumeran PCS lock loss */
8559 static void
8560 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8561 {
8562 int miistatus, active, i;
8563 int reg;
8564
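	/*
	 * If the Kumeran PCS has lost lock at 1Gbps, reset the PHY and
	 * recheck, up to 10 times.  If lock never comes back, fall
	 * through and disable gigabit operation entirely.
	 */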
8565 miistatus = sc->sc_mii.mii_media_status;
8566
8567 /* If the link is not up, do nothing */
8568 	if ((miistatus & IFM_ACTIVE) == 0)
8569 return;
8570
8571 active = sc->sc_mii.mii_media_active;
8572
8573 /* Nothing to do if the link is other than 1Gbps */
8574 if (IFM_SUBTYPE(active) != IFM_1000_T)
8575 return;
8576
8577 for (i = 0; i < 10; i++) {
8578 /* read twice */
8579 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8580 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8581 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
8582 goto out; /* GOOD! */
8583
8584 /* Reset the PHY */
8585 wm_gmii_reset(sc);
8586 delay(5*1000);
8587 }
8588
8589 /* Disable GigE link negotiation */
8590 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8591 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8592 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8593
8594 /*
8595 * Call gig speed drop workaround on Gig disable before accessing
8596 * any PHY registers.
8597 */
8598 wm_gig_downshift_workaround_ich8lan(sc);
8599
8600 out:
8601 return;
8602 }
8603
8604 /* WOL from S5 stops working */
8605 static void
8606 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8607 {
8608 uint16_t kmrn_reg;
8609
8610 /* Only for igp3 */
8611 if (sc->sc_phytype == WMPHY_IGP_3) {
8612 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8613 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8614 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8615 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8616 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8617 }
8618 }
8619
8620 #ifdef WM_WOL
8621 /* Power down workaround on D3 */
8622 static void
8623 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8624 {
8625 uint32_t reg;
8626 int i;
8627
8628 for (i = 0; i < 2; i++) {
8629 /* Disable link */
8630 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8631 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8632 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8633
8634 /*
8635 * Call gig speed drop workaround on Gig disable before
8636 * accessing any PHY registers
8637 */
8638 if (sc->sc_type == WM_T_ICH8)
8639 wm_gig_downshift_workaround_ich8lan(sc);
8640
8641 /* Write VR power-down enable */
8642 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8643 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8644 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8645 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8646
8647 /* Read it back and test */
8648 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8649 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8650 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8651 break;
8652
8653 /* Issue PHY reset and repeat at most one more time */
8654 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8655 }
8656 }
8657 #endif /* WM_WOL */
8658
8659 /*
8660 * Workaround for pch's PHYs
8661 * XXX should be moved to new PHY driver?
8662 */
8663 static void
8664 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8665 {
8666 if (sc->sc_phytype == WMPHY_82577)
8667 wm_set_mdio_slow_mode_hv(sc);
8668
8669 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8670
8671 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8672
8673 /* 82578 */
8674 if (sc->sc_phytype == WMPHY_82578) {
8675 /* PCH rev. < 3 */
8676 if (sc->sc_rev < 3) {
8677 /* XXX 6 bit shift? Why? Is it page2? */
8678 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8679 0x66c0);
8680 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8681 0xffff);
8682 }
8683
8684 /* XXX phy rev. < 2 */
8685 }
8686
8687 /* Select page 0 */
8688
8689 /* XXX acquire semaphore */
8690 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8691 /* XXX release semaphore */
8692
8693 /*
8694 * Configure the K1 Si workaround during phy reset assuming there is
8695 * link so that it disables K1 if link is in 1Gbps.
8696 */
8697 wm_k1_gig_workaround_hv(sc, 1);
8698 }
8699
8700 static void
8701 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8702 {
8703
8704 wm_set_mdio_slow_mode_hv(sc);
8705 }
8706
8707 static void
8708 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8709 {
8710 int k1_enable = sc->sc_nvm_k1_enabled;
8711
8712 /* XXX acquire semaphore */
8713
8714 if (link) {
8715 k1_enable = 0;
8716
8717 /* Link stall fix for link up */
8718 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8719 } else {
8720 /* Link stall fix for link down */
8721 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8722 }
8723
8724 wm_configure_k1_ich8lan(sc, k1_enable);
8725
8726 /* XXX release semaphore */
8727 }
8728
8729 static void
8730 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8731 {
8732 uint32_t reg;
8733
8734 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8735 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8736 reg | HV_KMRN_MDIO_SLOW);
8737 }
8738
8739 static void
8740 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8741 {
8742 uint32_t ctrl, ctrl_ext, tmp;
8743 uint16_t kmrn_reg;
8744
8745 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8746
8747 if (k1_enable)
8748 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8749 else
8750 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8751
8752 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8753
8754 delay(20);
8755
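	/*
	 * Briefly force the MAC speed configuration (FRCSPD together
	 * with the SPD_BYPS override) and then restore the original
	 * CTRL/CTRL_EXT values; the same sequence appears in Intel's
	 * e1000 K1 code, presumably so the new K1 setting takes effect.
	 */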
8756 ctrl = CSR_READ(sc, WMREG_CTRL);
8757 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8758
8759 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8760 tmp |= CTRL_FRCSPD;
8761
8762 CSR_WRITE(sc, WMREG_CTRL, tmp);
8763 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8764 CSR_WRITE_FLUSH(sc);
8765 delay(20);
8766
8767 CSR_WRITE(sc, WMREG_CTRL, ctrl);
8768 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8769 CSR_WRITE_FLUSH(sc);
8770 delay(20);
8771 }
8772
8773 static void
8774 wm_smbustopci(struct wm_softc *sc)
8775 {
8776 uint32_t fwsm;
8777
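	/*
	 * Toggling LANPHYPC (assert the override with the value forced
	 * low, then release it) power-cycles the PHY connection so the
	 * PHY comes back up on its PCIe-based interface rather than
	 * SMBus - hence the function name.  Only do this when no valid
	 * firmware is managing the PHY and PHY resets are not blocked.
	 */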
8778 fwsm = CSR_READ(sc, WMREG_FWSM);
8779 if (((fwsm & FWSM_FW_VALID) == 0)
8780 && ((wm_check_reset_block(sc) == 0))) {
8781 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8782 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8783 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8784 CSR_WRITE_FLUSH(sc);
8785 delay(10);
8786 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8787 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8788 CSR_WRITE_FLUSH(sc);
8789 delay(50*1000);
8790
8791 /*
8792 * Gate automatic PHY configuration by hardware on non-managed
8793 * 82579
8794 */
8795 if (sc->sc_type == WM_T_PCH2)
8796 wm_gate_hw_phy_config_ich8lan(sc, 1);
8797 }
8798 }
8799
8800 static void
8801 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8802 {
8803 uint32_t gcr;
8804 pcireg_t ctrl2;
8805
8806 gcr = CSR_READ(sc, WMREG_GCR);
8807
8808 /* Only take action if timeout value is defaulted to 0 */
8809 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8810 goto out;
8811
8812 if ((gcr & GCR_CAP_VER2) == 0) {
8813 gcr |= GCR_CMPL_TMOUT_10MS;
8814 goto out;
8815 }
8816
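	/*
	 * Devices reporting a version 2 PCIe capability take the
	 * completion timeout from the PCIe Device Control 2 register
	 * rather than from GCR.
	 */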
8817 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8818 sc->sc_pcixe_capoff + PCIE_DCSR2);
8819 ctrl2 |= WM_PCIE_DCSR2_16MS;
8820 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8821 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8822
8823 out:
8824 /* Disable completion timeout resend */
8825 gcr &= ~GCR_CMPL_TMOUT_RESEND;
8826
8827 CSR_WRITE(sc, WMREG_GCR, gcr);
8828 }
8829
8830 /* Special case - the 82575 needs to do manual init ... */
8831 static void
8832 wm_reset_init_script_82575(struct wm_softc *sc)
8833 {
8834 /*
8835 	 * Remark: this is untested code - we have no board without EEPROM;
8836 	 * same setup as mentioned in the FreeBSD driver for the i82575.
8837 */
8838
8839 /* SerDes configuration via SERDESCTRL */
8840 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8841 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8842 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8843 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8844
8845 /* CCM configuration via CCMCTL register */
8846 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8847 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8848
8849 /* PCIe lanes configuration */
8850 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8851 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8852 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8853 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8854
8855 /* PCIe PLL Configuration */
8856 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8857 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8858 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8859 }
8860
8861 static void
8862 wm_init_manageability(struct wm_softc *sc)
8863 {
8864
8865 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8866 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8867 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8868
8869 		/* Disable hardware interception of ARP */
8870 		manc &= ~MANC_ARP_EN;
8871 
8872 		/* Enable receiving management packets to the host */
8873 if (sc->sc_type >= WM_T_82571) {
8874 manc |= MANC_EN_MNG2HOST;
8875 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8876 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8877
8878 }
8879
8880 CSR_WRITE(sc, WMREG_MANC, manc);
8881 }
8882 }
8883
8884 static void
8885 wm_release_manageability(struct wm_softc *sc)
8886 {
8887
8888 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8889 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8890
8891 manc |= MANC_ARP_EN;
8892 if (sc->sc_type >= WM_T_82571)
8893 manc &= ~MANC_EN_MNG2HOST;
8894
8895 CSR_WRITE(sc, WMREG_MANC, manc);
8896 }
8897 }
8898
8899 static void
8900 wm_get_wakeup(struct wm_softc *sc)
8901 {
8902
8903 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8904 switch (sc->sc_type) {
8905 case WM_T_82573:
8906 case WM_T_82583:
8907 sc->sc_flags |= WM_F_HAS_AMT;
8908 /* FALLTHROUGH */
8909 case WM_T_80003:
8910 case WM_T_82541:
8911 case WM_T_82547:
8912 case WM_T_82571:
8913 case WM_T_82572:
8914 case WM_T_82574:
8915 case WM_T_82575:
8916 case WM_T_82576:
8917 case WM_T_82580:
8918 case WM_T_82580ER:
8919 case WM_T_I350:
8920 case WM_T_I354:
8921 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8922 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8923 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8924 break;
8925 case WM_T_ICH8:
8926 case WM_T_ICH9:
8927 case WM_T_ICH10:
8928 case WM_T_PCH:
8929 case WM_T_PCH2:
8930 case WM_T_PCH_LPT:
8931 sc->sc_flags |= WM_F_HAS_AMT;
8932 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8933 break;
8934 default:
8935 break;
8936 }
8937
8938 /* 1: HAS_MANAGE */
8939 if (wm_enable_mng_pass_thru(sc) != 0)
8940 sc->sc_flags |= WM_F_HAS_MANAGE;
8941
8942 #ifdef WM_DEBUG
8943 printf("\n");
8944 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8945 printf("HAS_AMT,");
8946 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8947 printf("ARC_SUBSYS_VALID,");
8948 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8949 printf("ASF_FIRMWARE_PRES,");
8950 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8951 printf("HAS_MANAGE,");
8952 printf("\n");
8953 #endif
8954 /*
8955 	 * Note that the WOL flags are set after the resetting of the
8956 	 * EEPROM stuff.
8957 */
8958 }
8959
8960 #ifdef WM_WOL
8961 /* WOL in the newer chipset interfaces (pchlan) */
8962 static void
8963 wm_enable_phy_wakeup(struct wm_softc *sc)
8964 {
8965 #if 0
8966 uint16_t preg;
8967
8968 /* Copy MAC RARs to PHY RARs */
8969
8970 /* Copy MAC MTA to PHY MTA */
8971
8972 /* Configure PHY Rx Control register */
8973
8974 /* Enable PHY wakeup in MAC register */
8975
8976 /* Configure and enable PHY wakeup in PHY registers */
8977
8978 /* Activate PHY wakeup */
8979
8980 /* XXX */
8981 #endif
8982 }
8983
8984 static void
8985 wm_enable_wakeup(struct wm_softc *sc)
8986 {
8987 uint32_t reg, pmreg;
8988 pcireg_t pmode;
8989
8990 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8991 &pmreg, NULL) == 0)
8992 return;
8993
8994 /* Advertise the wakeup capability */
8995 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8996 | CTRL_SWDPIN(3));
8997 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8998
8999 /* ICH workaround */
9000 switch (sc->sc_type) {
9001 case WM_T_ICH8:
9002 case WM_T_ICH9:
9003 case WM_T_ICH10:
9004 case WM_T_PCH:
9005 case WM_T_PCH2:
9006 case WM_T_PCH_LPT:
9007 /* Disable gig during WOL */
9008 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9009 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9010 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9011 if (sc->sc_type == WM_T_PCH)
9012 wm_gmii_reset(sc);
9013
9014 /* Power down workaround */
9015 if (sc->sc_phytype == WMPHY_82577) {
9016 struct mii_softc *child;
9017
9018 /* Assume that the PHY is copper */
9019 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9020 if (child->mii_mpd_rev <= 2)
9021 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9022 (768 << 5) | 25, 0x0444); /* magic num */
9023 }
9024 break;
9025 default:
9026 break;
9027 }
9028
9029 /* Keep the laser running on fiber adapters */
9030 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
9031 || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
9032 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9033 reg |= CTRL_EXT_SWDPIN(3);
9034 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9035 }
9036
9037 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9038 #if 0 /* for the multicast packet */
9039 reg |= WUFC_MC;
9040 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9041 #endif
9042
9043 if (sc->sc_type == WM_T_PCH) {
9044 wm_enable_phy_wakeup(sc);
9045 } else {
9046 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9047 CSR_WRITE(sc, WMREG_WUFC, reg);
9048 }
9049
9050 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9051 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9052 || (sc->sc_type == WM_T_PCH2))
9053 && (sc->sc_phytype == WMPHY_IGP_3))
9054 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9055
9056 /* Request PME */
9057 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9058 #if 0
9059 /* Disable WOL */
9060 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9061 #else
9062 /* For WOL */
9063 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9064 #endif
9065 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9066 }
9067 #endif /* WM_WOL */
9068
9069 static bool
9070 wm_suspend(device_t self, const pmf_qual_t *qual)
9071 {
9072 struct wm_softc *sc = device_private(self);
9073
9074 wm_release_manageability(sc);
9075 wm_release_hw_control(sc);
9076 #ifdef WM_WOL
9077 wm_enable_wakeup(sc);
9078 #endif
9079
9080 return true;
9081 }
9082
9083 static bool
9084 wm_resume(device_t self, const pmf_qual_t *qual)
9085 {
9086 struct wm_softc *sc = device_private(self);
9087
9088 wm_init_manageability(sc);
9089
9090 return true;
9091 }
9092
9093 static void
9094 wm_set_eee_i350(struct wm_softc * sc)
9095 {
9096 uint32_t ipcnfg, eeer;
9097
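	/*
	 * Judging by the register and bit names, IPCNFG selects the
	 * speeds at which EEE is advertised (1G/100M autonegotiation)
	 * and EEER enables LPI entry on the transmit and receive paths
	 * along with flow control during LPI.
	 */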
9098 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9099 eeer = CSR_READ(sc, WMREG_EEER);
9100
9101 if ((sc->sc_flags & WM_F_EEE) != 0) {
9102 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9103 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9104 | EEER_LPI_FC);
9105 } else {
9106 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9107 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9108 | EEER_LPI_FC);
9109 }
9110
9111 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9112 CSR_WRITE(sc, WMREG_EEER, eeer);
9113 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9114 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9115 }
9116