/*	$NetBSD: if_wm.c,v 1.267 2014/03/25 16:19:13 christos Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.267 2014/03/25 16:19:13 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
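
/*
 * Illustrative usage (not part of this revision as such): the second
 * DPRINTF argument carries its own parentheses, so a call looks like
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 *
 * and expands to a printf() guarded by the WM_DEBUG_LINK bit in
 * wm_debug, or to nothing at all when WM_DEBUG is not defined.
 */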

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
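
/*
 * Worked example (illustrative): both ring sizes are powers of two, so
 * the "& mask" in WM_NEXTTX()/WM_NEXTTXS() wraps the index without a
 * division.  With WM_NTXDESC(sc) == 256 the mask is 0xff, and
 *
 *	WM_NEXTTX(sc, 254) == 255	WM_NEXTTX(sc, 255) == 0
 *
 * This is why sc_ntxdesc and sc_txnum must be powers of two.
 */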

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
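
/*
 * Illustrative only: the tail pointer makes appending O(1).  After
 * WM_RXCHAIN_RESET(sc), sc_rxtailp points at sc_rxhead; each
 * WM_RXCHAIN_LINK(sc, m) stores m through the tail pointer and then
 * advances it to &m->m_next, so a multi-buffer jumbo frame is chained
 *
 *	sc_rxhead -> m1 -> m2 -> m3 -> NULL
 *
 * without ever walking the list.
 */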

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
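
/*
 * Worked example (illustrative): the hardware takes descriptor base
 * addresses as two 32-bit halves.  A 64-bit DMA address such as
 * 0x123456780 splits into LO = 0x23456780 and HI = 0x1; on a 32-bit
 * bus_addr_t the HI half is a constant 0 and the compiler folds the
 * sizeof() test away.
 */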

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
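
/*
 * Worked example (illustrative): in a 256-entry ring,
 * WM_CDTXSYNC(sc, 254, 4, ops) spans the wrap point, so it issues two
 * bus_dmamap_sync() calls: one for descriptors 254-255 and one for
 * descriptors 0-1.  A range that does not wrap is handled entirely by
 * the second bus_dmamap_sync() in a single call.
 */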

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
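
/*
 * Worked example (illustrative): with sc_align_tweak == 2, the 14-byte
 * Ethernet header starts 2 bytes into the buffer and the IP header
 * lands at offset 16, a 4-byte boundary.  A standard frame (up to 1518
 * bytes) still fits in 2048 - 2 bytes, so the tweak is safe; once the
 * configured receive size could exceed (2K - 2), e.g. with jumbo
 * frames, sc_align_tweak must be 0 instead.
 */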

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

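/*
 * A note on the indirect access pattern below (an assumption based on
 * the i8254x I/O-mapped register interface): offset 0 in the I/O BAR
 * acts as an address window and offset 4 as a data window, so each
 * access first writes the target register offset and then moves the
 * data through the second window.
 */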
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's not a problem, because newer chips don't
			 * have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
1351 sc->sc_flags |= WM_F_CSA;
1352 sc->sc_bus_speed = 66;
1353 aprint_verbose_dev(sc->sc_dev,
1354 "Communication Streaming Architecture\n");
1355 if (sc->sc_type == WM_T_82547) {
1356 callout_init(&sc->sc_txfifo_ch, 0);
1357 callout_setfunc(&sc->sc_txfifo_ch,
1358 wm_82547_txfifo_stall, sc);
1359 aprint_verbose_dev(sc->sc_dev,
1360 "using 82547 Tx FIFO stall work-around\n");
1361 }
1362 } else if (sc->sc_type >= WM_T_82571) {
1363 sc->sc_flags |= WM_F_PCIE;
1364 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1365 && (sc->sc_type != WM_T_ICH10)
1366 && (sc->sc_type != WM_T_PCH)
1367 && (sc->sc_type != WM_T_PCH2)
1368 && (sc->sc_type != WM_T_PCH_LPT)) {
1369 sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1370 /* ICH* and PCH* have no PCIe capability registers */
1371 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1372 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1373 NULL) == 0)
1374 aprint_error_dev(sc->sc_dev,
1375 "unable to find PCIe capability\n");
1376 }
1377 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1378 } else {
1379 reg = CSR_READ(sc, WMREG_STATUS);
1380 if (reg & STATUS_BUS64)
1381 sc->sc_flags |= WM_F_BUS64;
1382 if ((reg & STATUS_PCIX_MODE) != 0) {
1383 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1384
1385 sc->sc_flags |= WM_F_PCIX;
1386 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1387 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1388 aprint_error_dev(sc->sc_dev,
1389 "unable to find PCIX capability\n");
1390 else if (sc->sc_type != WM_T_82545_3 &&
1391 sc->sc_type != WM_T_82546_3) {
1392 /*
1393 * Work around a problem caused by the BIOS
1394 * setting the max memory read byte count
1395 * incorrectly.
1396 */
1397 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1398 sc->sc_pcixe_capoff + PCIX_CMD);
1399 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1400 sc->sc_pcixe_capoff + PCIX_STATUS);
1401
1402 bytecnt =
1403 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1404 PCIX_CMD_BYTECNT_SHIFT;
1405 maxb =
1406 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1407 PCIX_STATUS_MAXB_SHIFT;
1408 if (bytecnt > maxb) {
1409 aprint_verbose_dev(sc->sc_dev,
1410 "resetting PCI-X MMRBC: %d -> %d\n",
1411 512 << bytecnt, 512 << maxb);
1412 pcix_cmd = (pcix_cmd &
1413 ~PCIX_CMD_BYTECNT_MASK) |
1414 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1415 pci_conf_write(pa->pa_pc, pa->pa_tag,
1416 sc->sc_pcixe_capoff + PCIX_CMD,
1417 pcix_cmd);
1418 }
1419 }
1420 }
1421 /*
1422 * The quad port adapter is special; it has a PCIX-PCIX
1423 * bridge on the board, and can run the secondary bus at
1424 * a higher speed.
1425 */
1426 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1427 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1428 : 66;
1429 } else if (sc->sc_flags & WM_F_PCIX) {
1430 switch (reg & STATUS_PCIXSPD_MASK) {
1431 case STATUS_PCIXSPD_50_66:
1432 sc->sc_bus_speed = 66;
1433 break;
1434 case STATUS_PCIXSPD_66_100:
1435 sc->sc_bus_speed = 100;
1436 break;
1437 case STATUS_PCIXSPD_100_133:
1438 sc->sc_bus_speed = 133;
1439 break;
1440 default:
1441 aprint_error_dev(sc->sc_dev,
1442 "unknown PCIXSPD %d; assuming 66MHz\n",
1443 reg & STATUS_PCIXSPD_MASK);
1444 sc->sc_bus_speed = 66;
1445 break;
1446 }
1447 } else
1448 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1449 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1450 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1451 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1452 }
1453
1454 /*
1455 * Allocate the control data structures, and create and load the
1456 * DMA map for it.
1457 *
1458 * NOTE: All Tx descriptors must be in the same 4G segment of
1459 * memory. So must Rx descriptors. We simplify by allocating
1460 * both sets within the same 4G segment.
1461 */
1462 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1463 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1464 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1465 sizeof(struct wm_control_data_82542) :
1466 sizeof(struct wm_control_data_82544);
1467 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1468 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1469 &sc->sc_cd_rseg, 0)) != 0) {
1470 aprint_error_dev(sc->sc_dev,
1471 "unable to allocate control data, error = %d\n",
1472 error);
1473 goto fail_0;
1474 }
1475
1476 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1477 sc->sc_cd_rseg, sc->sc_cd_size,
1478 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1479 aprint_error_dev(sc->sc_dev,
1480 "unable to map control data, error = %d\n", error);
1481 goto fail_1;
1482 }
1483
1484 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1485 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1486 aprint_error_dev(sc->sc_dev,
1487 "unable to create control data DMA map, error = %d\n",
1488 error);
1489 goto fail_2;
1490 }
1491
1492 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1493 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1494 aprint_error_dev(sc->sc_dev,
1495 "unable to load control data DMA map, error = %d\n",
1496 error);
1497 goto fail_3;
1498 }
1499
1500 /*
1501 * Create the transmit buffer DMA maps.
1502 */
1503 WM_TXQUEUELEN(sc) =
1504 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1505 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1506 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1507 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1508 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1509 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1510 aprint_error_dev(sc->sc_dev,
1511 "unable to create Tx DMA map %d, error = %d\n",
1512 i, error);
1513 goto fail_4;
1514 }
1515 }
1516
1517 /*
1518 * Create the receive buffer DMA maps.
1519 */
1520 for (i = 0; i < WM_NRXDESC; i++) {
1521 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1522 MCLBYTES, 0, 0,
1523 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1524 aprint_error_dev(sc->sc_dev,
1525 "unable to create Rx DMA map %d error = %d\n",
1526 i, error);
1527 goto fail_5;
1528 }
1529 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1530 }
1531
1532 /* clear interesting stat counters */
1533 CSR_READ(sc, WMREG_COLC);
1534 CSR_READ(sc, WMREG_RXERRC);
1535
1536 /* get PHY control from SMBus to PCIe */
1537 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1538 || (sc->sc_type == WM_T_PCH_LPT))
1539 wm_smbustopci(sc);
1540
1541 /*
1542 * Reset the chip to a known state.
1543 */
1544 wm_reset(sc);
1545
1546 /*
1547 * Get some information about the EEPROM.
1548 */
1549 switch (sc->sc_type) {
1550 case WM_T_82542_2_0:
1551 case WM_T_82542_2_1:
1552 case WM_T_82543:
1553 case WM_T_82544:
1554 /* Microwire */
1555 sc->sc_ee_addrbits = 6;
1556 break;
1557 case WM_T_82540:
1558 case WM_T_82545:
1559 case WM_T_82545_3:
1560 case WM_T_82546:
1561 case WM_T_82546_3:
1562 /* Microwire */
1563 reg = CSR_READ(sc, WMREG_EECD);
1564 if (reg & EECD_EE_SIZE)
1565 sc->sc_ee_addrbits = 8;
1566 else
1567 sc->sc_ee_addrbits = 6;
1568 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1569 break;
1570 case WM_T_82541:
1571 case WM_T_82541_2:
1572 case WM_T_82547:
1573 case WM_T_82547_2:
1574 reg = CSR_READ(sc, WMREG_EECD);
1575 if (reg & EECD_EE_TYPE) {
1576 /* SPI */
1577 wm_set_spiaddrbits(sc);
1578 } else
1579 /* Microwire */
1580 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1581 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1582 break;
1583 case WM_T_82571:
1584 case WM_T_82572:
1585 /* SPI */
1586 wm_set_spiaddrbits(sc);
1587 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1588 break;
1589 case WM_T_82573:
1590 case WM_T_82574:
1591 case WM_T_82583:
1592 if (wm_is_onboard_nvm_eeprom(sc) == 0)
1593 sc->sc_flags |= WM_F_EEPROM_FLASH;
1594 else {
1595 /* SPI */
1596 wm_set_spiaddrbits(sc);
1597 }
1598 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1599 break;
1600 case WM_T_82575:
1601 case WM_T_82576:
1602 case WM_T_82580:
1603 case WM_T_82580ER:
1604 case WM_T_I350:
1605 case WM_T_I354: /* XXXX ok? */
1606 case WM_T_80003:
1607 /* SPI */
1608 wm_set_spiaddrbits(sc);
1609 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1610 break;
1611 case WM_T_ICH8:
1612 case WM_T_ICH9:
1613 case WM_T_ICH10:
1614 case WM_T_PCH:
1615 case WM_T_PCH2:
1616 case WM_T_PCH_LPT:
1617 /* FLASH */
1618 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1619 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1620 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1621 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1622 aprint_error_dev(sc->sc_dev,
1623 "can't map FLASH registers\n");
1624 return;
1625 }
1626 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1627 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1628 ICH_FLASH_SECTOR_SIZE;
1629 sc->sc_ich8_flash_bank_size =
1630 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1631 sc->sc_ich8_flash_bank_size -=
1632 (reg & ICH_GFPREG_BASE_MASK);
1633 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1634 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
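/*
 * Illustrative arithmetic (hypothetical GFPREG value): with a base
 * sector of 1 and a limit sector of 15, the region spans
 * 15 + 1 - 1 = 15 sectors of ICH_FLASH_SECTOR_SIZE bytes; halving
 * for the two NVM banks and dividing by sizeof(uint16_t) gives the
 * per-bank size in 16-bit flash words.
 */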
1635 break;
1636 case WM_T_I210:
1637 case WM_T_I211:
1638 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1639 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1640 break;
1641 default:
1642 break;
1643 }
1644
1645 /*
1646 * Defer printing the EEPROM type until after verifying the checksum.
1647 * This allows the EEPROM type to be printed correctly in the case
1648 * that no EEPROM is attached.
1649 */
1650 /*
1651 * Validate the EEPROM checksum. If the checksum fails, flag
1652 * this for later, so we can fail future reads from the EEPROM.
1653 */
1654 if (wm_validate_eeprom_checksum(sc)) {
1655 /*
1656 * Validate a second time, because some PCI-e parts fail the
1657 * first check due to the link being in a sleep state.
1658 */
1659 if (wm_validate_eeprom_checksum(sc))
1660 sc->sc_flags |= WM_F_EEPROM_INVALID;
1661 }
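/*
 * (Intel's EEPROM checksum convention: the 16-bit words of the
 * checksummed region must sum to 0xBABA, so a persistent mismatch
 * here usually indicates a missing or blank part.)
 */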
1662
1663 /* Set device properties (macflags) */
1664 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1665
1666 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1667 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1668 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1669 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1670 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1671 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1672 } else {
1673 if (sc->sc_flags & WM_F_EEPROM_SPI)
1674 eetype = "SPI";
1675 else
1676 eetype = "MicroWire";
1677 aprint_verbose_dev(sc->sc_dev,
1678 "%u word (%d address bits) %s EEPROM\n",
1679 1U << sc->sc_ee_addrbits,
1680 sc->sc_ee_addrbits, eetype);
1681 }
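/*
 * For example, a Microwire part with 6 address bits holds
 * 1 << 6 = 64 16-bit words; one with 8 address bits, 256 words.
 */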
1682
1683 switch (sc->sc_type) {
1684 case WM_T_82571:
1685 case WM_T_82572:
1686 case WM_T_82573:
1687 case WM_T_82574:
1688 case WM_T_82583:
1689 case WM_T_80003:
1690 case WM_T_ICH8:
1691 case WM_T_ICH9:
1692 case WM_T_ICH10:
1693 case WM_T_PCH:
1694 case WM_T_PCH2:
1695 case WM_T_PCH_LPT:
1696 if (wm_check_mng_mode(sc) != 0)
1697 wm_get_hw_control(sc);
1698 break;
1699 default:
1700 break;
1701 }
1702 wm_get_wakeup(sc);
1703 /*
1704 * Read the Ethernet address from the EEPROM if it was not
1705 * found in the device properties first.
1706 */
1707 ea = prop_dictionary_get(dict, "mac-address");
1708 if (ea != NULL) {
1709 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1710 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1711 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1712 } else {
1713 if (wm_read_mac_addr(sc, enaddr) != 0) {
1714 aprint_error_dev(sc->sc_dev,
1715 "unable to read Ethernet address\n");
1716 return;
1717 }
1718 }
1719
1720 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1721 ether_sprintf(enaddr));
1722
1723 /*
1724 * Read the config info from the EEPROM, and set up various
1725 * bits in the control registers based on their contents.
1726 */
1727 pn = prop_dictionary_get(dict, "i82543-cfg1");
1728 if (pn != NULL) {
1729 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1730 cfg1 = (uint16_t) prop_number_integer_value(pn);
1731 } else {
1732 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1733 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1734 return;
1735 }
1736 }
1737
1738 pn = prop_dictionary_get(dict, "i82543-cfg2");
1739 if (pn != NULL) {
1740 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1741 cfg2 = (uint16_t) prop_number_integer_value(pn);
1742 } else {
1743 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1744 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1745 return;
1746 }
1747 }
1748
1749 /* check for WM_F_WOL */
1750 switch (sc->sc_type) {
1751 case WM_T_82542_2_0:
1752 case WM_T_82542_2_1:
1753 case WM_T_82543:
1754 /* dummy? */
1755 eeprom_data = 0;
1756 apme_mask = EEPROM_CFG3_APME;
1757 break;
1758 case WM_T_82544:
1759 apme_mask = EEPROM_CFG2_82544_APM_EN;
1760 eeprom_data = cfg2;
1761 break;
1762 case WM_T_82546:
1763 case WM_T_82546_3:
1764 case WM_T_82571:
1765 case WM_T_82572:
1766 case WM_T_82573:
1767 case WM_T_82574:
1768 case WM_T_82583:
1769 case WM_T_80003:
1770 default:
1771 apme_mask = EEPROM_CFG3_APME;
1772 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1773 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1774 break;
1775 case WM_T_82575:
1776 case WM_T_82576:
1777 case WM_T_82580:
1778 case WM_T_82580ER:
1779 case WM_T_I350:
1780 case WM_T_I354: /* XXX ok? */
1781 case WM_T_ICH8:
1782 case WM_T_ICH9:
1783 case WM_T_ICH10:
1784 case WM_T_PCH:
1785 case WM_T_PCH2:
1786 case WM_T_PCH_LPT:
1787 /* XXX The funcid should be checked on some devices */
1788 apme_mask = WUC_APME;
1789 eeprom_data = CSR_READ(sc, WMREG_WUC);
1790 break;
1791 }
1792
1793 /* Set the WM_F_WOL flag based on the EEPROM/WUC data read above */
1794 if ((eeprom_data & apme_mask) != 0)
1795 sc->sc_flags |= WM_F_WOL;
1796 #ifdef WM_DEBUG
1797 if ((sc->sc_flags & WM_F_WOL) != 0)
1798 printf("WOL\n");
1799 #endif
1800
1801 /*
1802 * XXX need special handling for some multiple-port cards
1803 * to disable a particular port.
1804 */
1805
1806 if (sc->sc_type >= WM_T_82544) {
1807 pn = prop_dictionary_get(dict, "i82543-swdpin");
1808 if (pn != NULL) {
1809 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1810 swdpin = (uint16_t) prop_number_integer_value(pn);
1811 } else {
1812 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1813 aprint_error_dev(sc->sc_dev,
1814 "unable to read SWDPIN\n");
1815 return;
1816 }
1817 }
1818 }
1819
1820 if (cfg1 & EEPROM_CFG1_ILOS)
1821 sc->sc_ctrl |= CTRL_ILOS;
1822 if (sc->sc_type >= WM_T_82544) {
1823 sc->sc_ctrl |=
1824 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1825 CTRL_SWDPIO_SHIFT;
1826 sc->sc_ctrl |=
1827 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1828 CTRL_SWDPINS_SHIFT;
1829 } else {
1830 sc->sc_ctrl |=
1831 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1832 CTRL_SWDPIO_SHIFT;
1833 }
1834
1835 #if 0
1836 if (sc->sc_type >= WM_T_82544) {
1837 if (cfg1 & EEPROM_CFG1_IPS0)
1838 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1839 if (cfg1 & EEPROM_CFG1_IPS1)
1840 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1841 sc->sc_ctrl_ext |=
1842 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1843 CTRL_EXT_SWDPIO_SHIFT;
1844 sc->sc_ctrl_ext |=
1845 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1846 CTRL_EXT_SWDPINS_SHIFT;
1847 } else {
1848 sc->sc_ctrl_ext |=
1849 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1850 CTRL_EXT_SWDPIO_SHIFT;
1851 }
1852 #endif
1853
1854 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1855 #if 0
1856 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1857 #endif
1858
1859 /*
1860 * Set up some register offsets that are different between
1861 * the i82542 and the i82543 and later chips.
1862 */
1863 if (sc->sc_type < WM_T_82543) {
1864 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1865 sc->sc_tdt_reg = WMREG_OLD_TDT;
1866 } else {
1867 sc->sc_rdt_reg = WMREG_RDT;
1868 sc->sc_tdt_reg = WMREG_TDT;
1869 }
1870
1871 if (sc->sc_type == WM_T_PCH) {
1872 uint16_t val;
1873
1874 /* Save the NVM K1 bit setting */
1875 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1876
1877 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1878 sc->sc_nvm_k1_enabled = 1;
1879 else
1880 sc->sc_nvm_k1_enabled = 0;
1881 }
1882
1883 /*
1884 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
1885 * media structures accordingly.
1886 */
1887 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1888 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1889 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
1890 || sc->sc_type == WM_T_82573
1891 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1892 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1893 wm_gmii_mediainit(sc, wmp->wmp_product);
1894 } else if (sc->sc_type < WM_T_82543 ||
1895 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1896 if (wmp->wmp_flags & WMP_F_1000T)
1897 aprint_error_dev(sc->sc_dev,
1898 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1899 wm_tbi_mediainit(sc);
1900 } else {
1901 switch (sc->sc_type) {
1902 case WM_T_82575:
1903 case WM_T_82576:
1904 case WM_T_82580:
1905 case WM_T_82580ER:
1906 case WM_T_I350:
1907 case WM_T_I354:
1908 case WM_T_I210:
1909 case WM_T_I211:
1910 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1911 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1912 case CTRL_EXT_LINK_MODE_1000KX:
1913 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
1914 CSR_WRITE(sc, WMREG_CTRL_EXT,
1915 reg | CTRL_EXT_I2C_ENA);
1916 panic("not supported yet\n");
1917 break;
1918 case CTRL_EXT_LINK_MODE_SGMII:
1919 if (wm_sgmii_uses_mdio(sc)) {
1920 aprint_verbose_dev(sc->sc_dev,
1921 "SGMII(MDIO)\n");
1922 sc->sc_flags |= WM_F_SGMII;
1923 wm_gmii_mediainit(sc,
1924 wmp->wmp_product);
1925 break;
1926 }
1927 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
1928 /*FALLTHROUGH*/
1929 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1930 aprint_verbose_dev(sc->sc_dev, "SERDES\n");
1931 CSR_WRITE(sc, WMREG_CTRL_EXT,
1932 reg | CTRL_EXT_I2C_ENA);
1933 panic("not supported yet\n");
1934 break;
1935 case CTRL_EXT_LINK_MODE_GMII:
1936 default:
1937 CSR_WRITE(sc, WMREG_CTRL_EXT,
1938 reg & ~CTRL_EXT_I2C_ENA);
1939 wm_gmii_mediainit(sc, wmp->wmp_product);
1940 break;
1941 }
1942 break;
1943 default:
1944 if (wmp->wmp_flags & WMP_F_1000X)
1945 aprint_error_dev(sc->sc_dev,
1946 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1947 wm_gmii_mediainit(sc, wmp->wmp_product);
1948 }
1949 }
1950
1951 ifp = &sc->sc_ethercom.ec_if;
1952 xname = device_xname(sc->sc_dev);
1953 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1954 ifp->if_softc = sc;
1955 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1956 ifp->if_ioctl = wm_ioctl;
1957 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1958 ifp->if_start = wm_nq_start;
1959 else
1960 ifp->if_start = wm_start;
1961 ifp->if_watchdog = wm_watchdog;
1962 ifp->if_init = wm_init;
1963 ifp->if_stop = wm_stop;
1964 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1965 IFQ_SET_READY(&ifp->if_snd);
1966
1967 /* Check for jumbo frame */
1968 switch (sc->sc_type) {
1969 case WM_T_82573:
1970 /* XXX limited to 9234 if ASPM is disabled */
1971 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1972 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1973 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1974 break;
1975 case WM_T_82571:
1976 case WM_T_82572:
1977 case WM_T_82574:
1978 case WM_T_82575:
1979 case WM_T_82576:
1980 case WM_T_82580:
1981 case WM_T_82580ER:
1982 case WM_T_I350:
1983 case WM_T_I354: /* XXXX ok? */
1984 case WM_T_I210:
1985 case WM_T_I211:
1986 case WM_T_80003:
1987 case WM_T_ICH9:
1988 case WM_T_ICH10:
1989 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1990 case WM_T_PCH_LPT:
1991 /* XXX limited to 9234 */
1992 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1993 break;
1994 case WM_T_PCH:
1995 /* XXX limited to 4096 */
1996 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1997 break;
1998 case WM_T_82542_2_0:
1999 case WM_T_82542_2_1:
2000 case WM_T_82583:
2001 case WM_T_ICH8:
2002 /* No support for jumbo frames */
2003 break;
2004 default:
2005 /* ETHER_MAX_LEN_JUMBO */
2006 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2007 break;
2008 }
2009
2010 /*
2011 * If we're an i82543 or greater, we can support VLANs.
2012 */
2013 if (sc->sc_type >= WM_T_82543)
2014 sc->sc_ethercom.ec_capabilities |=
2015 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2016
2017 /*
2018 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2019 * on i82543 and later.
2020 */
2021 if (sc->sc_type >= WM_T_82543) {
2022 ifp->if_capabilities |=
2023 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2024 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2025 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2026 IFCAP_CSUM_TCPv6_Tx |
2027 IFCAP_CSUM_UDPv6_Tx;
2028 }
2029
2030 /*
2031 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2032 *
2033 * 82541GI (8086:1076) ... no
2034 * 82572EI (8086:10b9) ... yes
2035 */
2036 if (sc->sc_type >= WM_T_82571) {
2037 ifp->if_capabilities |=
2038 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2039 }
2040
2041 /*
2042 * If we're an i82544 or greater (except i82547), we can do
2043 * TCP segmentation offload.
2044 */
2045 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2046 ifp->if_capabilities |= IFCAP_TSOv4;
2047 }
2048
2049 if (sc->sc_type >= WM_T_82571) {
2050 ifp->if_capabilities |= IFCAP_TSOv6;
2051 }
2052
2053 /*
2054 * Attach the interface.
2055 */
2056 if_attach(ifp);
2057 ether_ifattach(ifp, enaddr);
2058 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2059 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2060
2061 #ifdef WM_EVENT_COUNTERS
2062 /* Attach event counters. */
2063 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2064 NULL, xname, "txsstall");
2065 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2066 NULL, xname, "txdstall");
2067 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2068 NULL, xname, "txfifo_stall");
2069 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2070 NULL, xname, "txdw");
2071 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2072 NULL, xname, "txqe");
2073 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2074 NULL, xname, "rxintr");
2075 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2076 NULL, xname, "linkintr");
2077
2078 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2079 NULL, xname, "rxipsum");
2080 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2081 NULL, xname, "rxtusum");
2082 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2083 NULL, xname, "txipsum");
2084 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2085 NULL, xname, "txtusum");
2086 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2087 NULL, xname, "txtusum6");
2088
2089 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2090 NULL, xname, "txtso");
2091 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2092 NULL, xname, "txtso6");
2093 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2094 NULL, xname, "txtsopain");
2095
2096 for (i = 0; i < WM_NTXSEGS; i++) {
2097 snprintf(wm_txseg_evcnt_names[i],
2098 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2099 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2100 NULL, xname, wm_txseg_evcnt_names[i]);
2101 }
2102
2103 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2104 NULL, xname, "txdrop");
2105
2106 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2107 NULL, xname, "tu");
2108
2109 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2110 NULL, xname, "tx_xoff");
2111 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2112 NULL, xname, "tx_xon");
2113 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2114 NULL, xname, "rx_xoff");
2115 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2116 NULL, xname, "rx_xon");
2117 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2118 NULL, xname, "rx_macctl");
2119 #endif /* WM_EVENT_COUNTERS */
2120
2121 if (pmf_device_register(self, wm_suspend, wm_resume))
2122 pmf_class_network_register(self, ifp);
2123 else
2124 aprint_error_dev(self, "couldn't establish power handler\n");
2125
2126 return;
2127
2128 /*
2129 * Free any resources we've allocated during the failed attach
2130 * attempt. Do this in reverse order and fall through.
2131 */
2132 fail_5:
2133 for (i = 0; i < WM_NRXDESC; i++) {
2134 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2135 bus_dmamap_destroy(sc->sc_dmat,
2136 sc->sc_rxsoft[i].rxs_dmamap);
2137 }
2138 fail_4:
2139 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2140 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2141 bus_dmamap_destroy(sc->sc_dmat,
2142 sc->sc_txsoft[i].txs_dmamap);
2143 }
2144 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2145 fail_3:
2146 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2147 fail_2:
2148 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2149 sc->sc_cd_size);
2150 fail_1:
2151 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2152 fail_0:
2153 return;
2154 }
2155
2156 static int
2157 wm_detach(device_t self, int flags __unused)
2158 {
2159 struct wm_softc *sc = device_private(self);
2160 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2161 int i, s;
2162
2163 s = splnet();
2164 /* Stop the interface; callouts are halted inside wm_stop(). */
2165 wm_stop(ifp, 1);
2166 splx(s);
2167
2168 pmf_device_deregister(self);
2169
2170 /* Tell the firmware about the release */
2171 wm_release_manageability(sc);
2172 wm_release_hw_control(sc);
2173
2174 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2175
2176 /* Delete all remaining media. */
2177 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2178
2179 ether_ifdetach(ifp);
2180 if_detach(ifp);
2181
2182
2183 /* Unload RX dmamaps and free mbufs */
2184 wm_rxdrain(sc);
2185
2186 /* Free the DMA maps; this mirrors the failure path at the end of wm_attach() */
2187 for (i = 0; i < WM_NRXDESC; i++) {
2188 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2189 bus_dmamap_destroy(sc->sc_dmat,
2190 sc->sc_rxsoft[i].rxs_dmamap);
2191 }
2192 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2193 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2194 bus_dmamap_destroy(sc->sc_dmat,
2195 sc->sc_txsoft[i].txs_dmamap);
2196 }
2197 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2198 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2199 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2200 sc->sc_cd_size);
2201 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2202
2203 /* Disestablish the interrupt handler */
2204 if (sc->sc_ih != NULL) {
2205 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2206 sc->sc_ih = NULL;
2207 }
2208
2209 /* Unmap the registers */
2210 if (sc->sc_ss) {
2211 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2212 sc->sc_ss = 0;
2213 }
2214
2215 if (sc->sc_ios) {
2216 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2217 sc->sc_ios = 0;
2218 }
2219
2220 return 0;
2221 }
2222
2223 /*
2224 * wm_tx_offload:
2225 *
2226 * Set up TCP/IP checksumming parameters for the
2227 * specified packet.
2228 */
2229 static int
2230 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2231 uint8_t *fieldsp)
2232 {
2233 struct mbuf *m0 = txs->txs_mbuf;
2234 struct livengood_tcpip_ctxdesc *t;
2235 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2236 uint32_t ipcse;
2237 struct ether_header *eh;
2238 int offset, iphl;
2239 uint8_t fields;
2240
2241 /*
2242 * XXX It would be nice if the mbuf pkthdr had offset
2243 * fields for the protocol headers.
2244 */
2245
2246 eh = mtod(m0, struct ether_header *);
2247 switch (htons(eh->ether_type)) {
2248 case ETHERTYPE_IP:
2249 case ETHERTYPE_IPV6:
2250 offset = ETHER_HDR_LEN;
2251 break;
2252
2253 case ETHERTYPE_VLAN:
2254 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2255 break;
2256
2257 default:
2258 /*
2259 * Don't support this protocol or encapsulation.
2260 */
2261 *fieldsp = 0;
2262 *cmdp = 0;
2263 return 0;
2264 }
2265
2266 if ((m0->m_pkthdr.csum_flags &
2267 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2268 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2269 } else {
2270 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2271 }
2272 ipcse = offset + iphl - 1;
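/*
 * IPCSE names the inclusive offset of the last byte covered by the
 * IP header checksum, hence the "- 1". (For TSOv6 it is cleared
 * below, as IPv6 has no header checksum.)
 */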
2273
2274 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2275 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2276 seg = 0;
2277 fields = 0;
2278
2279 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2280 int hlen = offset + iphl;
2281 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2282
2283 if (__predict_false(m0->m_len <
2284 (hlen + sizeof(struct tcphdr)))) {
2285 /*
2286 * TCP/IP headers are not in the first mbuf; we need
2287 * to do this the slow and painful way. Let's just
2288 * hope this doesn't happen very often.
2289 */
2290 struct tcphdr th;
2291
2292 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2293
2294 m_copydata(m0, hlen, sizeof(th), &th);
2295 if (v4) {
2296 struct ip ip;
2297
2298 m_copydata(m0, offset, sizeof(ip), &ip);
2299 ip.ip_len = 0;
2300 m_copyback(m0,
2301 offset + offsetof(struct ip, ip_len),
2302 sizeof(ip.ip_len), &ip.ip_len);
2303 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2304 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2305 } else {
2306 struct ip6_hdr ip6;
2307
2308 m_copydata(m0, offset, sizeof(ip6), &ip6);
2309 ip6.ip6_plen = 0;
2310 m_copyback(m0,
2311 offset + offsetof(struct ip6_hdr, ip6_plen),
2312 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2313 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2314 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2315 }
2316 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2317 sizeof(th.th_sum), &th.th_sum);
2318
2319 hlen += th.th_off << 2;
2320 } else {
2321 /*
2322 * TCP/IP headers are in the first mbuf; we can do
2323 * this the easy way.
2324 */
2325 struct tcphdr *th;
2326
2327 if (v4) {
2328 struct ip *ip =
2329 (void *)(mtod(m0, char *) + offset);
2330 th = (void *)(mtod(m0, char *) + hlen);
2331
2332 ip->ip_len = 0;
2333 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2334 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2335 } else {
2336 struct ip6_hdr *ip6 =
2337 (void *)(mtod(m0, char *) + offset);
2338 th = (void *)(mtod(m0, char *) + hlen);
2339
2340 ip6->ip6_plen = 0;
2341 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2342 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2343 }
2344 hlen += th->th_off << 2;
2345 }
2346
2347 if (v4) {
2348 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2349 cmdlen |= WTX_TCPIP_CMD_IP;
2350 } else {
2351 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2352 ipcse = 0;
2353 }
2354 cmd |= WTX_TCPIP_CMD_TSE;
2355 cmdlen |= WTX_TCPIP_CMD_TSE |
2356 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2357 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2358 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2359 }
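/*
 * For TSO, the context descriptor written below carries the total
 * header length and the MSS; the MAC uses these to slice the
 * payload into MSS-sized segments and to rewrite the IP length and
 * TCP fields on each one (which is why ip_len/ip6_plen were zeroed
 * and th_sum seeded with the pseudo-header checksum above).
 */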
2360
2361 /*
2362 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2363 * offload feature, if we load the context descriptor, we
2364 * MUST provide valid values for IPCSS and TUCSS fields.
2365 */
2366
2367 ipcs = WTX_TCPIP_IPCSS(offset) |
2368 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2369 WTX_TCPIP_IPCSE(ipcse);
2370 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2371 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2372 fields |= WTX_IXSM;
2373 }
2374
2375 offset += iphl;
2376
2377 if (m0->m_pkthdr.csum_flags &
2378 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2379 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2380 fields |= WTX_TXSM;
2381 tucs = WTX_TCPIP_TUCSS(offset) |
2382 WTX_TCPIP_TUCSO(offset +
2383 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2384 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2385 } else if ((m0->m_pkthdr.csum_flags &
2386 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2387 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2388 fields |= WTX_TXSM;
2389 tucs = WTX_TCPIP_TUCSS(offset) |
2390 WTX_TCPIP_TUCSO(offset +
2391 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2392 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2393 } else {
2394 /* Just initialize it to a valid TCP context. */
2395 tucs = WTX_TCPIP_TUCSS(offset) |
2396 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2397 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2398 }
2399
2400 /* Fill in the context descriptor. */
2401 t = (struct livengood_tcpip_ctxdesc *)
2402 &sc->sc_txdescs[sc->sc_txnext];
2403 t->tcpip_ipcs = htole32(ipcs);
2404 t->tcpip_tucs = htole32(tucs);
2405 t->tcpip_cmdlen = htole32(cmdlen);
2406 t->tcpip_seg = htole32(seg);
2407 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2408
2409 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2410 txs->txs_ndesc++;
2411
2412 *cmdp = cmd;
2413 *fieldsp = fields;
2414
2415 return 0;
2416 }
2417
2418 static void
2419 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2420 {
2421 struct mbuf *m;
2422 int i;
2423
2424 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2425 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2426 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2427 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2428 m->m_data, m->m_len, m->m_flags);
2429 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2430 i, i == 1 ? "" : "s");
2431 }
2432
2433 /*
2434 * wm_82547_txfifo_stall:
2435 *
2436 * Callout used to wait for the 82547 Tx FIFO to drain,
2437 * reset the FIFO pointers, and restart packet transmission.
2438 */
2439 static void
2440 wm_82547_txfifo_stall(void *arg)
2441 {
2442 struct wm_softc *sc = arg;
2443 int s;
2444
2445 s = splnet();
2446
2447 if (sc->sc_txfifo_stall) {
2448 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2449 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2450 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2451 /*
2452 * Packets have drained. Stop transmitter, reset
2453 * FIFO pointers, restart transmitter, and kick
2454 * the packet queue.
2455 */
2456 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2457 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2458 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2459 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2460 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2461 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2462 CSR_WRITE(sc, WMREG_TCTL, tctl);
2463 CSR_WRITE_FLUSH(sc);
2464
2465 sc->sc_txfifo_head = 0;
2466 sc->sc_txfifo_stall = 0;
2467 wm_start(&sc->sc_ethercom.ec_if);
2468 } else {
2469 /*
2470 * Still waiting for packets to drain; try again in
2471 * another tick.
2472 */
2473 callout_schedule(&sc->sc_txfifo_ch, 1);
2474 }
2475 }
2476
2477 splx(s);
2478 }
2479
2480 static void
2481 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2482 {
2483 uint32_t reg;
2484
2485 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2486
2487 if (on != 0)
2488 reg |= EXTCNFCTR_GATE_PHY_CFG;
2489 else
2490 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2491
2492 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2493 }
2494
2495 /*
2496 * wm_82547_txfifo_bugchk:
2497 *
2498 * Check for bug condition in the 82547 Tx FIFO. We need to
2499 * prevent enqueueing a packet that would wrap around the end
2500 * of the Tx FIFO ring buffer, otherwise the chip will croak.
2501 *
2502 * We do this by checking the amount of space before the end
2503 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2504 * the Tx FIFO, wait for all remaining packets to drain, reset
2505 * the internal FIFO pointers to the beginning, and restart
2506 * transmission on the interface.
2507 */
2508 #define WM_FIFO_HDR 0x10
2509 #define WM_82547_PAD_LEN 0x3e0
2510 static int
2511 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2512 {
2513 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2514 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
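/*
 * Illustrative arithmetic: a 1514-byte frame occupies
 * roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO, so it stalls
 * the queue only when the space left before the end of the FIFO
 * is at most 1536 - WM_82547_PAD_LEN = 544 bytes.
 */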
2515
2516 /* Just return if already stalled. */
2517 if (sc->sc_txfifo_stall)
2518 return 1;
2519
2520 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2521 /* Stall only occurs in half-duplex mode. */
2522 goto send_packet;
2523 }
2524
2525 if (len >= WM_82547_PAD_LEN + space) {
2526 sc->sc_txfifo_stall = 1;
2527 callout_schedule(&sc->sc_txfifo_ch, 1);
2528 return 1;
2529 }
2530
2531 send_packet:
2532 sc->sc_txfifo_head += len;
2533 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2534 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2535
2536 return 0;
2537 }
2538
2539 /*
2540 * wm_start: [ifnet interface function]
2541 *
2542 * Start packet transmission on the interface.
2543 */
2544 static void
2545 wm_start(struct ifnet *ifp)
2546 {
2547 struct wm_softc *sc = ifp->if_softc;
2548 struct mbuf *m0;
2549 struct m_tag *mtag;
2550 struct wm_txsoft *txs;
2551 bus_dmamap_t dmamap;
2552 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2553 bus_addr_t curaddr;
2554 bus_size_t seglen, curlen;
2555 uint32_t cksumcmd;
2556 uint8_t cksumfields;
2557
2558 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2559 return;
2560
2561 /*
2562 * Remember the previous number of free descriptors.
2563 */
2564 ofree = sc->sc_txfree;
2565
2566 /*
2567 * Loop through the send queue, setting up transmit descriptors
2568 * until we drain the queue, or use up all available transmit
2569 * descriptors.
2570 */
2571 for (;;) {
2572 /* Grab a packet off the queue. */
2573 IFQ_POLL(&ifp->if_snd, m0);
2574 if (m0 == NULL)
2575 break;
2576
2577 DPRINTF(WM_DEBUG_TX,
2578 ("%s: TX: have packet to transmit: %p\n",
2579 device_xname(sc->sc_dev), m0));
2580
2581 /* Get a work queue entry. */
2582 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2583 wm_txintr(sc);
2584 if (sc->sc_txsfree == 0) {
2585 DPRINTF(WM_DEBUG_TX,
2586 ("%s: TX: no free job descriptors\n",
2587 device_xname(sc->sc_dev)));
2588 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2589 break;
2590 }
2591 }
2592
2593 txs = &sc->sc_txsoft[sc->sc_txsnext];
2594 dmamap = txs->txs_dmamap;
2595
2596 use_tso = (m0->m_pkthdr.csum_flags &
2597 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2598
2599 /*
2600 * So says the Linux driver:
2601 * The controller does a simple calculation to make sure
2602 * there is enough room in the FIFO before initiating the
2603 * DMA for each buffer. The calc is:
2604 * 4 = ceil(buffer len / MSS)
2605 * To make sure we don't overrun the FIFO, adjust the max
2606 * buffer len if the MSS drops.
2607 */
2608 dmamap->dm_maxsegsz =
2609 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2610 ? m0->m_pkthdr.segsz << 2
2611 : WTX_MAX_LEN;
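/*
 * E.g. (illustrative) with an MSS of 1460, each DMA segment is
 * clamped to 1460 << 2 = 5840 bytes, which keeps the
 * "4 = ceil(buffer len / MSS)" estimate above valid.
 */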
2612
2613 /*
2614 * Load the DMA map. If this fails, the packet either
2615 * didn't fit in the allotted number of segments, or we
2616 * were short on resources. For the too-many-segments
2617 * case, we simply report an error and drop the packet,
2618 * since we can't sanely copy a jumbo packet to a single
2619 * buffer.
2620 */
2621 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2622 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2623 if (error) {
2624 if (error == EFBIG) {
2625 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2626 log(LOG_ERR, "%s: Tx packet consumes too many "
2627 "DMA segments, dropping...\n",
2628 device_xname(sc->sc_dev));
2629 IFQ_DEQUEUE(&ifp->if_snd, m0);
2630 wm_dump_mbuf_chain(sc, m0);
2631 m_freem(m0);
2632 continue;
2633 }
2634 /*
2635 * Short on resources, just stop for now.
2636 */
2637 DPRINTF(WM_DEBUG_TX,
2638 ("%s: TX: dmamap load failed: %d\n",
2639 device_xname(sc->sc_dev), error));
2640 break;
2641 }
2642
2643 segs_needed = dmamap->dm_nsegs;
2644 if (use_tso) {
2645 /* For sentinel descriptor; see below. */
2646 segs_needed++;
2647 }
2648
2649 /*
2650 * Ensure we have enough descriptors free to describe
2651 * the packet. Note, we always reserve one descriptor
2652 * at the end of the ring due to the semantics of the
2653 * TDT register, plus one more in the event we need
2654 * to load offload context.
2655 */
2656 if (segs_needed > sc->sc_txfree - 2) {
2657 /*
2658 * Not enough free descriptors to transmit this
2659 * packet. We haven't committed anything yet,
2660 * so just unload the DMA map, put the packet
2661 * back on the queue, and punt. Notify the upper
2662 * layer that there are no more slots left.
2663 */
2664 DPRINTF(WM_DEBUG_TX,
2665 ("%s: TX: need %d (%d) descriptors, have %d\n",
2666 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2667 segs_needed, sc->sc_txfree - 1));
2668 ifp->if_flags |= IFF_OACTIVE;
2669 bus_dmamap_unload(sc->sc_dmat, dmamap);
2670 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2671 break;
2672 }
2673
2674 /*
2675 * Check for 82547 Tx FIFO bug. We need to do this
2676 * once we know we can transmit the packet, since we
2677 * do some internal FIFO space accounting here.
2678 */
2679 if (sc->sc_type == WM_T_82547 &&
2680 wm_82547_txfifo_bugchk(sc, m0)) {
2681 DPRINTF(WM_DEBUG_TX,
2682 ("%s: TX: 82547 Tx FIFO bug detected\n",
2683 device_xname(sc->sc_dev)));
2684 ifp->if_flags |= IFF_OACTIVE;
2685 bus_dmamap_unload(sc->sc_dmat, dmamap);
2686 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2687 break;
2688 }
2689
2690 IFQ_DEQUEUE(&ifp->if_snd, m0);
2691
2692 /*
2693 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2694 */
2695
2696 DPRINTF(WM_DEBUG_TX,
2697 ("%s: TX: packet has %d (%d) DMA segments\n",
2698 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2699
2700 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2701
2702 /*
2703 * Store a pointer to the packet so that we can free it
2704 * later.
2705 *
2706 * Initially, we assume the number of descriptors the
2707 * packet uses equals the number of DMA segments. This may
2708 * be incremented by 1 if we do checksum offload (a descriptor
2709 * is used to set the checksum context).
2710 */
2711 txs->txs_mbuf = m0;
2712 txs->txs_firstdesc = sc->sc_txnext;
2713 txs->txs_ndesc = segs_needed;
2714
2715 /* Set up offload parameters for this packet. */
2716 if (m0->m_pkthdr.csum_flags &
2717 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2718 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2719 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2720 if (wm_tx_offload(sc, txs, &cksumcmd,
2721 &cksumfields) != 0) {
2722 /* Error message already displayed. */
2723 bus_dmamap_unload(sc->sc_dmat, dmamap);
2724 continue;
2725 }
2726 } else {
2727 cksumcmd = 0;
2728 cksumfields = 0;
2729 }
2730
2731 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2732
2733 /* Sync the DMA map. */
2734 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2735 BUS_DMASYNC_PREWRITE);
2736
2737 /*
2738 * Initialize the transmit descriptor.
2739 */
2740 for (nexttx = sc->sc_txnext, seg = 0;
2741 seg < dmamap->dm_nsegs; seg++) {
2742 for (seglen = dmamap->dm_segs[seg].ds_len,
2743 curaddr = dmamap->dm_segs[seg].ds_addr;
2744 seglen != 0;
2745 curaddr += curlen, seglen -= curlen,
2746 nexttx = WM_NEXTTX(sc, nexttx)) {
2747 curlen = seglen;
2748
2749 /*
2750 * So says the Linux driver:
2751 * Work around for premature descriptor
2752 * write-backs in TSO mode. Append a
2753 * 4-byte sentinel descriptor.
2754 */
2755 if (use_tso &&
2756 seg == dmamap->dm_nsegs - 1 &&
2757 curlen > 8)
2758 curlen -= 4;
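/*
 * The 4 bytes trimmed here are emitted by the next loop
 * iteration as a descriptor of their own; that is the
 * sentinel slot accounted for in segs_needed above.
 */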
2759
2760 wm_set_dma_addr(
2761 &sc->sc_txdescs[nexttx].wtx_addr,
2762 curaddr);
2763 sc->sc_txdescs[nexttx].wtx_cmdlen =
2764 htole32(cksumcmd | curlen);
2765 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2766 0;
2767 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2768 cksumfields;
2769 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2770 lasttx = nexttx;
2771
2772 DPRINTF(WM_DEBUG_TX,
2773 ("%s: TX: desc %d: low %#" PRIx64 ", "
2774 "len %#04zx\n",
2775 device_xname(sc->sc_dev), nexttx,
2776 (uint64_t)curaddr, curlen));
2777 }
2778 }
2779
2780 KASSERT(lasttx != -1);
2781
2782 /*
2783 * Set up the command byte on the last descriptor of
2784 * the packet. If we're in the interrupt delay window,
2785 * delay the interrupt.
2786 */
2787 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2788 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2789
2790 /*
2791 * If VLANs are enabled and the packet has a VLAN tag, set
2792 * up the descriptor to encapsulate the packet for us.
2793 *
2794 * This is only valid on the last descriptor of the packet.
2795 */
2796 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2797 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2798 htole32(WTX_CMD_VLE);
2799 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2800 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2801 }
2802
2803 txs->txs_lastdesc = lasttx;
2804
2805 DPRINTF(WM_DEBUG_TX,
2806 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2807 device_xname(sc->sc_dev),
2808 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2809
2810 /* Sync the descriptors we're using. */
2811 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2812 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2813
2814 /* Give the packet to the chip. */
2815 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2816
2817 DPRINTF(WM_DEBUG_TX,
2818 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2819
2820 DPRINTF(WM_DEBUG_TX,
2821 ("%s: TX: finished transmitting packet, job %d\n",
2822 device_xname(sc->sc_dev), sc->sc_txsnext));
2823
2824 /* Advance the tx pointer. */
2825 sc->sc_txfree -= txs->txs_ndesc;
2826 sc->sc_txnext = nexttx;
2827
2828 sc->sc_txsfree--;
2829 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2830
2831 /* Pass the packet to any BPF listeners. */
2832 bpf_mtap(ifp, m0);
2833 }
2834
2835 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2836 /* No more slots; notify upper layer. */
2837 ifp->if_flags |= IFF_OACTIVE;
2838 }
2839
2840 if (sc->sc_txfree != ofree) {
2841 /* Set a watchdog timer in case the chip flakes out. */
2842 ifp->if_timer = 5;
2843 }
2844 }
2845
2846 /*
2847 * wm_nq_tx_offload:
2848 *
2849 * Set up TCP/IP checksumming parameters for the
2850 * specified packet, for NEWQUEUE devices
2851 */
2852 static int
2853 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2854 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2855 {
2856 struct mbuf *m0 = txs->txs_mbuf;
2857 struct m_tag *mtag;
2858 uint32_t vl_len, mssidx, cmdc;
2859 struct ether_header *eh;
2860 int offset, iphl;
2861
2862 /*
2863 * XXX It would be nice if the mbuf pkthdr had offset
2864 * fields for the protocol headers.
2865 */
2866 *cmdlenp = 0;
2867 *fieldsp = 0;
2868
2869 eh = mtod(m0, struct ether_header *);
2870 switch (htons(eh->ether_type)) {
2871 case ETHERTYPE_IP:
2872 case ETHERTYPE_IPV6:
2873 offset = ETHER_HDR_LEN;
2874 break;
2875
2876 case ETHERTYPE_VLAN:
2877 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2878 break;
2879
2880 default:
2881 /*
2882 * Don't support this protocol or encapsulation.
2883 */
2884 *do_csum = false;
2885 return 0;
2886 }
2887 *do_csum = true;
2888 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2889 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2890
2891 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2892 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2893
2894 if ((m0->m_pkthdr.csum_flags &
2895 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2896 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2897 } else {
2898 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2899 }
2900 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2901 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
2902
2903 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2904 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2905 << NQTXC_VLLEN_VLAN_SHIFT);
2906 *cmdlenp |= NQTX_CMD_VLE;
2907 }
2908
2909 mssidx = 0;
2910
2911 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2912 int hlen = offset + iphl;
2913 int tcp_hlen;
2914 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2915
2916 if (__predict_false(m0->m_len <
2917 (hlen + sizeof(struct tcphdr)))) {
2918 /*
2919 * TCP/IP headers are not in the first mbuf; we need
2920 * to do this the slow and painful way. Let's just
2921 * hope this doesn't happen very often.
2922 */
2923 struct tcphdr th;
2924
2925 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2926
2927 m_copydata(m0, hlen, sizeof(th), &th);
2928 if (v4) {
2929 struct ip ip;
2930
2931 m_copydata(m0, offset, sizeof(ip), &ip);
2932 ip.ip_len = 0;
2933 m_copyback(m0,
2934 offset + offsetof(struct ip, ip_len),
2935 sizeof(ip.ip_len), &ip.ip_len);
2936 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2937 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2938 } else {
2939 struct ip6_hdr ip6;
2940
2941 m_copydata(m0, offset, sizeof(ip6), &ip6);
2942 ip6.ip6_plen = 0;
2943 m_copyback(m0,
2944 offset + offsetof(struct ip6_hdr, ip6_plen),
2945 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2946 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2947 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2948 }
2949 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2950 sizeof(th.th_sum), &th.th_sum);
2951
2952 tcp_hlen = th.th_off << 2;
2953 } else {
2954 /*
2955 * TCP/IP headers are in the first mbuf; we can do
2956 * this the easy way.
2957 */
2958 struct tcphdr *th;
2959
2960 if (v4) {
2961 struct ip *ip =
2962 (void *)(mtod(m0, char *) + offset);
2963 th = (void *)(mtod(m0, char *) + hlen);
2964
2965 ip->ip_len = 0;
2966 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2967 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2968 } else {
2969 struct ip6_hdr *ip6 =
2970 (void *)(mtod(m0, char *) + offset);
2971 th = (void *)(mtod(m0, char *) + hlen);
2972
2973 ip6->ip6_plen = 0;
2974 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2975 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2976 }
2977 tcp_hlen = th->th_off << 2;
2978 }
2979 hlen += tcp_hlen;
2980 *cmdlenp |= NQTX_CMD_TSE;
2981
2982 if (v4) {
2983 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2984 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2985 } else {
2986 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2987 *fieldsp |= NQTXD_FIELDS_TUXSM;
2988 }
2989 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2990 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2991 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2992 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2993 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2994 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2995 } else {
2996 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2997 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2998 }
2999
3000 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3001 *fieldsp |= NQTXD_FIELDS_IXSM;
3002 cmdc |= NQTXC_CMD_IP4;
3003 }
3004
3005 if (m0->m_pkthdr.csum_flags &
3006 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3007 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
3008 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3009 cmdc |= NQTXC_CMD_TCP;
3010 } else {
3011 cmdc |= NQTXC_CMD_UDP;
3012 }
3013 cmdc |= NQTXC_CMD_IP4;
3014 *fieldsp |= NQTXD_FIELDS_TUXSM;
3015 }
3016 if (m0->m_pkthdr.csum_flags &
3017 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3018 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
3019 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3020 cmdc |= NQTXC_CMD_TCP;
3021 } else {
3022 cmdc |= NQTXC_CMD_UDP;
3023 }
3024 cmdc |= NQTXC_CMD_IP6;
3025 *fieldsp |= NQTXD_FIELDS_TUXSM;
3026 }
3027
3028 /* Fill in the context descriptor. */
3029 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3030 htole32(vl_len);
3031 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3032 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3033 htole32(cmdc);
3034 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3035 htole32(mssidx);
3036 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3037 DPRINTF(WM_DEBUG_TX,
3038 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3039 sc->sc_txnext, 0, vl_len));
3040 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3041 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3042 txs->txs_ndesc++;
3043 return 0;
3044 }
3045
3046 /*
3047 * wm_nq_start: [ifnet interface function]
3048 *
3049 * Start packet transmission on the interface for NEWQUEUE devices
3050 */
3051 static void
3052 wm_nq_start(struct ifnet *ifp)
3053 {
3054 struct wm_softc *sc = ifp->if_softc;
3055 struct mbuf *m0;
3056 struct m_tag *mtag;
3057 struct wm_txsoft *txs;
3058 bus_dmamap_t dmamap;
3059 int error, nexttx, lasttx = -1, seg, segs_needed;
3060 bool do_csum, sent;
3061
3062 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3063 return;
3064
3065 sent = false;
3066
3067 /*
3068 * Loop through the send queue, setting up transmit descriptors
3069 * until we drain the queue, or use up all available transmit
3070 * descriptors.
3071 */
3072 for (;;) {
3073 /* Grab a packet off the queue. */
3074 IFQ_POLL(&ifp->if_snd, m0);
3075 if (m0 == NULL)
3076 break;
3077
3078 DPRINTF(WM_DEBUG_TX,
3079 ("%s: TX: have packet to transmit: %p\n",
3080 device_xname(sc->sc_dev), m0));
3081
3082 /* Get a work queue entry. */
3083 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3084 wm_txintr(sc);
3085 if (sc->sc_txsfree == 0) {
3086 DPRINTF(WM_DEBUG_TX,
3087 ("%s: TX: no free job descriptors\n",
3088 device_xname(sc->sc_dev)));
3089 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3090 break;
3091 }
3092 }
3093
3094 txs = &sc->sc_txsoft[sc->sc_txsnext];
3095 dmamap = txs->txs_dmamap;
3096
3097 /*
3098 * Load the DMA map. If this fails, the packet either
3099 * didn't fit in the allotted number of segments, or we
3100 * were short on resources. For the too-many-segments
3101 * case, we simply report an error and drop the packet,
3102 * since we can't sanely copy a jumbo packet to a single
3103 * buffer.
3104 */
3105 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3106 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3107 if (error) {
3108 if (error == EFBIG) {
3109 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3110 log(LOG_ERR, "%s: Tx packet consumes too many "
3111 "DMA segments, dropping...\n",
3112 device_xname(sc->sc_dev));
3113 IFQ_DEQUEUE(&ifp->if_snd, m0);
3114 wm_dump_mbuf_chain(sc, m0);
3115 m_freem(m0);
3116 continue;
3117 }
3118 /*
3119 * Short on resources, just stop for now.
3120 */
3121 DPRINTF(WM_DEBUG_TX,
3122 ("%s: TX: dmamap load failed: %d\n",
3123 device_xname(sc->sc_dev), error));
3124 break;
3125 }
3126
3127 segs_needed = dmamap->dm_nsegs;
3128
3129 /*
3130 * Ensure we have enough descriptors free to describe
3131 * the packet. Note, we always reserve one descriptor
3132 * at the end of the ring due to the semantics of the
3133 * TDT register, plus one more in the event we need
3134 * to load offload context.
3135 */
3136 if (segs_needed > sc->sc_txfree - 2) {
3137 /*
3138 * Not enough free descriptors to transmit this
3139 * packet. We haven't committed anything yet,
3140 * so just unload the DMA map, put the packet
3141 * back on the queue, and punt. Notify the upper
3142 * layer that there are no more slots left.
3143 */
3144 DPRINTF(WM_DEBUG_TX,
3145 ("%s: TX: need %d (%d) descriptors, have %d\n",
3146 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3147 segs_needed, sc->sc_txfree - 1));
3148 ifp->if_flags |= IFF_OACTIVE;
3149 bus_dmamap_unload(sc->sc_dmat, dmamap);
3150 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3151 break;
3152 }
3153
3154 IFQ_DEQUEUE(&ifp->if_snd, m0);
3155
3156 /*
3157 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3158 */
3159
3160 DPRINTF(WM_DEBUG_TX,
3161 ("%s: TX: packet has %d (%d) DMA segments\n",
3162 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3163
3164 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3165
3166 /*
3167 * Store a pointer to the packet so that we can free it
3168 * later.
3169 *
3170 * Initially, we assume the number of descriptors the
3171 * packet uses equals the number of DMA segments. This may
3172 * be incremented by 1 if we do checksum offload (a descriptor
3173 * is used to set the checksum context).
3174 */
3175 txs->txs_mbuf = m0;
3176 txs->txs_firstdesc = sc->sc_txnext;
3177 txs->txs_ndesc = segs_needed;
3178
3179 /* Set up offload parameters for this packet. */
3180 uint32_t cmdlen, fields, dcmdlen;
3181 if (m0->m_pkthdr.csum_flags &
3182 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3183 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3184 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3185 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3186 &do_csum) != 0) {
3187 /* Error message already displayed. */
3188 bus_dmamap_unload(sc->sc_dmat, dmamap);
3189 continue;
3190 }
3191 } else {
3192 do_csum = false;
3193 cmdlen = 0;
3194 fields = 0;
3195 }
3196
3197 /* Sync the DMA map. */
3198 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3199 BUS_DMASYNC_PREWRITE);
3200
3201 /*
3202 * Initialize the first transmit descriptor.
3203 */
3204 nexttx = sc->sc_txnext;
3205 if (!do_csum) {
3206 /* setup a legacy descriptor */
3207 wm_set_dma_addr(
3208 &sc->sc_txdescs[nexttx].wtx_addr,
3209 dmamap->dm_segs[0].ds_addr);
3210 sc->sc_txdescs[nexttx].wtx_cmdlen =
3211 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3212 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3213 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3214 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3215 NULL) {
3216 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3217 htole32(WTX_CMD_VLE);
3218 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3219 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3220 } else {
3221 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3222 }
3223 dcmdlen = 0;
3224 } else {
3225 /* setup an advanced data descriptor */
3226 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3227 htole64(dmamap->dm_segs[0].ds_addr);
3228 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3229 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3230 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3231 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3232 htole32(fields);
3233 DPRINTF(WM_DEBUG_TX,
3234 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3235 device_xname(sc->sc_dev), nexttx,
3236 (uint64_t)dmamap->dm_segs[0].ds_addr));
3237 DPRINTF(WM_DEBUG_TX,
3238 ("\t 0x%08x%08x\n", fields,
3239 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3240 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3241 }
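/*
 * dcmdlen is 0 for the legacy layout and
 * NQTX_DTYP_D | NQTX_CMD_DEXT for the advanced one; the address
 * and cmdlen fields of the two layouts overlap, so the stores in
 * the loop below work for either format.
 */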
3242
3243 lasttx = nexttx;
3244 nexttx = WM_NEXTTX(sc, nexttx);
3245 /*
3246 * Fill in the next descriptors. The legacy and advanced
3247 * formats are laid out the same from here on.
3248 */
3249 for (seg = 1; seg < dmamap->dm_nsegs;
3250 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3251 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3252 htole64(dmamap->dm_segs[seg].ds_addr);
3253 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3254 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3255 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3256 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3257 lasttx = nexttx;
3258
3259 DPRINTF(WM_DEBUG_TX,
3260 ("%s: TX: desc %d: %#" PRIx64 ", "
3261 "len %#04zx\n",
3262 device_xname(sc->sc_dev), nexttx,
3263 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3264 dmamap->dm_segs[seg].ds_len));
3265 }
3266
3267 KASSERT(lasttx != -1);
3268
3269 /*
3270 * Set up the command byte on the last descriptor of
3271 * the packet. If we're in the interrupt delay window,
3272 * delay the interrupt.
3273 */
3274 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3275 (NQTX_CMD_EOP | NQTX_CMD_RS));
3276 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3277 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3278
3279 txs->txs_lastdesc = lasttx;
3280
3281 DPRINTF(WM_DEBUG_TX,
3282 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3283 device_xname(sc->sc_dev),
3284 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3285
3286 /* Sync the descriptors we're using. */
3287 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3288 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3289
3290 /* Give the packet to the chip. */
3291 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3292 sent = true;
3293
3294 DPRINTF(WM_DEBUG_TX,
3295 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3296
3297 DPRINTF(WM_DEBUG_TX,
3298 ("%s: TX: finished transmitting packet, job %d\n",
3299 device_xname(sc->sc_dev), sc->sc_txsnext));
3300
3301 /* Advance the tx pointer. */
3302 sc->sc_txfree -= txs->txs_ndesc;
3303 sc->sc_txnext = nexttx;
3304
3305 sc->sc_txsfree--;
3306 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3307
3308 /* Pass the packet to any BPF listeners. */
3309 bpf_mtap(ifp, m0);
3310 }
3311
3312 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3313 /* No more slots; notify upper layer. */
3314 ifp->if_flags |= IFF_OACTIVE;
3315 }
3316
3317 if (sent) {
3318 /* Set a watchdog timer in case the chip flakes out. */
3319 ifp->if_timer = 5;
3320 }
3321 }
3322
3323 /*
3324 * wm_watchdog: [ifnet interface function]
3325 *
3326 * Watchdog timer handler.
3327 */
3328 static void
3329 wm_watchdog(struct ifnet *ifp)
3330 {
3331 struct wm_softc *sc = ifp->if_softc;
3332
3333 /*
3334 * Since we're using delayed interrupts, sweep up
3335 * before we report an error.
3336 */
3337 wm_txintr(sc);
3338
3339 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3340 #ifdef WM_DEBUG
3341 int i, j;
3342 struct wm_txsoft *txs;
3343 #endif
3344 log(LOG_ERR,
3345 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3346 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3347 sc->sc_txnext);
3348 ifp->if_oerrors++;
3349 #ifdef WM_DEBUG
3350 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3351 i = WM_NEXTTXS(sc, i)) {
3352 txs = &sc->sc_txsoft[i];
3353 printf("txs %d tx %d -> %d\n",
3354 i, txs->txs_firstdesc, txs->txs_lastdesc);
3355 for (j = txs->txs_firstdesc; ;
3356 j = WM_NEXTTX(sc, j)) {
3357 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3358 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3359 printf("\t %#08x%08x\n",
3360 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3361 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3362 if (j == txs->txs_lastdesc)
3363 break;
3364 }
3365 }
3366 #endif
3367 /* Reset the interface. */
3368 (void) wm_init(ifp);
3369 }
3370
3371 /* Try to get more packets going. */
3372 ifp->if_start(ifp);
3373 }
3374
3375 static int
3376 wm_ifflags_cb(struct ethercom *ec)
3377 {
3378 struct ifnet *ifp = &ec->ec_if;
3379 struct wm_softc *sc = ifp->if_softc;
3380 int change = ifp->if_flags ^ sc->sc_if_flags;
3381
3382 if (change != 0)
3383 sc->sc_if_flags = ifp->if_flags;
3384
3385 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3386 return ENETRESET;
3387
3388 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3389 wm_set_filter(sc);
3390
3391 wm_set_vlan(sc);
3392
3393 return 0;
3394 }
3395
3396 /*
3397 * wm_ioctl: [ifnet interface function]
3398 *
3399 * Handle control requests from the operator.
3400 */
3401 static int
3402 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3403 {
3404 struct wm_softc *sc = ifp->if_softc;
3405 struct ifreq *ifr = (struct ifreq *) data;
3406 struct ifaddr *ifa = (struct ifaddr *)data;
3407 struct sockaddr_dl *sdl;
3408 int s, error;
3409
3410 s = splnet();
3411
3412 switch (cmd) {
3413 case SIOCSIFMEDIA:
3414 case SIOCGIFMEDIA:
3415 /* Flow control requires full-duplex mode. */
3416 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3417 (ifr->ifr_media & IFM_FDX) == 0)
3418 ifr->ifr_media &= ~IFM_ETH_FMASK;
3419 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3420 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3421 /* We can do both TXPAUSE and RXPAUSE. */
3422 ifr->ifr_media |=
3423 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3424 }
3425 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3426 }
3427 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3428 break;
3429 case SIOCINITIFADDR:
3430 if (ifa->ifa_addr->sa_family == AF_LINK) {
3431 sdl = satosdl(ifp->if_dl->ifa_addr);
3432 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3433 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3434 /* unicast address is first multicast entry */
3435 wm_set_filter(sc);
3436 error = 0;
3437 break;
3438 }
3439 /*FALLTHROUGH*/
3440 default:
3441 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3442 break;
3443
3444 error = 0;
3445
3446 if (cmd == SIOCSIFCAP)
3447 error = (*ifp->if_init)(ifp);
3448 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3449 ;
3450 else if (ifp->if_flags & IFF_RUNNING) {
3451 /*
3452 * Multicast list has changed; set the hardware filter
3453 * accordingly.
3454 */
3455 wm_set_filter(sc);
3456 }
3457 break;
3458 }
3459
3460 /* Try to get more packets going. */
3461 ifp->if_start(ifp);
3462
3463 splx(s);
3464 return error;
3465 }
3466
3467 /*
3468 * wm_intr:
3469 *
3470 * Interrupt service routine.
3471 */
3472 static int
3473 wm_intr(void *arg)
3474 {
3475 struct wm_softc *sc = arg;
3476 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3477 uint32_t icr;
3478 int handled = 0;
3479
3480 while (1 /* CONSTCOND */) {
3481 icr = CSR_READ(sc, WMREG_ICR);
3482 if ((icr & sc->sc_icr) == 0)
3483 break;
3484 rnd_add_uint32(&sc->rnd_source, icr);
3485
3486 handled = 1;
3487
3488 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3489 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3490 DPRINTF(WM_DEBUG_RX,
3491 ("%s: RX: got Rx intr 0x%08x\n",
3492 device_xname(sc->sc_dev),
3493 icr & (ICR_RXDMT0|ICR_RXT0)));
3494 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3495 }
3496 #endif
3497 wm_rxintr(sc);
3498
3499 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3500 if (icr & ICR_TXDW) {
3501 DPRINTF(WM_DEBUG_TX,
3502 ("%s: TX: got TXDW interrupt\n",
3503 device_xname(sc->sc_dev)));
3504 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3505 }
3506 #endif
3507 wm_txintr(sc);
3508
3509 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3510 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3511 wm_linkintr(sc, icr);
3512 }
3513
3514 if (icr & ICR_RXO) {
3515 #if defined(WM_DEBUG)
3516 log(LOG_WARNING, "%s: Receive overrun\n",
3517 device_xname(sc->sc_dev));
3518 #endif /* defined(WM_DEBUG) */
3519 }
3520 }
3521
3522 if (handled) {
3523 /* Try to get more packets going. */
3524 ifp->if_start(ifp);
3525 }
3526
3527 return handled;
3528 }
3529
3530 /*
3531 * wm_txintr:
3532 *
3533 * Helper; handle transmit interrupts.
3534 */
3535 static void
3536 wm_txintr(struct wm_softc *sc)
3537 {
3538 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3539 struct wm_txsoft *txs;
3540 uint8_t status;
3541 int i;
3542
3543 ifp->if_flags &= ~IFF_OACTIVE;
3544
3545 /*
3546 * Go through the Tx list and free mbufs for those
3547 * frames which have been transmitted.
3548 */
3549 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3550 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3551 txs = &sc->sc_txsoft[i];
3552
3553 DPRINTF(WM_DEBUG_TX,
3554 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3555
3556 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3557 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3558
3559 status =
3560 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3561 if ((status & WTX_ST_DD) == 0) {
3562 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3563 BUS_DMASYNC_PREREAD);
3564 break;
3565 }
3566
3567 DPRINTF(WM_DEBUG_TX,
3568 ("%s: TX: job %d done: descs %d..%d\n",
3569 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3570 txs->txs_lastdesc));
3571
3572 /*
3573 * XXX We should probably be using the statistics
3574 * XXX registers, but I don't know if they exist
3575 * XXX on chips before the i82544.
3576 */
3577
3578 #ifdef WM_EVENT_COUNTERS
3579 if (status & WTX_ST_TU)
3580 WM_EVCNT_INCR(&sc->sc_ev_tu);
3581 #endif /* WM_EVENT_COUNTERS */
3582
3583 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3584 ifp->if_oerrors++;
3585 if (status & WTX_ST_LC)
3586 log(LOG_WARNING, "%s: late collision\n",
3587 device_xname(sc->sc_dev));
3588 else if (status & WTX_ST_EC) {
3589 ifp->if_collisions += 16;
3590 log(LOG_WARNING, "%s: excessive collisions\n",
3591 device_xname(sc->sc_dev));
3592 }
3593 } else
3594 ifp->if_opackets++;
3595
3596 sc->sc_txfree += txs->txs_ndesc;
3597 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3598 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3599 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3600 m_freem(txs->txs_mbuf);
3601 txs->txs_mbuf = NULL;
3602 }
3603
3604 /* Update the dirty transmit buffer pointer. */
3605 sc->sc_txsdirty = i;
3606 DPRINTF(WM_DEBUG_TX,
3607 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3608
3609 /*
3610 * If there are no more pending transmissions, cancel the watchdog
3611 * timer.
3612 */
3613 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3614 ifp->if_timer = 0;
3615 }
3616
3617 /*
3618 * wm_rxintr:
3619 *
3620 * Helper; handle receive interrupts.
3621 */
3622 static void
3623 wm_rxintr(struct wm_softc *sc)
3624 {
3625 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3626 struct wm_rxsoft *rxs;
3627 struct mbuf *m;
3628 int i, len;
3629 uint8_t status, errors;
3630 uint16_t vlantag;
3631
3632 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3633 rxs = &sc->sc_rxsoft[i];
3634
3635 DPRINTF(WM_DEBUG_RX,
3636 ("%s: RX: checking descriptor %d\n",
3637 device_xname(sc->sc_dev), i));
3638
3639 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3640
3641 status = sc->sc_rxdescs[i].wrx_status;
3642 errors = sc->sc_rxdescs[i].wrx_errors;
3643 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3644 vlantag = sc->sc_rxdescs[i].wrx_special;
3645
3646 if ((status & WRX_ST_DD) == 0) {
3647 /*
3648 * We have processed all of the receive descriptors.
3649 */
3650 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3651 break;
3652 }
3653
3654 if (__predict_false(sc->sc_rxdiscard)) {
3655 DPRINTF(WM_DEBUG_RX,
3656 ("%s: RX: discarding contents of descriptor %d\n",
3657 device_xname(sc->sc_dev), i));
3658 WM_INIT_RXDESC(sc, i);
3659 if (status & WRX_ST_EOP) {
3660 /* Reset our state. */
3661 DPRINTF(WM_DEBUG_RX,
3662 ("%s: RX: resetting rxdiscard -> 0\n",
3663 device_xname(sc->sc_dev)));
3664 sc->sc_rxdiscard = 0;
3665 }
3666 continue;
3667 }
3668
3669 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3670 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3671
3672 m = rxs->rxs_mbuf;
3673
3674 /*
3675 * Add a new receive buffer to the ring, unless of
3676 * course the length is zero. Treat the latter as a
3677 * failed mapping.
3678 */
3679 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3680 /*
3681 * Failed, throw away what we've done so
3682 * far, and discard the rest of the packet.
3683 */
3684 ifp->if_ierrors++;
3685 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3686 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3687 WM_INIT_RXDESC(sc, i);
3688 if ((status & WRX_ST_EOP) == 0)
3689 sc->sc_rxdiscard = 1;
3690 if (sc->sc_rxhead != NULL)
3691 m_freem(sc->sc_rxhead);
3692 WM_RXCHAIN_RESET(sc);
3693 DPRINTF(WM_DEBUG_RX,
3694 ("%s: RX: Rx buffer allocation failed, "
3695 "dropping packet%s\n", device_xname(sc->sc_dev),
3696 sc->sc_rxdiscard ? " (discard)" : ""));
3697 continue;
3698 }
3699
3700 m->m_len = len;
3701 sc->sc_rxlen += len;
3702 DPRINTF(WM_DEBUG_RX,
3703 ("%s: RX: buffer at %p len %d\n",
3704 device_xname(sc->sc_dev), m->m_data, len));
3705
3706 /*
3707 * If this is not the end of the packet, keep
3708 * looking.
3709 */
3710 if ((status & WRX_ST_EOP) == 0) {
3711 WM_RXCHAIN_LINK(sc, m);
3712 DPRINTF(WM_DEBUG_RX,
3713 ("%s: RX: not yet EOP, rxlen -> %d\n",
3714 device_xname(sc->sc_dev), sc->sc_rxlen));
3715 continue;
3716 }
3717
3718 /*
3719 		 * Okay, we have the entire packet now.  The chip is
3720 		 * configured to include the FCS except on the I350 and
3721 		 * I21[01] (not all chips can be configured to strip it),
3722 		 * so we need to trim it here.
3723 		 * We may need to adjust the length of the previous mbuf in
3724 		 * the chain if the current mbuf is too short.
3725 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
3726 		 * is always set on the I350, so the chip strips the CRC itself.
3727 */
3728 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
3729 && (sc->sc_type != WM_T_I210)
3730 && (sc->sc_type != WM_T_I211)) {
3731 if (m->m_len < ETHER_CRC_LEN) {
3732 sc->sc_rxtail->m_len
3733 -= (ETHER_CRC_LEN - m->m_len);
3734 m->m_len = 0;
3735 } else
3736 m->m_len -= ETHER_CRC_LEN;
3737 len = sc->sc_rxlen - ETHER_CRC_LEN;
3738 } else
3739 len = sc->sc_rxlen;
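		/*
		 * Example of the boundary case handled above: if the 4-byte
		 * FCS straddles two mbufs (say the final mbuf holds only 1
		 * of its bytes), the final mbuf is emptied (m_len = 0) and
		 * the remaining 3 bytes are trimmed from sc_rxtail.
		 */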
3740
3741 WM_RXCHAIN_LINK(sc, m);
3742
3743 *sc->sc_rxtailp = NULL;
3744 m = sc->sc_rxhead;
3745
3746 WM_RXCHAIN_RESET(sc);
3747
3748 DPRINTF(WM_DEBUG_RX,
3749 ("%s: RX: have entire packet, len -> %d\n",
3750 device_xname(sc->sc_dev), len));
3751
3752 /*
3753 * If an error occurred, update stats and drop the packet.
3754 */
3755 if (errors &
3756 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3757 if (errors & WRX_ER_SE)
3758 log(LOG_WARNING, "%s: symbol error\n",
3759 device_xname(sc->sc_dev));
3760 else if (errors & WRX_ER_SEQ)
3761 log(LOG_WARNING, "%s: receive sequence error\n",
3762 device_xname(sc->sc_dev));
3763 else if (errors & WRX_ER_CE)
3764 log(LOG_WARNING, "%s: CRC error\n",
3765 device_xname(sc->sc_dev));
3766 m_freem(m);
3767 continue;
3768 }
3769
3770 /*
3771 * No errors. Receive the packet.
3772 */
3773 m->m_pkthdr.rcvif = ifp;
3774 m->m_pkthdr.len = len;
3775
3776 /*
3777 * If VLANs are enabled, VLAN packets have been unwrapped
3778 * for us. Associate the tag with the packet.
3779 */
3780 		/* XXX should check for i350 and i354 */
3781 if ((status & WRX_ST_VP) != 0) {
3782 VLAN_INPUT_TAG(ifp, m,
3783 le16toh(vlantag),
3784 continue);
3785 }
3786
3787 /*
3788 * Set up checksum info for this packet.
3789 */
3790 if ((status & WRX_ST_IXSM) == 0) {
3791 if (status & WRX_ST_IPCS) {
3792 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3793 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3794 if (errors & WRX_ER_IPE)
3795 m->m_pkthdr.csum_flags |=
3796 M_CSUM_IPv4_BAD;
3797 }
3798 if (status & WRX_ST_TCPCS) {
3799 /*
3800 * Note: we don't know if this was TCP or UDP,
3801 * so we just set both bits, and expect the
3802 * upper layers to deal.
3803 */
3804 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3805 m->m_pkthdr.csum_flags |=
3806 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3807 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3808 if (errors & WRX_ER_TCPE)
3809 m->m_pkthdr.csum_flags |=
3810 M_CSUM_TCP_UDP_BAD;
3811 }
3812 }
3813
3814 ifp->if_ipackets++;
3815
3816 /* Pass this up to any BPF listeners. */
3817 bpf_mtap(ifp, m);
3818
3819 /* Pass it on. */
3820 (*ifp->if_input)(ifp, m);
3821 }
3822
3823 /* Update the receive pointer. */
3824 sc->sc_rxptr = i;
3825
3826 DPRINTF(WM_DEBUG_RX,
3827 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3828 }
3829
3830 /*
3831 * wm_linkintr_gmii:
3832 *
3833 * Helper; handle link interrupts for GMII.
3834 */
3835 static void
3836 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3837 {
3838
3839 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3840 __func__));
3841
3842 if (icr & ICR_LSC) {
3843 DPRINTF(WM_DEBUG_LINK,
3844 ("%s: LINK: LSC -> mii_pollstat\n",
3845 device_xname(sc->sc_dev)));
3846 mii_pollstat(&sc->sc_mii);
3847 if (sc->sc_type == WM_T_82543) {
3848 int miistatus, active;
3849
3850 /*
3851 			 * On the 82543 we must force the MAC's speed
3852 			 * and duplex settings to match the PHY's
3853 			 * resolved speed and duplex configuration.
3854 */
3855 miistatus = sc->sc_mii.mii_media_status;
3856
3857 if (miistatus & IFM_ACTIVE) {
3858 active = sc->sc_mii.mii_media_active;
3859 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3860 switch (IFM_SUBTYPE(active)) {
3861 case IFM_10_T:
3862 sc->sc_ctrl |= CTRL_SPEED_10;
3863 break;
3864 case IFM_100_TX:
3865 sc->sc_ctrl |= CTRL_SPEED_100;
3866 break;
3867 case IFM_1000_T:
3868 sc->sc_ctrl |= CTRL_SPEED_1000;
3869 break;
3870 default:
3871 /*
3872 					 * Fiber?
3873 					 * Should not enter here.
3874 */
3875 printf("unknown media (%x)\n",
3876 active);
3877 break;
3878 }
3879 if (active & IFM_FDX)
3880 sc->sc_ctrl |= CTRL_FD;
3881 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3882 }
3883 } else if ((sc->sc_type == WM_T_ICH8)
3884 && (sc->sc_phytype == WMPHY_IGP_3)) {
3885 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3886 } else if (sc->sc_type == WM_T_PCH) {
3887 wm_k1_gig_workaround_hv(sc,
3888 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3889 }
3890
3891 if ((sc->sc_phytype == WMPHY_82578)
3892 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3893 == IFM_1000_T)) {
3894
3895 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3896 delay(200*1000); /* XXX too big */
3897
3898 /* Link stall fix for link up */
3899 wm_gmii_hv_writereg(sc->sc_dev, 1,
3900 HV_MUX_DATA_CTRL,
3901 HV_MUX_DATA_CTRL_GEN_TO_MAC
3902 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3903 wm_gmii_hv_writereg(sc->sc_dev, 1,
3904 HV_MUX_DATA_CTRL,
3905 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3906 }
3907 }
3908 } else if (icr & ICR_RXSEQ) {
3909 DPRINTF(WM_DEBUG_LINK,
3910 ("%s: LINK Receive sequence error\n",
3911 device_xname(sc->sc_dev)));
3912 }
3913 }
3914
3915 /*
3916 * wm_linkintr_tbi:
3917 *
3918 * Helper; handle link interrupts for TBI mode.
3919 */
3920 static void
3921 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3922 {
3923 uint32_t status;
3924
3925 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3926 __func__));
3927
3928 status = CSR_READ(sc, WMREG_STATUS);
3929 if (icr & ICR_LSC) {
3930 if (status & STATUS_LU) {
3931 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3932 device_xname(sc->sc_dev),
3933 (status & STATUS_FD) ? "FDX" : "HDX"));
3934 /*
3935 * NOTE: CTRL will update TFCE and RFCE automatically,
3936 * so we should update sc->sc_ctrl
3937 */
3938
3939 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3940 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3941 sc->sc_fcrtl &= ~FCRTL_XONE;
3942 if (status & STATUS_FD)
3943 sc->sc_tctl |=
3944 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3945 else
3946 sc->sc_tctl |=
3947 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3948 if (sc->sc_ctrl & CTRL_TFCE)
3949 sc->sc_fcrtl |= FCRTL_XONE;
3950 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3951 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3952 WMREG_OLD_FCRTL : WMREG_FCRTL,
3953 sc->sc_fcrtl);
3954 sc->sc_tbi_linkup = 1;
3955 } else {
3956 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3957 device_xname(sc->sc_dev)));
3958 sc->sc_tbi_linkup = 0;
3959 }
3960 wm_tbi_set_linkled(sc);
3961 } else if (icr & ICR_RXCFG) {
3962 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3963 device_xname(sc->sc_dev)));
3964 sc->sc_tbi_nrxcfg++;
3965 wm_check_for_link(sc);
3966 } else if (icr & ICR_RXSEQ) {
3967 DPRINTF(WM_DEBUG_LINK,
3968 ("%s: LINK: Receive sequence error\n",
3969 device_xname(sc->sc_dev)));
3970 }
3971 }
3972
3973 /*
3974 * wm_linkintr:
3975 *
3976 * Helper; handle link interrupts.
3977 */
3978 static void
3979 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3980 {
3981
3982 if (sc->sc_flags & WM_F_HAS_MII)
3983 wm_linkintr_gmii(sc, icr);
3984 else
3985 wm_linkintr_tbi(sc, icr);
3986 }
3987
3988 /*
3989 * wm_tick:
3990 *
3991 * One second timer, used to check link status, sweep up
3992 * completed transmit jobs, etc.
3993 */
3994 static void
3995 wm_tick(void *arg)
3996 {
3997 struct wm_softc *sc = arg;
3998 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3999 int s;
4000
4001 s = splnet();
4002
4003 if (sc->sc_type >= WM_T_82542_2_1) {
4004 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
4005 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
4006 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
4007 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
4008 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
4009 }
4010
4011 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4012 	ifp->if_ierrors += 0ULL /* ensure quad_t */
4013 + CSR_READ(sc, WMREG_CRCERRS)
4014 + CSR_READ(sc, WMREG_ALGNERRC)
4015 + CSR_READ(sc, WMREG_SYMERRC)
4016 + CSR_READ(sc, WMREG_RXERRC)
4017 + CSR_READ(sc, WMREG_SEC)
4018 + CSR_READ(sc, WMREG_CEXTERR)
4019 + CSR_READ(sc, WMREG_RLEC);
4020 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
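	/*
	 * Note (an assumption based on e1000-class behavior): the
	 * statistics registers read above are clear-on-read, so each
	 * tick accumulates only the deltas since the previous tick.
	 */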
4021
4022 if (sc->sc_flags & WM_F_HAS_MII)
4023 mii_tick(&sc->sc_mii);
4024 else
4025 wm_tbi_check_link(sc);
4026
4027 splx(s);
4028
4029 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4030 }
4031
4032 /*
4033 * wm_reset:
4034 *
4035 * Reset the i82542 chip.
4036 */
4037 static void
4038 wm_reset(struct wm_softc *sc)
4039 {
4040 int phy_reset = 0;
4041 uint32_t reg, mask;
4042
4043 /*
4044 * Allocate on-chip memory according to the MTU size.
4045 * The Packet Buffer Allocation register must be written
4046 * before the chip is reset.
4047 */
4048 switch (sc->sc_type) {
4049 case WM_T_82547:
4050 case WM_T_82547_2:
4051 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4052 PBA_22K : PBA_30K;
4053 sc->sc_txfifo_head = 0;
4054 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4055 sc->sc_txfifo_size =
4056 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4057 sc->sc_txfifo_stall = 0;
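		/*
		 * A sketch of the arithmetic above (assuming the PBA_*
		 * constants are in KB units): with a normal MTU we get
		 * sc_pba = PBA_30K, leaving (40K - 30K) = 10KB of packet
		 * buffer memory for the Tx FIFO workaround.
		 */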
4058 break;
4059 case WM_T_82571:
4060 case WM_T_82572:
4061 	case WM_T_82575: /* XXX need special handling for jumbo frames */
4062 case WM_T_I350:
4063 case WM_T_I354:
4064 case WM_T_80003:
4065 sc->sc_pba = PBA_32K;
4066 break;
4067 case WM_T_82580:
4068 case WM_T_82580ER:
4069 sc->sc_pba = PBA_35K;
4070 break;
4071 case WM_T_I210:
4072 case WM_T_I211:
4073 sc->sc_pba = PBA_34K;
4074 break;
4075 case WM_T_82576:
4076 sc->sc_pba = PBA_64K;
4077 break;
4078 case WM_T_82573:
4079 sc->sc_pba = PBA_12K;
4080 break;
4081 case WM_T_82574:
4082 case WM_T_82583:
4083 sc->sc_pba = PBA_20K;
4084 break;
4085 case WM_T_ICH8:
4086 sc->sc_pba = PBA_8K;
4087 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4088 break;
4089 case WM_T_ICH9:
4090 case WM_T_ICH10:
4091 sc->sc_pba = PBA_10K;
4092 break;
4093 case WM_T_PCH:
4094 case WM_T_PCH2:
4095 case WM_T_PCH_LPT:
4096 sc->sc_pba = PBA_26K;
4097 break;
4098 default:
4099 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4100 PBA_40K : PBA_48K;
4101 break;
4102 }
4103 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4104
4105 /* Prevent the PCI-E bus from sticking */
4106 if (sc->sc_flags & WM_F_PCIE) {
4107 int timeout = 800;
4108
4109 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4110 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4111
4112 while (timeout--) {
4113 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4114 == 0)
4115 break;
4116 delay(100);
4117 }
4118 }
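	/*
	 * Note on the wait above: 800 iterations of 100us polls give the
	 * GIO master-disable roughly 80ms to take effect before we
	 * proceed with the reset anyway.
	 */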
4119
4120 	/* Set the PCIe completion timeout for the interface */
4121 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4122 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4123 wm_set_pcie_completion_timeout(sc);
4124
4125 /* Clear interrupt */
4126 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4127
4128 /* Stop the transmit and receive processes. */
4129 CSR_WRITE(sc, WMREG_RCTL, 0);
4130 sc->sc_rctl &= ~RCTL_EN;
4131 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4132 CSR_WRITE_FLUSH(sc);
4133
4134 /* XXX set_tbi_sbp_82543() */
4135
4136 delay(10*1000);
4137
4138 /* Must acquire the MDIO ownership before MAC reset */
4139 switch (sc->sc_type) {
4140 case WM_T_82573:
4141 case WM_T_82574:
4142 case WM_T_82583:
4143 wm_get_hw_semaphore_82573(sc);
4144 break;
4145 default:
4146 break;
4147 }
4148
4149 /*
4150 * 82541 Errata 29? & 82547 Errata 28?
4151 * See also the description about PHY_RST bit in CTRL register
4152 * in 8254x_GBe_SDM.pdf.
4153 */
4154 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4155 CSR_WRITE(sc, WMREG_CTRL,
4156 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4157 CSR_WRITE_FLUSH(sc);
4158 delay(5000);
4159 }
4160
4161 switch (sc->sc_type) {
4162 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4163 case WM_T_82541:
4164 case WM_T_82541_2:
4165 case WM_T_82547:
4166 case WM_T_82547_2:
4167 /*
4168 * On some chipsets, a reset through a memory-mapped write
4169 * cycle can cause the chip to reset before completing the
4170 		 * write cycle.  This causes major headaches that can be
4171 * avoided by issuing the reset via indirect register writes
4172 * through I/O space.
4173 *
4174 * So, if we successfully mapped the I/O BAR at attach time,
4175 * use that. Otherwise, try our luck with a memory-mapped
4176 * reset.
4177 */
4178 if (sc->sc_flags & WM_F_IOH_VALID)
4179 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4180 else
4181 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4182 break;
4183 case WM_T_82545_3:
4184 case WM_T_82546_3:
4185 /* Use the shadow control register on these chips. */
4186 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4187 break;
4188 case WM_T_80003:
4189 mask = swfwphysem[sc->sc_funcid];
4190 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4191 wm_get_swfw_semaphore(sc, mask);
4192 CSR_WRITE(sc, WMREG_CTRL, reg);
4193 wm_put_swfw_semaphore(sc, mask);
4194 break;
4195 case WM_T_ICH8:
4196 case WM_T_ICH9:
4197 case WM_T_ICH10:
4198 case WM_T_PCH:
4199 case WM_T_PCH2:
4200 case WM_T_PCH_LPT:
4201 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4202 if (wm_check_reset_block(sc) == 0) {
4203 /*
4204 * Gate automatic PHY configuration by hardware on
4205 * non-managed 82579
4206 */
4207 if ((sc->sc_type == WM_T_PCH2)
4208 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4209 != 0))
4210 wm_gate_hw_phy_config_ich8lan(sc, 1);
4211
4212
4213 reg |= CTRL_PHY_RESET;
4214 phy_reset = 1;
4215 }
4216 wm_get_swfwhw_semaphore(sc);
4217 CSR_WRITE(sc, WMREG_CTRL, reg);
4218 		/* Don't insert a completion barrier during reset */
4219 delay(20*1000);
4220 wm_put_swfwhw_semaphore(sc);
4221 break;
4222 case WM_T_82542_2_0:
4223 case WM_T_82542_2_1:
4224 case WM_T_82543:
4225 case WM_T_82540:
4226 case WM_T_82545:
4227 case WM_T_82546:
4228 case WM_T_82571:
4229 case WM_T_82572:
4230 case WM_T_82573:
4231 case WM_T_82574:
4232 case WM_T_82575:
4233 case WM_T_82576:
4234 case WM_T_82580:
4235 case WM_T_82580ER:
4236 case WM_T_82583:
4237 case WM_T_I350:
4238 case WM_T_I354:
4239 case WM_T_I210:
4240 case WM_T_I211:
4241 default:
4242 /* Everything else can safely use the documented method. */
4243 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4244 break;
4245 }
4246
4247 /* Must release the MDIO ownership after MAC reset */
4248 switch (sc->sc_type) {
4249 case WM_T_82574:
4250 case WM_T_82583:
4251 wm_put_hw_semaphore_82573(sc);
4252 break;
4253 default:
4254 break;
4255 }
4256
4257 if (phy_reset != 0)
4258 wm_get_cfg_done(sc);
4259
4260 /* reload EEPROM */
4261 switch (sc->sc_type) {
4262 case WM_T_82542_2_0:
4263 case WM_T_82542_2_1:
4264 case WM_T_82543:
4265 case WM_T_82544:
4266 delay(10);
4267 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4268 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4269 CSR_WRITE_FLUSH(sc);
4270 delay(2000);
4271 break;
4272 case WM_T_82540:
4273 case WM_T_82545:
4274 case WM_T_82545_3:
4275 case WM_T_82546:
4276 case WM_T_82546_3:
4277 delay(5*1000);
4278 /* XXX Disable HW ARPs on ASF enabled adapters */
4279 break;
4280 case WM_T_82541:
4281 case WM_T_82541_2:
4282 case WM_T_82547:
4283 case WM_T_82547_2:
4284 delay(20000);
4285 /* XXX Disable HW ARPs on ASF enabled adapters */
4286 break;
4287 case WM_T_82571:
4288 case WM_T_82572:
4289 case WM_T_82573:
4290 case WM_T_82574:
4291 case WM_T_82583:
4292 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4293 delay(10);
4294 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4295 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4296 CSR_WRITE_FLUSH(sc);
4297 }
4298 /* check EECD_EE_AUTORD */
4299 wm_get_auto_rd_done(sc);
4300 /*
4301 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
4302 * is set.
4303 */
4304 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4305 || (sc->sc_type == WM_T_82583))
4306 delay(25*1000);
4307 break;
4308 case WM_T_82575:
4309 case WM_T_82576:
4310 case WM_T_82580:
4311 case WM_T_82580ER:
4312 case WM_T_I350:
4313 case WM_T_I354:
4314 case WM_T_I210:
4315 case WM_T_I211:
4316 case WM_T_80003:
4317 /* check EECD_EE_AUTORD */
4318 wm_get_auto_rd_done(sc);
4319 break;
4320 case WM_T_ICH8:
4321 case WM_T_ICH9:
4322 case WM_T_ICH10:
4323 case WM_T_PCH:
4324 case WM_T_PCH2:
4325 case WM_T_PCH_LPT:
4326 break;
4327 default:
4328 panic("%s: unknown type\n", __func__);
4329 }
4330
4331 /* Check whether EEPROM is present or not */
4332 switch (sc->sc_type) {
4333 case WM_T_82575:
4334 case WM_T_82576:
4335 #if 0 /* XXX */
4336 case WM_T_82580:
4337 case WM_T_82580ER:
4338 #endif
4339 case WM_T_I350:
4340 case WM_T_I354:
4341 case WM_T_ICH8:
4342 case WM_T_ICH9:
4343 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4344 /* Not found */
4345 sc->sc_flags |= WM_F_EEPROM_INVALID;
4346 if ((sc->sc_type == WM_T_82575)
4347 || (sc->sc_type == WM_T_82576)
4348 || (sc->sc_type == WM_T_82580)
4349 || (sc->sc_type == WM_T_82580ER)
4350 || (sc->sc_type == WM_T_I350)
4351 || (sc->sc_type == WM_T_I354))
4352 wm_reset_init_script_82575(sc);
4353 }
4354 break;
4355 default:
4356 break;
4357 }
4358
4359 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4360 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4361 /* clear global device reset status bit */
4362 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4363 }
4364
4365 /* Clear any pending interrupt events. */
4366 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4367 reg = CSR_READ(sc, WMREG_ICR);
4368
4369 /* reload sc_ctrl */
4370 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4371
4372 if (sc->sc_type == WM_T_I350)
4373 wm_set_eee_i350(sc);
4374
4375 /* dummy read from WUC */
4376 if (sc->sc_type == WM_T_PCH)
4377 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4378 /*
4379 * For PCH, this write will make sure that any noise will be detected
4380 * as a CRC error and be dropped rather than show up as a bad packet
4381 * to the DMA engine
4382 */
4383 if (sc->sc_type == WM_T_PCH)
4384 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4385
4386 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4387 CSR_WRITE(sc, WMREG_WUC, 0);
4388
4389 /* XXX need special handling for 82580 */
4390 }
4391
4392 static void
4393 wm_set_vlan(struct wm_softc *sc)
4394 {
4395 /* Deal with VLAN enables. */
4396 if (VLAN_ATTACHED(&sc->sc_ethercom))
4397 sc->sc_ctrl |= CTRL_VME;
4398 else
4399 sc->sc_ctrl &= ~CTRL_VME;
4400
4401 /* Write the control registers. */
4402 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4403 }
4404
4405 /*
4406 * wm_init: [ifnet interface function]
4407 *
4408 * Initialize the interface. Must be called at splnet().
4409 */
4410 static int
4411 wm_init(struct ifnet *ifp)
4412 {
4413 struct wm_softc *sc = ifp->if_softc;
4414 struct wm_rxsoft *rxs;
4415 int i, j, trynum, error = 0;
4416 uint32_t reg;
4417
4418 /*
4419 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4420 	 * There is a small but measurable benefit to avoiding the adjustment
4421 * of the descriptor so that the headers are aligned, for normal mtu,
4422 * on such platforms. One possibility is that the DMA itself is
4423 * slightly more efficient if the front of the entire packet (instead
4424 * of the front of the headers) is aligned.
4425 *
4426 * Note we must always set align_tweak to 0 if we are using
4427 * jumbo frames.
4428 */
4429 #ifdef __NO_STRICT_ALIGNMENT
4430 sc->sc_align_tweak = 0;
4431 #else
4432 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4433 sc->sc_align_tweak = 0;
4434 else
4435 sc->sc_align_tweak = 2;
4436 #endif /* __NO_STRICT_ALIGNMENT */
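	/*
	 * Example: with sc_align_tweak = 2 the received frame starts 2
	 * bytes into the cluster, so the IP header that follows the
	 * 14-byte Ethernet header lands on a 4-byte boundary (2 + 14 = 16).
	 */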
4437
4438 /* Cancel any pending I/O. */
4439 wm_stop(ifp, 0);
4440
4441 /* update statistics before reset */
4442 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4443 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4444
4445 /* Reset the chip to a known state. */
4446 wm_reset(sc);
4447
4448 switch (sc->sc_type) {
4449 case WM_T_82571:
4450 case WM_T_82572:
4451 case WM_T_82573:
4452 case WM_T_82574:
4453 case WM_T_82583:
4454 case WM_T_80003:
4455 case WM_T_ICH8:
4456 case WM_T_ICH9:
4457 case WM_T_ICH10:
4458 case WM_T_PCH:
4459 case WM_T_PCH2:
4460 case WM_T_PCH_LPT:
4461 if (wm_check_mng_mode(sc) != 0)
4462 wm_get_hw_control(sc);
4463 break;
4464 default:
4465 break;
4466 }
4467
4468 /* Reset the PHY. */
4469 if (sc->sc_flags & WM_F_HAS_MII)
4470 wm_gmii_reset(sc);
4471
4472 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4473 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4474 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
4475 || (sc->sc_type == WM_T_PCH_LPT))
4476 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4477
4478 /* Initialize the transmit descriptor ring. */
4479 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4480 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4481 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4482 sc->sc_txfree = WM_NTXDESC(sc);
4483 sc->sc_txnext = 0;
4484
4485 if (sc->sc_type < WM_T_82543) {
4486 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4487 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4488 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4489 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4490 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4491 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4492 } else {
4493 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4494 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4495 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4496 CSR_WRITE(sc, WMREG_TDH, 0);
4497 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4498 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4499
4500 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4501 /*
4502 * Don't write TDT before TCTL.EN is set.
4503 			 * See the documentation.
4504 */
4505 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4506 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4507 | TXDCTL_WTHRESH(0));
4508 else {
4509 CSR_WRITE(sc, WMREG_TDT, 0);
4510 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4511 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4512 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4513 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4514 }
4515 }
4516 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4517 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4518
4519 /* Initialize the transmit job descriptors. */
4520 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4521 sc->sc_txsoft[i].txs_mbuf = NULL;
4522 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4523 sc->sc_txsnext = 0;
4524 sc->sc_txsdirty = 0;
4525
4526 /*
4527 * Initialize the receive descriptor and receive job
4528 * descriptor rings.
4529 */
4530 if (sc->sc_type < WM_T_82543) {
4531 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4532 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4533 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4534 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4535 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4536 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4537
4538 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4539 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4540 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4541 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4542 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4543 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4544 } else {
4545 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4546 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4547 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4548 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4549 CSR_WRITE(sc, WMREG_EITR(0), 450);
4550 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4551 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4552 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4553 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4554 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4555 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4556 | RXDCTL_WTHRESH(1));
4557 } else {
4558 CSR_WRITE(sc, WMREG_RDH, 0);
4559 CSR_WRITE(sc, WMREG_RDT, 0);
4560 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4561 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4562 }
4563 }
4564 for (i = 0; i < WM_NRXDESC; i++) {
4565 rxs = &sc->sc_rxsoft[i];
4566 if (rxs->rxs_mbuf == NULL) {
4567 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4568 log(LOG_ERR, "%s: unable to allocate or map "
4569 "rx buffer %d, error = %d\n",
4570 device_xname(sc->sc_dev), i, error);
4571 /*
4572 * XXX Should attempt to run with fewer receive
4573 * XXX buffers instead of just failing.
4574 */
4575 wm_rxdrain(sc);
4576 goto out;
4577 }
4578 } else {
4579 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4580 WM_INIT_RXDESC(sc, i);
4581 /*
4582 			 * For 82575 and newer devices, the RX descriptors
4583 			 * must be initialized after RCTL.EN is set in
4584 * wm_set_filter()
4585 */
4586 }
4587 }
4588 sc->sc_rxptr = 0;
4589 sc->sc_rxdiscard = 0;
4590 WM_RXCHAIN_RESET(sc);
4591
4592 /*
4593 * Clear out the VLAN table -- we don't use it (yet).
4594 */
4595 CSR_WRITE(sc, WMREG_VET, 0);
4596 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4597 trynum = 10; /* Due to hw errata */
4598 else
4599 trynum = 1;
4600 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4601 for (j = 0; j < trynum; j++)
4602 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4603
4604 /*
4605 * Set up flow-control parameters.
4606 *
4607 * XXX Values could probably stand some tuning.
4608 */
4609 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4610 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4611 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4612 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4613 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4614 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4615 }
4616
4617 sc->sc_fcrtl = FCRTL_DFLT;
4618 if (sc->sc_type < WM_T_82543) {
4619 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4620 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4621 } else {
4622 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4623 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4624 }
4625
4626 if (sc->sc_type == WM_T_80003)
4627 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4628 else
4629 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4630
4631 	/* Write the control register. */
4632 wm_set_vlan(sc);
4633
4634 if (sc->sc_flags & WM_F_HAS_MII) {
4635 int val;
4636
4637 switch (sc->sc_type) {
4638 case WM_T_80003:
4639 case WM_T_ICH8:
4640 case WM_T_ICH9:
4641 case WM_T_ICH10:
4642 case WM_T_PCH:
4643 case WM_T_PCH2:
4644 case WM_T_PCH_LPT:
4645 /*
4646 * Set the mac to wait the maximum time between each
4647 * iteration and increase the max iterations when
4648 * polling the phy; this fixes erroneous timeouts at
4649 * 10Mbps.
4650 */
4651 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4652 0xFFFF);
4653 val = wm_kmrn_readreg(sc,
4654 KUMCTRLSTA_OFFSET_INB_PARAM);
4655 val |= 0x3F;
4656 wm_kmrn_writereg(sc,
4657 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4658 break;
4659 default:
4660 break;
4661 }
4662
4663 if (sc->sc_type == WM_T_80003) {
4664 val = CSR_READ(sc, WMREG_CTRL_EXT);
4665 val &= ~CTRL_EXT_LINK_MODE_MASK;
4666 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4667
4668 /* Bypass RX and TX FIFO's */
4669 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4670 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4671 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4672 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4673 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4674 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4675 }
4676 }
4677 #if 0
4678 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4679 #endif
4680
4681 /*
4682 * Set up checksum offload parameters.
4683 */
4684 reg = CSR_READ(sc, WMREG_RXCSUM);
4685 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4686 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4687 reg |= RXCSUM_IPOFL;
4688 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4689 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4690 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4691 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4692 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4693
4694 /* Reset TBI's RXCFG count */
4695 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4696
4697 /*
4698 * Set up the interrupt registers.
4699 */
4700 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4701 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4702 ICR_RXO | ICR_RXT0;
4703 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4704 sc->sc_icr |= ICR_RXCFG;
4705 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4706
4707 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4708 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4709 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4710 reg = CSR_READ(sc, WMREG_KABGTXD);
4711 reg |= KABGTXD_BGSQLBIAS;
4712 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4713 }
4714
4715 /* Set up the inter-packet gap. */
4716 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4717
4718 if (sc->sc_type >= WM_T_82543) {
4719 /*
4720 * Set up the interrupt throttling register (units of 256ns)
4721 * Note that a footnote in Intel's documentation says this
4722 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4723 		 * or 10Mbit mode.  Empirically, the same appears to be true
4724 		 * for the 1024ns units of the other
4725 * interrupt-related timer registers -- so, really, we ought
4726 * to divide this value by 4 when the link speed is low.
4727 *
4728 * XXX implement this division at link speed change!
4729 */
4730
4731 /*
4732 * For N interrupts/sec, set this value to:
4733 * 1000000000 / (N * 256). Note that we set the
4734 * absolute and packet timer values to this value
4735 * divided by 4 to get "simple timer" behavior.
4736 */
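		/*
		 * Worked example of the formula above: sc_itr = 1500 below
		 * gives N = 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
		 * matching the comment on the assignment.
		 */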
4737
4738 sc->sc_itr = 1500; /* 2604 ints/sec */
4739 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4740 }
4741
4742 /* Set the VLAN ethernetype. */
4743 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4744
4745 /*
4746 * Set up the transmit control register; we start out with
4747 	 * a collision distance suitable for FDX, but update it when
4748 * we resolve the media type.
4749 */
4750 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4751 | TCTL_CT(TX_COLLISION_THRESHOLD)
4752 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4753 if (sc->sc_type >= WM_T_82571)
4754 sc->sc_tctl |= TCTL_MULR;
4755 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4756
4757 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4758 /*
4759 * Write TDT after TCTL.EN is set.
4760 		 * See the documentation.
4761 */
4762 CSR_WRITE(sc, WMREG_TDT, 0);
4763 }
4764
4765 if (sc->sc_type == WM_T_80003) {
4766 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4767 reg &= ~TCTL_EXT_GCEX_MASK;
4768 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4769 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4770 }
4771
4772 /* Set the media. */
4773 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4774 goto out;
4775
4776 /* Configure for OS presence */
4777 wm_init_manageability(sc);
4778
4779 /*
4780 * Set up the receive control register; we actually program
4781 * the register when we set the receive filter. Use multicast
4782 * address offset type 0.
4783 *
4784 * Only the i82544 has the ability to strip the incoming
4785 * CRC, so we don't enable that feature.
4786 */
4787 sc->sc_mchash_type = 0;
4788 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4789 | RCTL_MO(sc->sc_mchash_type);
4790
4791 /*
4792 * The I350 has a bug where it always strips the CRC whether
4793 	 * asked to or not.  So ask for a stripped CRC here and cope with it in wm_rxintr().
4794 */
4795 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4796 || (sc->sc_type == WM_T_I210))
4797 sc->sc_rctl |= RCTL_SECRC;
4798
4799 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4800 && (ifp->if_mtu > ETHERMTU)) {
4801 sc->sc_rctl |= RCTL_LPE;
4802 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4803 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4804 }
4805
4806 if (MCLBYTES == 2048) {
4807 sc->sc_rctl |= RCTL_2k;
4808 } else {
4809 if (sc->sc_type >= WM_T_82543) {
4810 switch (MCLBYTES) {
4811 case 4096:
4812 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4813 break;
4814 case 8192:
4815 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4816 break;
4817 case 16384:
4818 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4819 break;
4820 default:
4821 panic("wm_init: MCLBYTES %d unsupported",
4822 MCLBYTES);
4823 break;
4824 }
4825 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4826 }
4827
4828 /* Set the receive filter. */
4829 wm_set_filter(sc);
4830
4831 /* Enable ECC */
4832 switch (sc->sc_type) {
4833 case WM_T_82571:
4834 reg = CSR_READ(sc, WMREG_PBA_ECC);
4835 reg |= PBA_ECC_CORR_EN;
4836 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4837 break;
4838 case WM_T_PCH_LPT:
4839 reg = CSR_READ(sc, WMREG_PBECCSTS);
4840 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4841 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4842
4843 reg = CSR_READ(sc, WMREG_CTRL);
4844 reg |= CTRL_MEHE;
4845 CSR_WRITE(sc, WMREG_CTRL, reg);
4846 break;
4847 default:
4848 break;
4849 }
4850
4851 	/* On 82575 and later, set RDT only if RX is enabled */
4852 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4853 for (i = 0; i < WM_NRXDESC; i++)
4854 WM_INIT_RXDESC(sc, i);
4855
4856 /* Start the one second link check clock. */
4857 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4858
4859 /* ...all done! */
4860 ifp->if_flags |= IFF_RUNNING;
4861 ifp->if_flags &= ~IFF_OACTIVE;
4862
4863 out:
4864 sc->sc_if_flags = ifp->if_flags;
4865 if (error)
4866 log(LOG_ERR, "%s: interface not running\n",
4867 device_xname(sc->sc_dev));
4868 return error;
4869 }
4870
4871 /*
4872 * wm_rxdrain:
4873 *
4874 * Drain the receive queue.
4875 */
4876 static void
4877 wm_rxdrain(struct wm_softc *sc)
4878 {
4879 struct wm_rxsoft *rxs;
4880 int i;
4881
4882 for (i = 0; i < WM_NRXDESC; i++) {
4883 rxs = &sc->sc_rxsoft[i];
4884 if (rxs->rxs_mbuf != NULL) {
4885 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4886 m_freem(rxs->rxs_mbuf);
4887 rxs->rxs_mbuf = NULL;
4888 }
4889 }
4890 }
4891
4892 /*
4893 * wm_stop: [ifnet interface function]
4894 *
4895 * Stop transmission on the interface.
4896 */
4897 static void
4898 wm_stop(struct ifnet *ifp, int disable)
4899 {
4900 struct wm_softc *sc = ifp->if_softc;
4901 struct wm_txsoft *txs;
4902 int i;
4903
4904 /* Stop the one second clock. */
4905 callout_stop(&sc->sc_tick_ch);
4906
4907 /* Stop the 82547 Tx FIFO stall check timer. */
4908 if (sc->sc_type == WM_T_82547)
4909 callout_stop(&sc->sc_txfifo_ch);
4910
4911 if (sc->sc_flags & WM_F_HAS_MII) {
4912 /* Down the MII. */
4913 mii_down(&sc->sc_mii);
4914 } else {
4915 #if 0
4916 /* Should we clear PHY's status properly? */
4917 wm_reset(sc);
4918 #endif
4919 }
4920
4921 /* Stop the transmit and receive processes. */
4922 CSR_WRITE(sc, WMREG_TCTL, 0);
4923 CSR_WRITE(sc, WMREG_RCTL, 0);
4924 sc->sc_rctl &= ~RCTL_EN;
4925
4926 /*
4927 * Clear the interrupt mask to ensure the device cannot assert its
4928 * interrupt line.
4929 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4930 * any currently pending or shared interrupt.
4931 */
4932 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4933 sc->sc_icr = 0;
4934
4935 /* Release any queued transmit buffers. */
4936 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4937 txs = &sc->sc_txsoft[i];
4938 if (txs->txs_mbuf != NULL) {
4939 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4940 m_freem(txs->txs_mbuf);
4941 txs->txs_mbuf = NULL;
4942 }
4943 }
4944
4945 /* Mark the interface as down and cancel the watchdog timer. */
4946 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4947 ifp->if_timer = 0;
4948
4949 if (disable)
4950 wm_rxdrain(sc);
4951
4952 #if 0 /* notyet */
4953 if (sc->sc_type >= WM_T_82544)
4954 CSR_WRITE(sc, WMREG_WUC, 0);
4955 #endif
4956 }
4957
4958 void
4959 wm_get_auto_rd_done(struct wm_softc *sc)
4960 {
4961 int i;
4962
4963 /* wait for eeprom to reload */
4964 switch (sc->sc_type) {
4965 case WM_T_82571:
4966 case WM_T_82572:
4967 case WM_T_82573:
4968 case WM_T_82574:
4969 case WM_T_82583:
4970 case WM_T_82575:
4971 case WM_T_82576:
4972 case WM_T_82580:
4973 case WM_T_82580ER:
4974 case WM_T_I350:
4975 case WM_T_I354:
4976 case WM_T_I210:
4977 case WM_T_I211:
4978 case WM_T_80003:
4979 case WM_T_ICH8:
4980 case WM_T_ICH9:
4981 for (i = 0; i < 10; i++) {
4982 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4983 break;
4984 delay(1000);
4985 }
4986 if (i == 10) {
4987 log(LOG_ERR, "%s: auto read from eeprom failed to "
4988 "complete\n", device_xname(sc->sc_dev));
4989 }
4990 break;
4991 default:
4992 break;
4993 }
4994 }
4995
4996 void
4997 wm_lan_init_done(struct wm_softc *sc)
4998 {
4999 uint32_t reg = 0;
5000 int i;
5001
5002 /* wait for eeprom to reload */
5003 switch (sc->sc_type) {
5004 case WM_T_ICH10:
5005 case WM_T_PCH:
5006 case WM_T_PCH2:
5007 case WM_T_PCH_LPT:
5008 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
5009 reg = CSR_READ(sc, WMREG_STATUS);
5010 if ((reg & STATUS_LAN_INIT_DONE) != 0)
5011 break;
5012 delay(100);
5013 }
5014 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
5015 log(LOG_ERR, "%s: %s: lan_init_done failed to "
5016 "complete\n", device_xname(sc->sc_dev), __func__);
5017 }
5018 break;
5019 default:
5020 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5021 __func__);
5022 break;
5023 }
5024
5025 reg &= ~STATUS_LAN_INIT_DONE;
5026 CSR_WRITE(sc, WMREG_STATUS, reg);
5027 }
5028
5029 void
5030 wm_get_cfg_done(struct wm_softc *sc)
5031 {
5032 int mask;
5033 uint32_t reg;
5034 int i;
5035
5036 /* wait for eeprom to reload */
5037 switch (sc->sc_type) {
5038 case WM_T_82542_2_0:
5039 case WM_T_82542_2_1:
5040 /* null */
5041 break;
5042 case WM_T_82543:
5043 case WM_T_82544:
5044 case WM_T_82540:
5045 case WM_T_82545:
5046 case WM_T_82545_3:
5047 case WM_T_82546:
5048 case WM_T_82546_3:
5049 case WM_T_82541:
5050 case WM_T_82541_2:
5051 case WM_T_82547:
5052 case WM_T_82547_2:
5053 case WM_T_82573:
5054 case WM_T_82574:
5055 case WM_T_82583:
5056 /* generic */
5057 delay(10*1000);
5058 break;
5059 case WM_T_80003:
5060 case WM_T_82571:
5061 case WM_T_82572:
5062 case WM_T_82575:
5063 case WM_T_82576:
5064 case WM_T_82580:
5065 case WM_T_82580ER:
5066 case WM_T_I350:
5067 case WM_T_I354:
5068 case WM_T_I210:
5069 case WM_T_I211:
5070 if (sc->sc_type == WM_T_82571) {
5071 			/* On the 82571, all functions use port 0's CFGDONE bit */
5072 mask = EEMNGCTL_CFGDONE_0;
5073 } else
5074 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5075 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5076 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5077 break;
5078 delay(1000);
5079 }
5080 if (i >= WM_PHY_CFG_TIMEOUT) {
5081 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5082 device_xname(sc->sc_dev), __func__));
5083 }
5084 break;
5085 case WM_T_ICH8:
5086 case WM_T_ICH9:
5087 case WM_T_ICH10:
5088 case WM_T_PCH:
5089 case WM_T_PCH2:
5090 case WM_T_PCH_LPT:
5091 delay(10*1000);
5092 if (sc->sc_type >= WM_T_ICH10)
5093 wm_lan_init_done(sc);
5094 else
5095 wm_get_auto_rd_done(sc);
5096
5097 reg = CSR_READ(sc, WMREG_STATUS);
5098 if ((reg & STATUS_PHYRA) != 0)
5099 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
5100 break;
5101 default:
5102 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5103 __func__);
5104 break;
5105 }
5106 }
5107
5108 /*
5109 * wm_acquire_eeprom:
5110 *
5111 * Perform the EEPROM handshake required on some chips.
5112 */
5113 static int
5114 wm_acquire_eeprom(struct wm_softc *sc)
5115 {
5116 uint32_t reg;
5117 int x;
5118 int ret = 0;
5119
5120 	/* always succeeds */
5121 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5122 return 0;
5123
5124 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5125 ret = wm_get_swfwhw_semaphore(sc);
5126 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5127 /* this will also do wm_get_swsm_semaphore() if needed */
5128 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5129 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5130 ret = wm_get_swsm_semaphore(sc);
5131 }
5132
5133 if (ret) {
5134 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5135 __func__);
5136 return 1;
5137 }
5138
5139 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5140 reg = CSR_READ(sc, WMREG_EECD);
5141
5142 /* Request EEPROM access. */
5143 reg |= EECD_EE_REQ;
5144 CSR_WRITE(sc, WMREG_EECD, reg);
5145
5146 /* ..and wait for it to be granted. */
5147 for (x = 0; x < 1000; x++) {
5148 reg = CSR_READ(sc, WMREG_EECD);
5149 if (reg & EECD_EE_GNT)
5150 break;
5151 delay(5);
5152 }
5153 if ((reg & EECD_EE_GNT) == 0) {
5154 aprint_error_dev(sc->sc_dev,
5155 "could not acquire EEPROM GNT\n");
5156 reg &= ~EECD_EE_REQ;
5157 CSR_WRITE(sc, WMREG_EECD, reg);
5158 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5159 wm_put_swfwhw_semaphore(sc);
5160 if (sc->sc_flags & WM_F_SWFW_SYNC)
5161 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5162 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5163 wm_put_swsm_semaphore(sc);
5164 return 1;
5165 }
5166 }
5167
5168 return 0;
5169 }
5170
5171 /*
5172 * wm_release_eeprom:
5173 *
5174 * Release the EEPROM mutex.
5175 */
5176 static void
5177 wm_release_eeprom(struct wm_softc *sc)
5178 {
5179 uint32_t reg;
5180
5181 	/* always succeeds */
5182 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5183 return;
5184
5185 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5186 reg = CSR_READ(sc, WMREG_EECD);
5187 reg &= ~EECD_EE_REQ;
5188 CSR_WRITE(sc, WMREG_EECD, reg);
5189 }
5190
5191 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5192 wm_put_swfwhw_semaphore(sc);
5193 if (sc->sc_flags & WM_F_SWFW_SYNC)
5194 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5195 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5196 wm_put_swsm_semaphore(sc);
5197 }
5198
5199 /*
5200 * wm_eeprom_sendbits:
5201 *
5202 * Send a series of bits to the EEPROM.
5203 */
5204 static void
5205 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5206 {
5207 uint32_t reg;
5208 int x;
5209
5210 reg = CSR_READ(sc, WMREG_EECD);
5211
5212 for (x = nbits; x > 0; x--) {
5213 if (bits & (1U << (x - 1)))
5214 reg |= EECD_DI;
5215 else
5216 reg &= ~EECD_DI;
5217 CSR_WRITE(sc, WMREG_EECD, reg);
5218 CSR_WRITE_FLUSH(sc);
5219 delay(2);
5220 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5221 CSR_WRITE_FLUSH(sc);
5222 delay(2);
5223 CSR_WRITE(sc, WMREG_EECD, reg);
5224 CSR_WRITE_FLUSH(sc);
5225 delay(2);
5226 }
5227 }
5228
5229 /*
5230 * wm_eeprom_recvbits:
5231 *
5232 * Receive a series of bits from the EEPROM.
5233 */
5234 static void
5235 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5236 {
5237 uint32_t reg, val;
5238 int x;
5239
5240 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5241
5242 val = 0;
5243 for (x = nbits; x > 0; x--) {
5244 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5245 CSR_WRITE_FLUSH(sc);
5246 delay(2);
5247 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5248 val |= (1U << (x - 1));
5249 CSR_WRITE(sc, WMREG_EECD, reg);
5250 CSR_WRITE_FLUSH(sc);
5251 delay(2);
5252 }
5253 *valp = val;
5254 }
5255
5256 /*
5257 * wm_read_eeprom_uwire:
5258 *
5259 * Read a word from the EEPROM using the MicroWire protocol.
5260 */
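/*
 * A sketch of the transaction (assuming a 93Cxx-style part): raise CS,
 * clock out the 3-bit READ opcode and the address MSB-first, then clock
 * in 16 data bits; each bit is strobed with an SK high/low pulse.
 */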
5261 static int
5262 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5263 {
5264 uint32_t reg, val;
5265 int i;
5266
5267 for (i = 0; i < wordcnt; i++) {
5268 /* Clear SK and DI. */
5269 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5270 CSR_WRITE(sc, WMREG_EECD, reg);
5271
5272 /*
5273 * XXX: workaround for a bug in qemu-0.12.x and prior
5274 * and Xen.
5275 *
5276 * We use this workaround only for 82540 because qemu's
5277 * e1000 act as 82540.
5278 */
5279 if (sc->sc_type == WM_T_82540) {
5280 reg |= EECD_SK;
5281 CSR_WRITE(sc, WMREG_EECD, reg);
5282 reg &= ~EECD_SK;
5283 CSR_WRITE(sc, WMREG_EECD, reg);
5284 CSR_WRITE_FLUSH(sc);
5285 delay(2);
5286 }
5287 /* XXX: end of workaround */
5288
5289 /* Set CHIP SELECT. */
5290 reg |= EECD_CS;
5291 CSR_WRITE(sc, WMREG_EECD, reg);
5292 CSR_WRITE_FLUSH(sc);
5293 delay(2);
5294
5295 /* Shift in the READ command. */
5296 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5297
5298 /* Shift in address. */
5299 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5300
5301 /* Shift out the data. */
5302 wm_eeprom_recvbits(sc, &val, 16);
5303 data[i] = val & 0xffff;
5304
5305 /* Clear CHIP SELECT. */
5306 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5307 CSR_WRITE(sc, WMREG_EECD, reg);
5308 CSR_WRITE_FLUSH(sc);
5309 delay(2);
5310 }
5311
5312 return 0;
5313 }
5314
5315 /*
5316 * wm_spi_eeprom_ready:
5317 *
5318 * Wait for a SPI EEPROM to be ready for commands.
5319 */
5320 static int
5321 wm_spi_eeprom_ready(struct wm_softc *sc)
5322 {
5323 uint32_t val;
5324 int usec;
5325
5326 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5327 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5328 wm_eeprom_recvbits(sc, &val, 8);
5329 if ((val & SPI_SR_RDY) == 0)
5330 break;
5331 }
5332 if (usec >= SPI_MAX_RETRIES) {
5333 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5334 return 1;
5335 }
5336 return 0;
5337 }
5338
5339 /*
5340 * wm_read_eeprom_spi:
5341 *
5342  * Read a word from the EEPROM using the SPI protocol.
5343 */
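/*
 * Note (an assumption based on standard SPI EEPROM behavior): SPI parts
 * are byte-addressed, hence the (word << 1) below; on devices with
 * 8-bit addressing the ninth address bit is carried in the opcode
 * (SPI_OPC_A8) for words at offset 128 and above.
 */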
5344 static int
5345 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5346 {
5347 uint32_t reg, val;
5348 int i;
5349 uint8_t opc;
5350
5351 /* Clear SK and CS. */
5352 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5353 CSR_WRITE(sc, WMREG_EECD, reg);
5354 CSR_WRITE_FLUSH(sc);
5355 delay(2);
5356
5357 if (wm_spi_eeprom_ready(sc))
5358 return 1;
5359
5360 /* Toggle CS to flush commands. */
5361 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5362 CSR_WRITE_FLUSH(sc);
5363 delay(2);
5364 CSR_WRITE(sc, WMREG_EECD, reg);
5365 CSR_WRITE_FLUSH(sc);
5366 delay(2);
5367
5368 opc = SPI_OPC_READ;
5369 if (sc->sc_ee_addrbits == 8 && word >= 128)
5370 opc |= SPI_OPC_A8;
5371
5372 wm_eeprom_sendbits(sc, opc, 8);
5373 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5374
5375 for (i = 0; i < wordcnt; i++) {
5376 wm_eeprom_recvbits(sc, &val, 16);
5377 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5378 }
5379
5380 /* Raise CS and clear SK. */
5381 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5382 CSR_WRITE(sc, WMREG_EECD, reg);
5383 CSR_WRITE_FLUSH(sc);
5384 delay(2);
5385
5386 return 0;
5387 }
5388
5389 #define NVM_CHECKSUM 0xBABA
5390 #define EEPROM_SIZE 0x0040
5391 #define NVM_COMPAT 0x0003
5392 #define NVM_COMPAT_VALID_CHECKSUM 0x0001
5393 #define NVM_FUTURE_INIT_WORD1 0x0019
5394 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
5395
5396 /*
5397 * wm_validate_eeprom_checksum
5398 *
5399 * The checksum is defined as the sum of the first 64 (16 bit) words.
5400 */
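/*
 * Example (assuming Intel's usual NVM layout): the image is written so
 * that the 64 words sum to NVM_CHECKSUM (0xBABA) modulo 2^16, i.e. the
 * checksum word compensates for the sum of the words that precede it.
 */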
5401 static int
5402 wm_validate_eeprom_checksum(struct wm_softc *sc)
5403 {
5404 uint16_t checksum;
5405 uint16_t eeprom_data;
5406 #ifdef WM_DEBUG
5407 uint16_t csum_wordaddr, valid_checksum;
5408 #endif
5409 int i;
5410
5411 checksum = 0;
5412
5413 /* Don't check for I211 */
5414 if (sc->sc_type == WM_T_I211)
5415 return 0;
5416
5417 #ifdef WM_DEBUG
5418 if (sc->sc_type == WM_T_PCH_LPT) {
5419 csum_wordaddr = NVM_COMPAT;
5420 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5421 } else {
5422 csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5423 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5424 }
5425
5426 /* Dump EEPROM image for debug */
5427 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5428 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5429 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5430 wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5431 if ((eeprom_data & valid_checksum) == 0) {
5432 DPRINTF(WM_DEBUG_NVM,
5433 ("%s: NVM need to be updated (%04x != %04x)\n",
5434 device_xname(sc->sc_dev), eeprom_data,
5435 valid_checksum));
5436 }
5437 }
5438
5439 if ((wm_debug & WM_DEBUG_NVM) != 0) {
5440 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5441 for (i = 0; i < EEPROM_SIZE; i++) {
5442 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5443 printf("XX ");
5444 else
5445 printf("%04x ", eeprom_data);
5446 if (i % 8 == 7)
5447 printf("\n");
5448 }
5449 }
5450
5451 #endif /* WM_DEBUG */
5452
5453 for (i = 0; i < EEPROM_SIZE; i++) {
5454 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5455 return 1;
5456 checksum += eeprom_data;
5457 }
5458
5459 if (checksum != (uint16_t) NVM_CHECKSUM) {
5460 #ifdef WM_DEBUG
5461 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5462 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5463 #endif
5464 }
5465
5466 return 0;
5467 }
5468
5469 /*
5470 * wm_read_eeprom:
5471 *
5472 * Read data from the serial EEPROM.
5473 */
5474 static int
5475 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5476 {
5477 int rv;
5478
5479 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5480 return 1;
5481
5482 if (wm_acquire_eeprom(sc))
5483 return 1;
5484
5485 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5486 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5487 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5488 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5489 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5490 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5491 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5492 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5493 else
5494 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5495
5496 wm_release_eeprom(sc);
5497 return rv;
5498 }
5499
5500 static int
5501 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5502 uint16_t *data)
5503 {
5504 int i, eerd = 0;
5505 int error = 0;
5506
5507 for (i = 0; i < wordcnt; i++) {
5508 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5509
5510 CSR_WRITE(sc, WMREG_EERD, eerd);
5511 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5512 if (error != 0)
5513 break;
5514
5515 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5516 }
5517
5518 return error;
5519 }
5520
5521 static int
5522 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5523 {
5524 uint32_t attempts = 100000;
5525 uint32_t i, reg = 0;
5526 int32_t done = -1;
5527
5528 for (i = 0; i < attempts; i++) {
5529 reg = CSR_READ(sc, rw);
5530
5531 if (reg & EERD_DONE) {
5532 done = 0;
5533 break;
5534 }
5535 delay(5);
5536 }
5537
5538 return done;
5539 }
5540
5541 static int
5542 wm_check_alt_mac_addr(struct wm_softc *sc)
5543 {
5544 uint16_t myea[ETHER_ADDR_LEN / 2];
5545 uint16_t offset = EEPROM_OFF_MACADDR;
5546
5547 /* Try to read alternative MAC address pointer */
5548 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5549 return -1;
5550
5551 /* Check pointer */
5552 if (offset == 0xffff)
5553 return -1;
5554
5555 /*
5556 	 * Check whether the alternative MAC address is valid.
5557 	 * Some cards have a non-0xffff pointer but don't actually
5558 	 * use an alternative MAC address.
5559 	 *
5560 	 * A valid unicast address must have the multicast bit clear.
5561 */
5562 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5563 if (((myea[0] & 0xff) & 0x01) == 0)
5564 return 0; /* found! */
5565
5566 /* not found */
5567 return -1;
5568 }
5569
5570 static int
5571 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5572 {
5573 uint16_t myea[ETHER_ADDR_LEN / 2];
5574 uint16_t offset = EEPROM_OFF_MACADDR;
5575 int do_invert = 0;
5576
5577 switch (sc->sc_type) {
5578 case WM_T_82580:
5579 case WM_T_82580ER:
5580 case WM_T_I350:
5581 case WM_T_I354:
5582 switch (sc->sc_funcid) {
5583 case 0:
5584 /* default value (== EEPROM_OFF_MACADDR) */
5585 break;
5586 case 1:
5587 offset = EEPROM_OFF_LAN1;
5588 break;
5589 case 2:
5590 offset = EEPROM_OFF_LAN2;
5591 break;
5592 case 3:
5593 offset = EEPROM_OFF_LAN3;
5594 break;
5595 default:
5596 goto bad;
5597 /* NOTREACHED */
5598 break;
5599 }
5600 break;
5601 case WM_T_82571:
5602 case WM_T_82575:
5603 case WM_T_82576:
5604 case WM_T_80003:
5605 case WM_T_I210:
5606 case WM_T_I211:
5607 if (wm_check_alt_mac_addr(sc) != 0) {
5608 /* reset the offset to LAN0 */
5609 offset = EEPROM_OFF_MACADDR;
5610 if ((sc->sc_funcid & 0x01) == 1)
5611 do_invert = 1;
5612 goto do_read;
5613 }
5614 switch (sc->sc_funcid) {
5615 case 0:
5616 /*
5617 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5618 * itself.
5619 */
5620 break;
5621 case 1:
5622 offset += EEPROM_OFF_MACADDR_LAN1;
5623 break;
5624 case 2:
5625 offset += EEPROM_OFF_MACADDR_LAN2;
5626 break;
5627 case 3:
5628 offset += EEPROM_OFF_MACADDR_LAN3;
5629 break;
5630 default:
5631 goto bad;
5632 /* NOTREACHED */
5633 break;
5634 }
5635 break;
5636 default:
5637 if ((sc->sc_funcid & 0x01) == 1)
5638 do_invert = 1;
5639 break;
5640 }
5641
5642 do_read:
5643 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5644 myea) != 0) {
5645 goto bad;
5646 }
5647
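/*
 * The EEPROM stores the station address as three little-endian
 * 16-bit words, so unpack each word low byte first.
 */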
5648 enaddr[0] = myea[0] & 0xff;
5649 enaddr[1] = myea[0] >> 8;
5650 enaddr[2] = myea[1] & 0xff;
5651 enaddr[3] = myea[1] >> 8;
5652 enaddr[4] = myea[2] & 0xff;
5653 enaddr[5] = myea[2] >> 8;
5654
5655 /*
5656 * Toggle the LSB of the MAC address on the second port
5657 * of some dual port cards.
5658 */
5659 if (do_invert != 0)
5660 enaddr[5] ^= 1;
5661
5662 return 0;
5663
5664 bad:
5665 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5666
5667 return -1;
5668 }
5669
5670 /*
5671 * wm_add_rxbuf:
5672 *
5673 * Add a receive buffer to the indicated descriptor.
5674 */
5675 static int
5676 wm_add_rxbuf(struct wm_softc *sc, int idx)
5677 {
5678 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5679 struct mbuf *m;
5680 int error;
5681
5682 MGETHDR(m, M_DONTWAIT, MT_DATA);
5683 if (m == NULL)
5684 return ENOBUFS;
5685
5686 MCLGET(m, M_DONTWAIT);
5687 if ((m->m_flags & M_EXT) == 0) {
5688 m_freem(m);
5689 return ENOBUFS;
5690 }
5691
5692 if (rxs->rxs_mbuf != NULL)
5693 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5694
5695 rxs->rxs_mbuf = m;
5696
5697 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5698 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5699 BUS_DMA_READ|BUS_DMA_NOWAIT);
5700 if (error) {
5701 /* XXX XXX XXX */
5702 aprint_error_dev(sc->sc_dev,
5703 "unable to load rx DMA map %d, error = %d\n",
5704 idx, error);
5705 panic("wm_add_rxbuf");
5706 }
5707
5708 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5709 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5710
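/*
 * On newer-queue (82575-class and later) hardware, only initialize
 * the descriptor once the receiver has been enabled.
 */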
5711 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5712 if ((sc->sc_rctl & RCTL_EN) != 0)
5713 WM_INIT_RXDESC(sc, idx);
5714 } else
5715 WM_INIT_RXDESC(sc, idx);
5716
5717 return 0;
5718 }
5719
5720 /*
5721 * wm_set_ral:
5722 *
5723 * Set an entry in the receive address list.
5724 */
5725 static void
5726 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5727 {
5728 uint32_t ral_lo, ral_hi;
5729
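/*
 * A receive address slot is a register pair: the first four octets
 * go in RAL_LO, the last two in RAL_HI along with the Address Valid
 * bit. A NULL enaddr clears the slot.
 */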
5730 if (enaddr != NULL) {
5731 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5732 (enaddr[3] << 24);
5733 ral_hi = enaddr[4] | (enaddr[5] << 8);
5734 ral_hi |= RAL_AV;
5735 } else {
5736 ral_lo = 0;
5737 ral_hi = 0;
5738 }
5739
5740 if (sc->sc_type >= WM_T_82544) {
5741 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5742 ral_lo);
5743 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5744 ral_hi);
5745 } else {
5746 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5747 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5748 }
5749 }
5750
5751 /*
5752 * wm_mchash:
5753 *
5754 * Compute the hash of the multicast address for the 4096-bit
5755 * multicast filter (1024-bit on the ICH and PCH variants).
5756 */
5757 static uint32_t
5758 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5759 {
5760 static const int lo_shift[4] = { 4, 3, 2, 0 };
5761 static const int hi_shift[4] = { 4, 5, 6, 8 };
5762 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5763 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5764 uint32_t hash;
5765
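/*
 * sc_mchash_type selects which bits of the 48-bit address feed the
 * hash; the shift tables above pick the matching slices of the last
 * two octets for each of the four filter alignments.
 */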
5766 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5767 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5768 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5769 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5770 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5771 return (hash & 0x3ff);
5772 }
5773 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5774 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5775
5776 return (hash & 0xfff);
5777 }
5778
5779 /*
5780 * wm_set_filter:
5781 *
5782 * Set up the receive filter.
5783 */
5784 static void
5785 wm_set_filter(struct wm_softc *sc)
5786 {
5787 struct ethercom *ec = &sc->sc_ethercom;
5788 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5789 struct ether_multi *enm;
5790 struct ether_multistep step;
5791 bus_addr_t mta_reg;
5792 uint32_t hash, reg, bit;
5793 int i, size;
5794
5795 if (sc->sc_type >= WM_T_82544)
5796 mta_reg = WMREG_CORDOVA_MTA;
5797 else
5798 mta_reg = WMREG_MTA;
5799
5800 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5801
5802 if (ifp->if_flags & IFF_BROADCAST)
5803 sc->sc_rctl |= RCTL_BAM;
5804 if (ifp->if_flags & IFF_PROMISC) {
5805 sc->sc_rctl |= RCTL_UPE;
5806 goto allmulti;
5807 }
5808
5809 /*
5810 * Set the station address in the first RAL slot, and
5811 * clear the remaining slots.
5812 */
5813 if (sc->sc_type == WM_T_ICH8)
5814 size = WM_RAL_TABSIZE_ICH8 - 1;
5815 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5816 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
5817 || (sc->sc_type == WM_T_PCH_LPT))
5818 size = WM_RAL_TABSIZE_ICH8;
5819 else if (sc->sc_type == WM_T_82575)
5820 size = WM_RAL_TABSIZE_82575;
5821 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5822 size = WM_RAL_TABSIZE_82576;
5823 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5824 size = WM_RAL_TABSIZE_I350;
5825 else
5826 size = WM_RAL_TABSIZE;
5827 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5828 for (i = 1; i < size; i++)
5829 wm_set_ral(sc, NULL, i);
5830
5831 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5832 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5833 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5834 size = WM_ICH8_MC_TABSIZE;
5835 else
5836 size = WM_MC_TABSIZE;
5837 /* Clear out the multicast table. */
5838 for (i = 0; i < size; i++)
5839 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5840
5841 ETHER_FIRST_MULTI(step, ec, enm);
5842 while (enm != NULL) {
5843 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5844 /*
5845 * We must listen to a range of multicast addresses.
5846 * For now, just accept all multicasts, rather than
5847 * trying to set only those filter bits needed to match
5848 * the range. (At this time, the only use of address
5849 * ranges is for IP multicast routing, for which the
5850 * range is big enough to require all bits set.)
5851 */
5852 goto allmulti;
5853 }
5854
5855 hash = wm_mchash(sc, enm->enm_addrlo);
5856
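/*
 * The hash indexes a bit array spread across the 32-bit MTA
 * registers: the upper bits pick the register and the low five bits
 * pick the bit within it.
 */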
5857 reg = (hash >> 5);
5858 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5859 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5860 || (sc->sc_type == WM_T_PCH2)
5861 || (sc->sc_type == WM_T_PCH_LPT))
5862 reg &= 0x1f;
5863 else
5864 reg &= 0x7f;
5865 bit = hash & 0x1f;
5866
5867 hash = CSR_READ(sc, mta_reg + (reg << 2));
5868 hash |= 1U << bit;
5869
5870 /* XXX 82544 erratum: an odd MTA write can clobber reg - 1 */
5871 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
5872 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5873 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5874 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5875 } else
5876 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5877
5878 ETHER_NEXT_MULTI(step, enm);
5879 }
5880
5881 ifp->if_flags &= ~IFF_ALLMULTI;
5882 goto setit;
5883
5884 allmulti:
5885 ifp->if_flags |= IFF_ALLMULTI;
5886 sc->sc_rctl |= RCTL_MPE;
5887
5888 setit:
5889 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5890 }
5891
5892 /*
5893 * wm_tbi_mediainit:
5894 *
5895 * Initialize media for use on 1000BASE-X devices.
5896 */
5897 static void
5898 wm_tbi_mediainit(struct wm_softc *sc)
5899 {
5900 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5901 const char *sep = "";
5902
5903 if (sc->sc_type < WM_T_82543)
5904 sc->sc_tipg = TIPG_WM_DFLT;
5905 else
5906 sc->sc_tipg = TIPG_LG_DFLT;
5907
5908 sc->sc_tbi_anegticks = 5;
5909
5910 /* Initialize our media structures */
5911 sc->sc_mii.mii_ifp = ifp;
5912
5913 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5914 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5915 wm_tbi_mediastatus);
5916
5917 /*
5918 * SWD Pins:
5919 *
5920 * 0 = Link LED (output)
5921 * 1 = Loss Of Signal (input)
5922 */
5923 sc->sc_ctrl |= CTRL_SWDPIO(0);
5924 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5925
5926 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5927
5928 #define ADD(ss, mm, dd) \
5929 do { \
5930 aprint_normal("%s%s", sep, ss); \
5931 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5932 sep = ", "; \
5933 } while (/*CONSTCOND*/0)
5934
5935 aprint_normal_dev(sc->sc_dev, "");
5936 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5937 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5938 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5939 aprint_normal("\n");
5940
5941 #undef ADD
5942
5943 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5944 }
5945
5946 /*
5947 * wm_tbi_mediastatus: [ifmedia interface function]
5948 *
5949 * Get the current interface media status on a 1000BASE-X device.
5950 */
5951 static void
5952 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5953 {
5954 struct wm_softc *sc = ifp->if_softc;
5955 uint32_t ctrl, status;
5956
5957 ifmr->ifm_status = IFM_AVALID;
5958 ifmr->ifm_active = IFM_ETHER;
5959
5960 status = CSR_READ(sc, WMREG_STATUS);
5961 if ((status & STATUS_LU) == 0) {
5962 ifmr->ifm_active |= IFM_NONE;
5963 return;
5964 }
5965
5966 ifmr->ifm_status |= IFM_ACTIVE;
5967 ifmr->ifm_active |= IFM_1000_SX;
5968 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5969 ifmr->ifm_active |= IFM_FDX;
5970 ctrl = CSR_READ(sc, WMREG_CTRL);
5971 if (ctrl & CTRL_RFCE)
5972 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5973 if (ctrl & CTRL_TFCE)
5974 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5975 }
5976
5977 /*
5978 * wm_tbi_mediachange: [ifmedia interface function]
5979 *
5980 * Set hardware to newly-selected media on a 1000BASE-X device.
5981 */
5982 static int
5983 wm_tbi_mediachange(struct ifnet *ifp)
5984 {
5985 struct wm_softc *sc = ifp->if_softc;
5986 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5987 uint32_t status;
5988 int i;
5989
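/*
 * Build the TXCW (transmit configuration word): advertise
 * symmetric/asymmetric pause when autonegotiating or when flow
 * control is requested, and set ANE only for autonegotiation.
 */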
5990 sc->sc_txcw = 0;
5991 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5992 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5993 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5994 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5995 sc->sc_txcw |= TXCW_ANE;
5996 } else {
5997 /*
5998 * If autonegotiation is turned off, force link up and turn on
5999 * full duplex
6000 */
6001 sc->sc_txcw &= ~TXCW_ANE;
6002 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
6003 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6004 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6005 CSR_WRITE_FLUSH(sc);
6006 delay(1000);
6007 }
6008
6009 DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
6010 device_xname(sc->sc_dev), sc->sc_txcw));
6011 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6012 CSR_WRITE_FLUSH(sc);
6013 delay(10000);
6014
6015 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
6016 DPRINTF(WM_DEBUG_LINK, ("%s: i = 0x%x\n", device_xname(sc->sc_dev), i));
6017
6018 /*
6019 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set when
6020 * the optics detect a signal; on older chips the sense is inverted.
6021 */
6022 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
6023 /* Have signal; wait for the link to come up. */
6024
6025 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6026 /*
6027 * Reset the link, and let autonegotiation do its thing
6028 */
6029 sc->sc_ctrl |= CTRL_LRST;
6030 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6031 CSR_WRITE_FLUSH(sc);
6032 delay(1000);
6033 sc->sc_ctrl &= ~CTRL_LRST;
6034 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6035 CSR_WRITE_FLUSH(sc);
6036 delay(1000);
6037 }
6038
6039 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
6040 delay(10000);
6041 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
6042 break;
6043 }
6044
6045 DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
6046 device_xname(sc->sc_dev), i));
6047
6048 status = CSR_READ(sc, WMREG_STATUS);
6049 DPRINTF(WM_DEBUG_LINK,
6050 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
6051 device_xname(sc->sc_dev), status, STATUS_LU));
6052 if (status & STATUS_LU) {
6053 /* Link is up. */
6054 DPRINTF(WM_DEBUG_LINK,
6055 ("%s: LINK: set media -> link up %s\n",
6056 device_xname(sc->sc_dev),
6057 (status & STATUS_FD) ? "FDX" : "HDX"));
6058
6059 /*
6060 * NOTE: CTRL will update TFCE and RFCE automatically,
6061 * so we should update sc->sc_ctrl
6062 */
6063 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6064 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6065 sc->sc_fcrtl &= ~FCRTL_XONE;
6066 if (status & STATUS_FD)
6067 sc->sc_tctl |=
6068 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6069 else
6070 sc->sc_tctl |=
6071 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6072 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6073 sc->sc_fcrtl |= FCRTL_XONE;
6074 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6075 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6076 WMREG_OLD_FCRTL : WMREG_FCRTL,
6077 sc->sc_fcrtl);
6078 sc->sc_tbi_linkup = 1;
6079 } else {
6080 if (i == WM_LINKUP_TIMEOUT)
6081 wm_check_for_link(sc);
6082 /* Link is down. */
6083 DPRINTF(WM_DEBUG_LINK,
6084 ("%s: LINK: set media -> link down\n",
6085 device_xname(sc->sc_dev)));
6086 sc->sc_tbi_linkup = 0;
6087 }
6088 } else {
6089 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6090 device_xname(sc->sc_dev)));
6091 sc->sc_tbi_linkup = 0;
6092 }
6093
6094 wm_tbi_set_linkled(sc);
6095
6096 return 0;
6097 }
6098
6099 /*
6100 * wm_tbi_set_linkled:
6101 *
6102 * Update the link LED on 1000BASE-X devices.
6103 */
6104 static void
6105 wm_tbi_set_linkled(struct wm_softc *sc)
6106 {
6107
6108 if (sc->sc_tbi_linkup)
6109 sc->sc_ctrl |= CTRL_SWDPIN(0);
6110 else
6111 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6112
6113 /* 82540 or newer devices are active low */
6114 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6115
6116 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6117 }
6118
6119 /*
6120 * wm_tbi_check_link:
6121 *
6122 * Check the link on 1000BASE-X devices.
6123 */
6124 static void
6125 wm_tbi_check_link(struct wm_softc *sc)
6126 {
6127 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6128 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6129 uint32_t status;
6130
6131 status = CSR_READ(sc, WMREG_STATUS);
6132
6133 /* XXX is this needed? */
6134 (void)CSR_READ(sc, WMREG_RXCW);
6135 (void)CSR_READ(sc, WMREG_CTRL);
6136
6137 /* set link status */
6138 if ((status & STATUS_LU) == 0) {
6139 DPRINTF(WM_DEBUG_LINK,
6140 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6141 sc->sc_tbi_linkup = 0;
6142 } else if (sc->sc_tbi_linkup == 0) {
6143 DPRINTF(WM_DEBUG_LINK,
6144 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6145 (status & STATUS_FD) ? "FDX" : "HDX"));
6146 sc->sc_tbi_linkup = 1;
6147 }
6148
6149 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6150 && ((status & STATUS_LU) == 0)) {
6151 sc->sc_tbi_linkup = 0;
6152 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6153 /* RXCFG storm! */
6154 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6155 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6156 wm_init(ifp);
6157 ifp->if_start(ifp);
6158 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6159 /* If the timer expired, retry autonegotiation */
6160 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6161 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6162 sc->sc_tbi_ticks = 0;
6163 /*
6164 * Reset the link, and let autonegotiation do
6165 * its thing
6166 */
6167 sc->sc_ctrl |= CTRL_LRST;
6168 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6169 CSR_WRITE_FLUSH(sc);
6170 delay(1000);
6171 sc->sc_ctrl &= ~CTRL_LRST;
6172 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6173 CSR_WRITE_FLUSH(sc);
6174 delay(1000);
6175 CSR_WRITE(sc, WMREG_TXCW,
6176 sc->sc_txcw & ~TXCW_ANE);
6177 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6178 }
6179 }
6180 }
6181
6182 wm_tbi_set_linkled(sc);
6183 }
6184
6185 /*
6186 * wm_gmii_reset:
6187 *
6188 * Reset the PHY.
6189 */
6190 static void
6191 wm_gmii_reset(struct wm_softc *sc)
6192 {
6193 uint32_t reg;
6194 int rv;
6195
6196 /* get phy semaphore */
6197 switch (sc->sc_type) {
6198 case WM_T_82571:
6199 case WM_T_82572:
6200 case WM_T_82573:
6201 case WM_T_82574:
6202 case WM_T_82583:
6203 /* XXX should get sw semaphore, too */
6204 rv = wm_get_swsm_semaphore(sc);
6205 break;
6206 case WM_T_82575:
6207 case WM_T_82576:
6208 case WM_T_82580:
6209 case WM_T_82580ER:
6210 case WM_T_I350:
6211 case WM_T_I354:
6212 case WM_T_I210:
6213 case WM_T_I211:
6214 case WM_T_80003:
6215 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6216 break;
6217 case WM_T_ICH8:
6218 case WM_T_ICH9:
6219 case WM_T_ICH10:
6220 case WM_T_PCH:
6221 case WM_T_PCH2:
6222 case WM_T_PCH_LPT:
6223 rv = wm_get_swfwhw_semaphore(sc);
6224 break;
6225 default:
6226 /* nothing to do */
6227 rv = 0;
6228 break;
6229 }
6230 if (rv != 0) {
6231 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6232 __func__);
6233 return;
6234 }
6235
6236 switch (sc->sc_type) {
6237 case WM_T_82542_2_0:
6238 case WM_T_82542_2_1:
6239 /* null */
6240 break;
6241 case WM_T_82543:
6242 /*
6243 * With 82543, we need to force speed and duplex on the MAC
6244 * equal to what the PHY speed and duplex configuration is.
6245 * In addition, we need to perform a hardware reset on the PHY
6246 * to take it out of reset.
6247 */
6248 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6249 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6250
6251 /* The PHY reset pin is active-low. */
6252 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6253 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6254 CTRL_EXT_SWDPIN(4));
6255 reg |= CTRL_EXT_SWDPIO(4);
6256
6257 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6258 CSR_WRITE_FLUSH(sc);
6259 delay(10*1000);
6260
6261 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6262 CSR_WRITE_FLUSH(sc);
6263 delay(150);
6264 #if 0
6265 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6266 #endif
6267 delay(20*1000); /* XXX extra delay to get PHY ID? */
6268 break;
6269 case WM_T_82544: /* reset 10000us */
6270 case WM_T_82540:
6271 case WM_T_82545:
6272 case WM_T_82545_3:
6273 case WM_T_82546:
6274 case WM_T_82546_3:
6275 case WM_T_82541:
6276 case WM_T_82541_2:
6277 case WM_T_82547:
6278 case WM_T_82547_2:
6279 case WM_T_82571: /* reset 100us */
6280 case WM_T_82572:
6281 case WM_T_82573:
6282 case WM_T_82574:
6283 case WM_T_82575:
6284 case WM_T_82576:
6285 case WM_T_82580:
6286 case WM_T_82580ER:
6287 case WM_T_I350:
6288 case WM_T_I354:
6289 case WM_T_I210:
6290 case WM_T_I211:
6291 case WM_T_82583:
6292 case WM_T_80003:
6293 /* generic reset */
6294 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6295 CSR_WRITE_FLUSH(sc);
6296 delay(20000);
6297 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6298 CSR_WRITE_FLUSH(sc);
6299 delay(20000);
6300
6301 if ((sc->sc_type == WM_T_82541)
6302 || (sc->sc_type == WM_T_82541_2)
6303 || (sc->sc_type == WM_T_82547)
6304 || (sc->sc_type == WM_T_82547_2)) {
6305 /* workarounds for igp are done in igp_reset() */
6306 /* XXX add code to set LED after phy reset */
6307 }
6308 break;
6309 case WM_T_ICH8:
6310 case WM_T_ICH9:
6311 case WM_T_ICH10:
6312 case WM_T_PCH:
6313 case WM_T_PCH2:
6314 case WM_T_PCH_LPT:
6315 /* generic reset */
6316 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6317 CSR_WRITE_FLUSH(sc);
6318 delay(100);
6319 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6320 CSR_WRITE_FLUSH(sc);
6321 delay(150);
6322 break;
6323 default:
6324 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6325 __func__);
6326 break;
6327 }
6328
6329 /* release PHY semaphore */
6330 switch (sc->sc_type) {
6331 case WM_T_82571:
6332 case WM_T_82572:
6333 case WM_T_82573:
6334 case WM_T_82574:
6335 case WM_T_82583:
6336 /* XXX should put sw semaphore, too */
6337 wm_put_swsm_semaphore(sc);
6338 break;
6339 case WM_T_82575:
6340 case WM_T_82576:
6341 case WM_T_82580:
6342 case WM_T_82580ER:
6343 case WM_T_I350:
6344 case WM_T_I354:
6345 case WM_T_I210:
6346 case WM_T_I211:
6347 case WM_T_80003:
6348 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6349 break;
6350 case WM_T_ICH8:
6351 case WM_T_ICH9:
6352 case WM_T_ICH10:
6353 case WM_T_PCH:
6354 case WM_T_PCH2:
6355 case WM_T_PCH_LPT:
6356 wm_put_swfwhw_semaphore(sc);
6357 break;
6358 default:
6359 /* nothing to do */
6360 rv = 0;
6361 break;
6362 }
6363
6364 /* get_cfg_done */
6365 wm_get_cfg_done(sc);
6366
6367 /* extra setup */
6368 switch (sc->sc_type) {
6369 case WM_T_82542_2_0:
6370 case WM_T_82542_2_1:
6371 case WM_T_82543:
6372 case WM_T_82544:
6373 case WM_T_82540:
6374 case WM_T_82545:
6375 case WM_T_82545_3:
6376 case WM_T_82546:
6377 case WM_T_82546_3:
6378 case WM_T_82541_2:
6379 case WM_T_82547_2:
6380 case WM_T_82571:
6381 case WM_T_82572:
6382 case WM_T_82573:
6383 case WM_T_82574:
6384 case WM_T_82575:
6385 case WM_T_82576:
6386 case WM_T_82580:
6387 case WM_T_82580ER:
6388 case WM_T_I350:
6389 case WM_T_I354:
6390 case WM_T_I210:
6391 case WM_T_I211:
6392 case WM_T_82583:
6393 case WM_T_80003:
6394 /* null */
6395 break;
6396 case WM_T_82541:
6397 case WM_T_82547:
6398 /* XXX Actively configure the LED after PHY reset */
6399 break;
6400 case WM_T_ICH8:
6401 case WM_T_ICH9:
6402 case WM_T_ICH10:
6403 case WM_T_PCH:
6404 case WM_T_PCH2:
6405 case WM_T_PCH_LPT:
6406 /* Allow time for h/w to get to a quiescent state after reset */
6407 delay(10*1000);
6408
6409 if (sc->sc_type == WM_T_PCH)
6410 wm_hv_phy_workaround_ich8lan(sc);
6411
6412 if (sc->sc_type == WM_T_PCH2)
6413 wm_lv_phy_workaround_ich8lan(sc);
6414
6415 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6416 /*
6417 * dummy read to clear the phy wakeup bit after lcd
6418 * reset
6419 */
6420 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6421 }
6422
6423 /*
6424 * XXX Configure the LCD with the extended configuration region
6425 * in NVM
6426 */
6427
6428 /* Configure the LCD with the OEM bits in NVM */
6429 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6430 || (sc->sc_type == WM_T_PCH_LPT)) {
6431 /*
6432 * Disable LPLU.
6433 * XXX It seems that 82567 has LPLU, too.
6434 */
6435 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6436 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6437 reg |= HV_OEM_BITS_ANEGNOW;
6438 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6439 }
6440 break;
6441 default:
6442 panic("%s: unknown type\n", __func__);
6443 break;
6444 }
6445 }
6446
6447 /*
6448 * wm_get_phy_id_82575:
6449 *
6450 * Return PHY ID. Return -1 if it failed.
6451 */
6452 static int
6453 wm_get_phy_id_82575(struct wm_softc *sc)
6454 {
6455 uint32_t reg;
6456 int phyid = -1;
6457
6458 /* XXX */
6459 if ((sc->sc_flags & WM_F_SGMII) == 0)
6460 return -1;
6461
6462 if (wm_sgmii_uses_mdio(sc)) {
6463 switch (sc->sc_type) {
6464 case WM_T_82575:
6465 case WM_T_82576:
6466 reg = CSR_READ(sc, WMREG_MDIC);
6467 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6468 break;
6469 case WM_T_82580:
6470 case WM_T_I350:
6471 case WM_T_I354:
6472 case WM_T_I210:
6473 case WM_T_I211:
6474 reg = CSR_READ(sc, WMREG_MDICNFG);
6475 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6476 break;
6477 default:
6478 return -1;
6479 }
6480 }
6481
6482 return phyid;
6483 }
6484
6485
6486 /*
6487 * wm_gmii_mediainit:
6488 *
6489 * Initialize media for use on 1000BASE-T devices.
6490 */
6491 static void
6492 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6493 {
6494 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6495 struct mii_data *mii = &sc->sc_mii;
6496
6497 /* We have MII. */
6498 sc->sc_flags |= WM_F_HAS_MII;
6499
6500 if (sc->sc_type == WM_T_80003)
6501 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6502 else
6503 sc->sc_tipg = TIPG_1000T_DFLT;
6504
6505 /*
6506 * Let the chip set speed/duplex on its own based on
6507 * signals from the PHY.
6508 * XXXbouyer - I'm not sure this is right for the 80003,
6509 * the em driver only sets CTRL_SLU here - but it seems to work.
6510 */
6511 sc->sc_ctrl |= CTRL_SLU;
6512 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6513
6514 /* Initialize our media structures and probe the GMII. */
6515 mii->mii_ifp = ifp;
6516
6517 /*
6518 * Determine the PHY access method.
6519 *
6520 * For SGMII, use SGMII specific method.
6521 *
6522 * For some devices, we can determine the PHY access method
6523 * from sc_type.
6524 *
6525 * For ICH8 variants, it's difficult to determine the PHY access
6526 * method from sc_type, so use the PCI product ID for some devices.
6527 * For the other ICH8 variants, try igp's method first; if the PHY
6528 * can't be detected that way, fall back to bm's method.
6529 */
6530 switch (prodid) {
6531 case PCI_PRODUCT_INTEL_PCH_M_LM:
6532 case PCI_PRODUCT_INTEL_PCH_M_LC:
6533 /* 82577 */
6534 sc->sc_phytype = WMPHY_82577;
6535 mii->mii_readreg = wm_gmii_hv_readreg;
6536 mii->mii_writereg = wm_gmii_hv_writereg;
6537 break;
6538 case PCI_PRODUCT_INTEL_PCH_D_DM:
6539 case PCI_PRODUCT_INTEL_PCH_D_DC:
6540 /* 82578 */
6541 sc->sc_phytype = WMPHY_82578;
6542 mii->mii_readreg = wm_gmii_hv_readreg;
6543 mii->mii_writereg = wm_gmii_hv_writereg;
6544 break;
6545 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6546 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6547 /* 82579 */
6548 sc->sc_phytype = WMPHY_82579;
6549 mii->mii_readreg = wm_gmii_hv_readreg;
6550 mii->mii_writereg = wm_gmii_hv_writereg;
6551 break;
6552 case PCI_PRODUCT_INTEL_I217_LM:
6553 case PCI_PRODUCT_INTEL_I217_V:
6554 case PCI_PRODUCT_INTEL_I218_LM:
6555 case PCI_PRODUCT_INTEL_I218_V:
6556 /* I21[78] */
6557 mii->mii_readreg = wm_gmii_hv_readreg;
6558 mii->mii_writereg = wm_gmii_hv_writereg;
6559 break;
6560 case PCI_PRODUCT_INTEL_82801I_BM:
6561 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6562 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6563 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6564 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6565 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6566 /* 82567 */
6567 sc->sc_phytype = WMPHY_BM;
6568 mii->mii_readreg = wm_gmii_bm_readreg;
6569 mii->mii_writereg = wm_gmii_bm_writereg;
6570 break;
6571 default:
6572 if (((sc->sc_flags & WM_F_SGMII) != 0)
6573 && !wm_sgmii_uses_mdio(sc)) {
6574 mii->mii_readreg = wm_sgmii_readreg;
6575 mii->mii_writereg = wm_sgmii_writereg;
6576 } else if (sc->sc_type >= WM_T_80003) {
6577 mii->mii_readreg = wm_gmii_i80003_readreg;
6578 mii->mii_writereg = wm_gmii_i80003_writereg;
6579 } else if (sc->sc_type >= WM_T_I210) {
6580 mii->mii_readreg = wm_gmii_i82544_readreg;
6581 mii->mii_writereg = wm_gmii_i82544_writereg;
6582 } else if (sc->sc_type >= WM_T_82580) {
6583 sc->sc_phytype = WMPHY_82580;
6584 mii->mii_readreg = wm_gmii_82580_readreg;
6585 mii->mii_writereg = wm_gmii_82580_writereg;
6586 } else if (sc->sc_type >= WM_T_82544) {
6587 mii->mii_readreg = wm_gmii_i82544_readreg;
6588 mii->mii_writereg = wm_gmii_i82544_writereg;
6589 } else {
6590 mii->mii_readreg = wm_gmii_i82543_readreg;
6591 mii->mii_writereg = wm_gmii_i82543_writereg;
6592 }
6593 break;
6594 }
6595 mii->mii_statchg = wm_gmii_statchg;
6596
6597 wm_gmii_reset(sc);
6598
6599 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6600 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6601 wm_gmii_mediastatus);
6602
6603 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6604 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6605 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6606 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6607 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6608 /* Attach only one port */
6609 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6610 MII_OFFSET_ANY, MIIF_DOPAUSE);
6611 } else {
6612 int i, id;
6613 uint32_t ctrl_ext;
6614
6615 id = wm_get_phy_id_82575(sc);
6616 if (id != -1) {
6617 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6618 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6619 }
6620 if ((id == -1)
6621 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6622 /* Power on sgmii phy if it is disabled */
6623 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6624 CSR_WRITE(sc, WMREG_CTRL_EXT,
6625 ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6626 CSR_WRITE_FLUSH(sc);
6627 delay(300*1000); /* XXX too long */
6628
6629 /* try PHY addresses 1 through 7 */
6630 for (i = 1; i < 8; i++)
6631 mii_attach(sc->sc_dev, &sc->sc_mii,
6632 0xffffffff, i, MII_OFFSET_ANY,
6633 MIIF_DOPAUSE);
6634
6635 /* restore previous sfp cage power state */
6636 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6637 }
6638 }
6639 } else {
6640 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6641 MII_OFFSET_ANY, MIIF_DOPAUSE);
6642 }
6643
6644 /*
6645 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6646 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6647 */
6648 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6649 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6650 wm_set_mdio_slow_mode_hv(sc);
6651 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6652 MII_OFFSET_ANY, MIIF_DOPAUSE);
6653 }
6654
6655 /*
6656 * (For ICH8 variants)
6657 * If PHY detection failed, use BM's r/w function and retry.
6658 */
6659 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6660 /* if failed, retry with *_bm_* */
6661 mii->mii_readreg = wm_gmii_bm_readreg;
6662 mii->mii_writereg = wm_gmii_bm_writereg;
6663
6664 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6665 MII_OFFSET_ANY, MIIF_DOPAUSE);
6666 }
6667
6668 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6669 /* No PHY was found */
6670 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6671 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6672 sc->sc_phytype = WMPHY_NONE;
6673 } else {
6674 /*
6675 * PHY Found!
6676 * Check PHY type.
6677 */
6678 uint32_t model;
6679 struct mii_softc *child;
6680
6681 child = LIST_FIRST(&mii->mii_phys);
6682 if (device_is_a(child->mii_dev, "igphy")) {
6683 struct igphy_softc *isc = (struct igphy_softc *)child;
6684
6685 model = isc->sc_mii.mii_mpd_model;
6686 if (model == MII_MODEL_yyINTEL_I82566)
6687 sc->sc_phytype = WMPHY_IGP_3;
6688 }
6689
6690 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6691 }
6692 }
6693
6694 /*
6695 * wm_gmii_mediastatus: [ifmedia interface function]
6696 *
6697 * Get the current interface media status on a 1000BASE-T device.
6698 */
6699 static void
6700 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6701 {
6702 struct wm_softc *sc = ifp->if_softc;
6703
6704 ether_mediastatus(ifp, ifmr);
6705 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6706 | sc->sc_flowflags;
6707 }
6708
6709 /*
6710 * wm_gmii_mediachange: [ifmedia interface function]
6711 *
6712 * Set hardware to newly-selected media on a 1000BASE-T device.
6713 */
6714 static int
6715 wm_gmii_mediachange(struct ifnet *ifp)
6716 {
6717 struct wm_softc *sc = ifp->if_softc;
6718 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6719 int rc;
6720
6721 if ((ifp->if_flags & IFF_UP) == 0)
6722 return 0;
6723
6724 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6725 sc->sc_ctrl |= CTRL_SLU;
6726 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6727 || (sc->sc_type > WM_T_82543)) {
6728 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6729 } else {
6730 sc->sc_ctrl &= ~CTRL_ASDE;
6731 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6732 if (ife->ifm_media & IFM_FDX)
6733 sc->sc_ctrl |= CTRL_FD;
6734 switch (IFM_SUBTYPE(ife->ifm_media)) {
6735 case IFM_10_T:
6736 sc->sc_ctrl |= CTRL_SPEED_10;
6737 break;
6738 case IFM_100_TX:
6739 sc->sc_ctrl |= CTRL_SPEED_100;
6740 break;
6741 case IFM_1000_T:
6742 sc->sc_ctrl |= CTRL_SPEED_1000;
6743 break;
6744 default:
6745 panic("wm_gmii_mediachange: bad media 0x%x",
6746 ife->ifm_media);
6747 }
6748 }
6749 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6750 if (sc->sc_type <= WM_T_82543)
6751 wm_gmii_reset(sc);
6752
6753 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6754 return 0;
6755 return rc;
6756 }
6757
6758 #define MDI_IO CTRL_SWDPIN(2)
6759 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6760 #define MDI_CLK CTRL_SWDPIN(3)
6761
6762 static void
6763 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6764 {
6765 uint32_t i, v;
6766
6767 v = CSR_READ(sc, WMREG_CTRL);
6768 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6769 v |= MDI_DIR | CTRL_SWDPIO(3);
6770
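/*
 * Clock the bits out MSB first, pulsing MDI_CLK high and low around
 * each data bit.
 */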
6771 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6772 if (data & i)
6773 v |= MDI_IO;
6774 else
6775 v &= ~MDI_IO;
6776 CSR_WRITE(sc, WMREG_CTRL, v);
6777 CSR_WRITE_FLUSH(sc);
6778 delay(10);
6779 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6780 CSR_WRITE_FLUSH(sc);
6781 delay(10);
6782 CSR_WRITE(sc, WMREG_CTRL, v);
6783 CSR_WRITE_FLUSH(sc);
6784 delay(10);
6785 }
6786 }
6787
6788 static uint32_t
6789 i82543_mii_recvbits(struct wm_softc *sc)
6790 {
6791 uint32_t v, i, data = 0;
6792
6793 v = CSR_READ(sc, WMREG_CTRL);
6794 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6795 v |= CTRL_SWDPIO(3);
6796
6797 CSR_WRITE(sc, WMREG_CTRL, v);
6798 CSR_WRITE_FLUSH(sc);
6799 delay(10);
6800 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6801 CSR_WRITE_FLUSH(sc);
6802 delay(10);
6803 CSR_WRITE(sc, WMREG_CTRL, v);
6804 CSR_WRITE_FLUSH(sc);
6805 delay(10);
6806
6807 for (i = 0; i < 16; i++) {
6808 data <<= 1;
6809 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6810 CSR_WRITE_FLUSH(sc);
6811 delay(10);
6812 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6813 data |= 1;
6814 CSR_WRITE(sc, WMREG_CTRL, v);
6815 CSR_WRITE_FLUSH(sc);
6816 delay(10);
6817 }
6818
6819 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6820 CSR_WRITE_FLUSH(sc);
6821 delay(10);
6822 CSR_WRITE(sc, WMREG_CTRL, v);
6823 CSR_WRITE_FLUSH(sc);
6824 delay(10);
6825
6826 return data;
6827 }
6828
6829 #undef MDI_IO
6830 #undef MDI_DIR
6831 #undef MDI_CLK
6832
6833 /*
6834 * wm_gmii_i82543_readreg: [mii interface function]
6835 *
6836 * Read a PHY register on the GMII (i82543 version).
6837 */
6838 static int
6839 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6840 {
6841 struct wm_softc *sc = device_private(self);
6842 int rv;
6843
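/* 32 bits of preamble, then the 14-bit read command frame. */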
6844 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6845 i82543_mii_sendbits(sc, reg | (phy << 5) |
6846 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6847 rv = i82543_mii_recvbits(sc) & 0xffff;
6848
6849 DPRINTF(WM_DEBUG_GMII,
6850 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6851 device_xname(sc->sc_dev), phy, reg, rv));
6852
6853 return rv;
6854 }
6855
6856 /*
6857 * wm_gmii_i82543_writereg: [mii interface function]
6858 *
6859 * Write a PHY register on the GMII (i82543 version).
6860 */
6861 static void
6862 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6863 {
6864 struct wm_softc *sc = device_private(self);
6865
6866 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6867 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6868 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6869 (MII_COMMAND_START << 30), 32);
6870 }
6871
6872 /*
6873 * wm_gmii_i82544_readreg: [mii interface function]
6874 *
6875 * Read a PHY register on the GMII.
6876 */
6877 static int
6878 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6879 {
6880 struct wm_softc *sc = device_private(self);
6881 uint32_t mdic = 0;
6882 int i, rv;
6883
6884 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6885 MDIC_REGADD(reg));
6886
6887 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6888 mdic = CSR_READ(sc, WMREG_MDIC);
6889 if (mdic & MDIC_READY)
6890 break;
6891 delay(50);
6892 }
6893
6894 if ((mdic & MDIC_READY) == 0) {
6895 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6896 device_xname(sc->sc_dev), phy, reg);
6897 rv = 0;
6898 } else if (mdic & MDIC_E) {
6899 #if 0 /* This is normal if no PHY is present. */
6900 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6901 device_xname(sc->sc_dev), phy, reg);
6902 #endif
6903 rv = 0;
6904 } else {
6905 rv = MDIC_DATA(mdic);
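/* All-ones data usually means no PHY answered. */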
6906 if (rv == 0xffff)
6907 rv = 0;
6908 }
6909
6910 return rv;
6911 }
6912
6913 /*
6914 * wm_gmii_i82544_writereg: [mii interface function]
6915 *
6916 * Write a PHY register on the GMII.
6917 */
6918 static void
6919 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6920 {
6921 struct wm_softc *sc = device_private(self);
6922 uint32_t mdic = 0;
6923 int i;
6924
6925 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6926 MDIC_REGADD(reg) | MDIC_DATA(val));
6927
6928 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6929 mdic = CSR_READ(sc, WMREG_MDIC);
6930 if (mdic & MDIC_READY)
6931 break;
6932 delay(50);
6933 }
6934
6935 if ((mdic & MDIC_READY) == 0)
6936 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6937 device_xname(sc->sc_dev), phy, reg);
6938 else if (mdic & MDIC_E)
6939 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6940 device_xname(sc->sc_dev), phy, reg);
6941 }
6942
6943 /*
6944 * wm_gmii_i80003_readreg: [mii interface function]
6945 *
6946 * Read a PHY register on the Kumeran interface (80003).
6947 * This could be handled by the PHY layer if we didn't have to lock
6948 * the resource ...
6949 */
6950 static int
6951 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6952 {
6953 struct wm_softc *sc = device_private(self);
6954 int sem;
6955 int rv;
6956
6957 if (phy != 1) /* only one PHY on kumeran bus */
6958 return 0;
6959
6960 sem = swfwphysem[sc->sc_funcid];
6961 if (wm_get_swfw_semaphore(sc, sem)) {
6962 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6963 __func__);
6964 return 0;
6965 }
6966
6967 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6968 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6969 reg >> GG82563_PAGE_SHIFT);
6970 } else {
6971 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6972 reg >> GG82563_PAGE_SHIFT);
6973 }
6974 /* Wait another 200us to work around a bug in the MDIC ready bit */
6975 delay(200);
6976 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6977 delay(200);
6978
6979 wm_put_swfw_semaphore(sc, sem);
6980 return rv;
6981 }
6982
6983 /*
6984 * wm_gmii_i80003_writereg: [mii interface function]
6985 *
6986 * Write a PHY register on the Kumeran interface (80003).
6987 * This could be handled by the PHY layer if we didn't have to lock
6988 * the resource ...
6989 */
6990 static void
6991 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6992 {
6993 struct wm_softc *sc = device_private(self);
6994 int sem;
6995
6996 if (phy != 1) /* only one PHY on kumeran bus */
6997 return;
6998
6999 sem = swfwphysem[sc->sc_funcid];
7000 if (wm_get_swfw_semaphore(sc, sem)) {
7001 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7002 __func__);
7003 return;
7004 }
7005
7006 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7007 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7008 reg >> GG82563_PAGE_SHIFT);
7009 } else {
7010 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7011 reg >> GG82563_PAGE_SHIFT);
7012 }
7013 /* Wait another 200us to work around a bug in the MDIC ready bit */
7014 delay(200);
7015 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7016 delay(200);
7017
7018 wm_put_swfw_semaphore(sc, sem);
7019 }
7020
7021 /*
7022 * wm_gmii_bm_readreg: [mii interface function]
7023 *
7024 * Read a PHY register on the BM PHY (82567 and ICH variants).
7025 * This could be handled by the PHY layer if we didn't have to lock
7026 * the resource ...
7027 */
7028 static int
7029 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7030 {
7031 struct wm_softc *sc = device_private(self);
7032 int sem;
7033 int rv;
7034
7035 sem = swfwphysem[sc->sc_funcid];
7036 if (wm_get_swfw_semaphore(sc, sem)) {
7037 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7038 __func__);
7039 return 0;
7040 }
7041
7042 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7043 if (phy == 1)
7044 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7045 reg);
7046 else
7047 wm_gmii_i82544_writereg(self, phy,
7048 GG82563_PHY_PAGE_SELECT,
7049 reg >> GG82563_PAGE_SHIFT);
7050 }
7051
7052 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7053 wm_put_swfw_semaphore(sc, sem);
7054 return rv;
7055 }
7056
7057 /*
7058 * wm_gmii_bm_writereg: [mii interface function]
7059 *
7060 * Write a PHY register on the BM PHY (82567 and ICH variants).
7061 * This could be handled by the PHY layer if we didn't have to lock
7062 * the resource ...
7063 */
7064 static void
7065 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7066 {
7067 struct wm_softc *sc = device_private(self);
7068 int sem;
7069
7070 sem = swfwphysem[sc->sc_funcid];
7071 if (wm_get_swfw_semaphore(sc, sem)) {
7072 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7073 __func__);
7074 return;
7075 }
7076
7077 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7078 if (phy == 1)
7079 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7080 reg);
7081 else
7082 wm_gmii_i82544_writereg(self, phy,
7083 GG82563_PHY_PAGE_SELECT,
7084 reg >> GG82563_PAGE_SHIFT);
7085 }
7086
7087 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7088 wm_put_swfw_semaphore(sc, sem);
7089 }
7090
7091 static void
7092 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7093 {
7094 struct wm_softc *sc = device_private(self);
7095 uint16_t regnum = BM_PHY_REG_NUM(offset);
7096 uint16_t wuce;
7097
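/*
 * Wakeup registers are reached indirectly: enable host wakeup access
 * on page 769, point the address opcode register on page 800 at the
 * target, move the data through the data opcode register, then
 * restore page 769 to its previous state.
 */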
7098 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7099 if (sc->sc_type == WM_T_PCH) {
7100 /* XXX the e1000 driver does nothing here... why? */
7101 }
7102
7103 /* Set page 769 */
7104 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7105 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7106
7107 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7108
7109 wuce &= ~BM_WUC_HOST_WU_BIT;
7110 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7111 wuce | BM_WUC_ENABLE_BIT);
7112
7113 /* Select page 800 */
7114 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7115 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7116
7117 /* Write page 800 */
7118 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7119
7120 if (rd)
7121 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7122 else
7123 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7124
7125 /* Set page 769 */
7126 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7127 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7128
7129 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7130 }
7131
7132 /*
7133 * wm_gmii_hv_readreg: [mii interface function]
7134 *
7135 * Read a PHY register on the HV (PCH) PHY.
7136 * This could be handled by the PHY layer if we didn't have to lock
7137 * the resource ...
7138 */
7139 static int
7140 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7141 {
7142 struct wm_softc *sc = device_private(self);
7143 uint16_t page = BM_PHY_REG_PAGE(reg);
7144 uint16_t regnum = BM_PHY_REG_NUM(reg);
7145 uint16_t val;
7146 int rv;
7147
7148 if (wm_get_swfwhw_semaphore(sc)) {
7149 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7150 __func__);
7151 return 0;
7152 }
7153
7154 /* XXX Workaround failure in MDIO access while cable is disconnected */
7155 if (sc->sc_phytype == WMPHY_82577) {
7156 /* XXX must write */
7157 }
7158
7159 /* Page 800 works differently than the rest so it has its own func */
7160 if (page == BM_WUC_PAGE) {
7161 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
wm_put_swfwhw_semaphore(sc);
7162 return val;
7163 }
7164
7165 /*
7166 * Pages below 768 work differently from the rest and would need
7167 * their own function; this is not implemented yet.
7168 */
7169 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7170 printf("gmii_hv_readreg!!!\n");
wm_put_swfwhw_semaphore(sc);
7171 return 0;
7172 }
7173
7174 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7175 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7176 page << BME1000_PAGE_SHIFT);
7177 }
7178
7179 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7180 wm_put_swfwhw_semaphore(sc);
7181 return rv;
7182 }
7183
7184 /*
7185 * wm_gmii_hv_writereg: [mii interface function]
7186 *
7187 * Write a PHY register on the HV (PCH) PHY.
7188 * This could be handled by the PHY layer if we didn't have to lock
7189 * the resource ...
7190 */
7191 static void
7192 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7193 {
7194 struct wm_softc *sc = device_private(self);
7195 uint16_t page = BM_PHY_REG_PAGE(reg);
7196 uint16_t regnum = BM_PHY_REG_NUM(reg);
7197
7198 if (wm_get_swfwhw_semaphore(sc)) {
7199 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7200 __func__);
7201 return;
7202 }
7203
7204 /* XXX Workaround failure in MDIO access while cable is disconnected */
7205
7206 /* Page 800 works differently than the rest so it has its own func */
7207 if (page == BM_WUC_PAGE) {
7208 uint16_t tmp;
7209
7210 tmp = val;
7211 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
wm_put_swfwhw_semaphore(sc);
7212 return;
7213 }
7214
7215 /*
7216 * Pages below 768 work differently from the rest and would need
7217 * their own function; this is not implemented yet.
7218 */
7219 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7220 printf("gmii_hv_writereg!!!\n");
wm_put_swfwhw_semaphore(sc);
7221 return;
7222 }
7223
7224 /*
7225 * XXX Workaround MDIO accesses being disabled after entering IEEE
7226 * Power Down (whenever bit 11 of the PHY control register is set)
7227 */
7228
7229 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7230 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7231 page << BME1000_PAGE_SHIFT);
7232 }
7233
7234 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7235 wm_put_swfwhw_semaphore(sc);
7236 }
7237
7238 /*
7239 * wm_sgmii_uses_mdio
7240 *
7241 * Check whether the transaction is to the internal PHY or the external
7242 * MDIO interface. Return true if it's MDIO.
7243 */
7244 static bool
7245 wm_sgmii_uses_mdio(struct wm_softc *sc)
7246 {
7247 uint32_t reg;
7248 bool ismdio = false;
7249
7250 switch (sc->sc_type) {
7251 case WM_T_82575:
7252 case WM_T_82576:
7253 reg = CSR_READ(sc, WMREG_MDIC);
7254 ismdio = ((reg & MDIC_DEST) != 0);
7255 break;
7256 case WM_T_82580:
7257 case WM_T_82580ER:
7258 case WM_T_I350:
7259 case WM_T_I354:
7260 case WM_T_I210:
7261 case WM_T_I211:
7262 reg = CSR_READ(sc, WMREG_MDICNFG);
7263 ismdio = ((reg & MDICNFG_DEST) != 0);
7264 break;
7265 default:
7266 break;
7267 }
7268
7269 return ismdio;
7270 }
7271
7272 /*
7273 * wm_sgmii_readreg: [mii interface function]
7274 *
7275 * Read a PHY register on the SGMII.
7276 * This could be handled by the PHY layer if we didn't have to lock
7277 * the resource ...
7278 */
7279 static int
7280 wm_sgmii_readreg(device_t self, int phy, int reg)
7281 {
7282 struct wm_softc *sc = device_private(self);
7283 uint32_t i2ccmd;
7284 int i, rv;
7285
7286 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7287 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7288 __func__);
7289 return 0;
7290 }
7291
7292 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7293 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7294 | I2CCMD_OPCODE_READ;
7295 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7296
7297 /* Poll the ready bit */
7298 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7299 delay(50);
7300 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7301 if (i2ccmd & I2CCMD_READY)
7302 break;
7303 }
7304 if ((i2ccmd & I2CCMD_READY) == 0)
7305 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7306 if ((i2ccmd & I2CCMD_ERROR) != 0)
7307 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7308
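/* The I2CCMD data field is byte-swapped, so swap the two bytes. */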
7309 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7310
7311 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7312 return rv;
7313 }
7314
7315 /*
7316 * wm_sgmii_writereg: [mii interface function]
7317 *
7318 * Write a PHY register on the SGMII.
7319 * This could be handled by the PHY layer if we didn't have to lock
7320 * the resource ...
7321 */
7322 static void
7323 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7324 {
7325 struct wm_softc *sc = device_private(self);
7326 uint32_t i2ccmd;
7327 int i;
7328
7329 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7330 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7331 __func__);
7332 return;
7333 }
7334
7335 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7336 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7337 | I2CCMD_OPCODE_WRITE;
7338 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7339
7340 /* Poll the ready bit */
7341 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7342 delay(50);
7343 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7344 if (i2ccmd & I2CCMD_READY)
7345 break;
7346 }
7347 if ((i2ccmd & I2CCMD_READY) == 0)
7348 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7349 if ((i2ccmd & I2CCMD_ERROR) != 0)
7350 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7351
7352 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7353 }
7354
7355 /*
7356 * wm_gmii_82580_readreg: [mii interface function]
7357 *
7358 * Read a PHY register on the 82580 and I350.
7359 * This could be handled by the PHY layer if we didn't have to lock
7360 * the resource ...
7361 */
7362 static int
7363 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7364 {
7365 struct wm_softc *sc = device_private(self);
7366 int sem;
7367 int rv;
7368
7369 sem = swfwphysem[sc->sc_funcid];
7370 if (wm_get_swfw_semaphore(sc, sem)) {
7371 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7372 __func__);
7373 return 0;
7374 }
7375
7376 rv = wm_gmii_i82544_readreg(self, phy, reg);
7377
7378 wm_put_swfw_semaphore(sc, sem);
7379 return rv;
7380 }
7381
7382 /*
7383 * wm_gmii_82580_writereg: [mii interface function]
7384 *
7385 * Write a PHY register on the 82580 and I350.
7386 * This could be handled by the PHY layer if we didn't have to lock
7387 * the resource ...
7388 */
7389 static void
7390 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7391 {
7392 struct wm_softc *sc = device_private(self);
7393 int sem;
7394
7395 sem = swfwphysem[sc->sc_funcid];
7396 if (wm_get_swfw_semaphore(sc, sem)) {
7397 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7398 __func__);
7399 return;
7400 }
7401
7402 wm_gmii_i82544_writereg(self, phy, reg, val);
7403
7404 wm_put_swfw_semaphore(sc, sem);
7405 }
7406
7407 /*
7408 * wm_gmii_statchg: [mii interface function]
7409 *
7410 * Callback from MII layer when media changes.
7411 */
7412 static void
7413 wm_gmii_statchg(struct ifnet *ifp)
7414 {
7415 struct wm_softc *sc = ifp->if_softc;
7416 struct mii_data *mii = &sc->sc_mii;
7417
7418 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7419 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7420 sc->sc_fcrtl &= ~FCRTL_XONE;
7421
7422 /*
7423 * Get flow control negotiation result.
7424 */
7425 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7426 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7427 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7428 mii->mii_media_active &= ~IFM_ETH_FMASK;
7429 }
7430
7431 if (sc->sc_flowflags & IFM_FLOW) {
7432 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7433 sc->sc_ctrl |= CTRL_TFCE;
7434 sc->sc_fcrtl |= FCRTL_XONE;
7435 }
7436 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7437 sc->sc_ctrl |= CTRL_RFCE;
7438 }
7439
7440 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7441 DPRINTF(WM_DEBUG_LINK,
7442 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7443 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7444 } else {
7445 DPRINTF(WM_DEBUG_LINK,
7446 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7447 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7448 }
7449
7450 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7451 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7452 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7453 : WMREG_FCRTL, sc->sc_fcrtl);
7454 if (sc->sc_type == WM_T_80003) {
7455 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7456 case IFM_1000_T:
7457 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7458 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7459 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7460 break;
7461 default:
7462 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7463 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7464 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7465 break;
7466 }
7467 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7468 }
7469 }
7470
7471 /*
7472 * wm_kmrn_readreg:
7473 *
7474 * Read a kumeran register
7475 */
7476 static int
7477 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7478 {
7479 int rv;
7480
7481 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7482 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7483 aprint_error_dev(sc->sc_dev,
7484 "%s: failed to get semaphore\n", __func__);
7485 return 0;
7486 }
7487 } else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7488 if (wm_get_swfwhw_semaphore(sc)) {
7489 aprint_error_dev(sc->sc_dev,
7490 "%s: failed to get semaphore\n", __func__);
7491 return 0;
7492 }
7493 }
7494
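/*
 * A Kumeran read is posted: write the register offset with the REN
 * bit set, then fetch the data after a short delay.
 */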
7495 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7496 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7497 KUMCTRLSTA_REN);
7498 CSR_WRITE_FLUSH(sc);
7499 delay(2);
7500
7501 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7502
7503 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7504 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7505 else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7506 wm_put_swfwhw_semaphore(sc);
7507
7508 return rv;
7509 }
7510
7511 /*
7512 * wm_kmrn_writereg:
7513 *
7514 * Write a kumeran register
7515 */
7516 static void
7517 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7518 {
7519
7520 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7521 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7522 aprint_error_dev(sc->sc_dev,
7523 "%s: failed to get semaphore\n", __func__);
7524 return;
7525 }
7526 } else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7527 if (wm_get_swfwhw_semaphore(sc)) {
7528 aprint_error_dev(sc->sc_dev,
7529 "%s: failed to get semaphore\n", __func__);
7530 return;
7531 }
7532 }
7533
7534 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7535 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7536 (val & KUMCTRLSTA_MASK));
7537
7538 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7539 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7540 else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7541 wm_put_swfwhw_semaphore(sc);
7542 }
7543
7544 static int
7545 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7546 {
7547 uint32_t eecd = 0;
7548
7549 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7550 || sc->sc_type == WM_T_82583) {
7551 eecd = CSR_READ(sc, WMREG_EECD);
7552
7553 /* Isolate bits 15 & 16 */
7554 eecd = ((eecd >> 15) & 0x03);
7555
7556 /* If both bits are set, device is Flash type */
7557 if (eecd == 0x03)
7558 return 0;
7559 }
7560 return 1;
7561 }
7562
7563 static int
7564 wm_get_swsm_semaphore(struct wm_softc *sc)
7565 {
7566 int32_t timeout;
7567 uint32_t swsm;
7568
7569 /* Get the FW semaphore. */
7570 timeout = 1000 + 1; /* XXX */
7571 while (timeout) {
7572 swsm = CSR_READ(sc, WMREG_SWSM);
7573 swsm |= SWSM_SWESMBI;
7574 CSR_WRITE(sc, WMREG_SWSM, swsm);
7575 /* if we managed to set the bit we got the semaphore. */
7576 swsm = CSR_READ(sc, WMREG_SWSM);
7577 if (swsm & SWSM_SWESMBI)
7578 break;
7579
7580 delay(50);
7581 timeout--;
7582 }
7583
7584 if (timeout == 0) {
7585 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7586 /* Release semaphores */
7587 wm_put_swsm_semaphore(sc);
7588 return 1;
7589 }
7590 return 0;
7591 }
7592
7593 static void
7594 wm_put_swsm_semaphore(struct wm_softc *sc)
7595 {
7596 uint32_t swsm;
7597
7598 swsm = CSR_READ(sc, WMREG_SWSM);
7599 swsm &= ~(SWSM_SWESMBI);
7600 CSR_WRITE(sc, WMREG_SWSM, swsm);
7601 }
7602
7603 static int
7604 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7605 {
7606 uint32_t swfw_sync;
7607 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7608 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7609 int timeout;
7610
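/*
 * SW_FW_SYNC is itself guarded by the SWSM semaphore on devices that
 * have one: take that first, then claim our soft bit only if neither
 * software nor firmware currently owns the resource.
 */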
7611 for (timeout = 0; timeout < 200; timeout++) {
7612 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7613 if (wm_get_swsm_semaphore(sc)) {
7614 aprint_error_dev(sc->sc_dev,
7615 "%s: failed to get semaphore\n",
7616 __func__);
7617 return 1;
7618 }
7619 }
7620 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7621 if ((swfw_sync & (swmask | fwmask)) == 0) {
7622 swfw_sync |= swmask;
7623 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7624 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7625 wm_put_swsm_semaphore(sc);
7626 return 0;
7627 }
7628 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7629 wm_put_swsm_semaphore(sc);
7630 delay(5000);
7631 }
7632 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7633 device_xname(sc->sc_dev), mask, swfw_sync);
7634 return 1;
7635 }
7636
7637 static void
7638 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7639 {
7640 uint32_t swfw_sync;
7641
7642 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7643 while (wm_get_swsm_semaphore(sc) != 0)
7644 continue;
7645 }
7646 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7647 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7648 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7649 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7650 wm_put_swsm_semaphore(sc);
7651 }
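
/*
 * Example (illustrative fragment, not compiled): SW/FW-synchronized access
 * takes a per-resource mask.  SWFW_PHY0_SM is an existing mask in this
 * driver; the error path is a sketch.
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "failed to get semaphore\n");
		return;
	}
	/* ... access PHY 0 registers here ... */
	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
#endif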
7652
7653 static int
7654 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7655 {
7656 uint32_t ext_ctrl;
7657 	int timeout;
7658 
7659 	for (timeout = 0; timeout < 200; timeout++) {
7660 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7661 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7662 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7663
7664 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7665 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7666 return 0;
7667 delay(5000);
7668 }
7669 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7670 device_xname(sc->sc_dev), ext_ctrl);
7671 return 1;
7672 }
7673
7674 static void
7675 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7676 {
7677 uint32_t ext_ctrl;
7678 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7679 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7680 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7681 }
7682
7683 static int
7684 wm_get_hw_semaphore_82573(struct wm_softc *sc)
7685 {
7686 int i = 0;
7687 uint32_t reg;
7688
7689 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7690 do {
7691 CSR_WRITE(sc, WMREG_EXTCNFCTR,
7692 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
7693 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7694 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
7695 break;
7696 delay(2*1000);
7697 i++;
7698 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
7699
7700 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
7701 wm_put_hw_semaphore_82573(sc);
7702 log(LOG_ERR, "%s: Driver can't access the PHY\n",
7703 device_xname(sc->sc_dev));
7704 return -1;
7705 }
7706
7707 return 0;
7708 }
7709
7710 static void
7711 wm_put_hw_semaphore_82573(struct wm_softc *sc)
7712 {
7713 uint32_t reg;
7714
7715 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7716 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
7717 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
7718 }
7719
7720 static int
7721 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7722 {
7723 uint32_t eecd;
7724 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7725 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7726 uint8_t sig_byte = 0;
7727
7728 switch (sc->sc_type) {
7729 case WM_T_ICH8:
7730 case WM_T_ICH9:
7731 eecd = CSR_READ(sc, WMREG_EECD);
7732 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7733 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7734 return 0;
7735 }
7736 /* FALLTHROUGH */
7737 default:
7738 /* Default to 0 */
7739 *bank = 0;
7740
7741 /* Check bank 0 */
7742 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7743 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7744 *bank = 0;
7745 return 0;
7746 }
7747
7748 /* Check bank 1 */
7749 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7750 &sig_byte);
7751 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7752 *bank = 1;
7753 return 0;
7754 }
7755 }
7756
7757 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
7758 device_xname(sc->sc_dev)));
7759 return -1;
7760 }
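
/*
 * Example (illustrative fragment, not compiled): turning the detected bank
 * into a byte offset, as wm_read_eeprom_ich8() below does.  Bank 1 starts
 * sc_ich8_flash_bank_size 16-bit words (twice that many bytes) into the
 * flash region.
 */
#if 0
	unsigned int bank;
	uint32_t byte_off;

	if (wm_valid_nvm_bank_detect_ich8lan(sc, &bank) == 0) {
		byte_off = bank * (sc->sc_ich8_flash_bank_size * 2);
		/* ... read NVM bytes starting at byte_off ... */
	}
#endif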
7761
7762 /******************************************************************************
7763 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7764 * register.
7765 *
7766 * sc - Struct containing variables accessed by shared code
7767 * offset - offset of word in the EEPROM to read
7768 * data - word read from the EEPROM
7769 * words - number of words to read
7770 *****************************************************************************/
7771 static int
7772 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7773 {
7774 int32_t error = 0;
7775 uint32_t flash_bank = 0;
7776 uint32_t act_offset = 0;
7777 uint32_t bank_offset = 0;
7778 uint16_t word = 0;
7779 uint16_t i = 0;
7780
7781 	/*
7782 	 * We need to know which is the valid flash bank.  In the event that
7783 	 * we didn't allocate eeprom_shadow_ram, we may not be managing
7784 	 * flash_bank, so it cannot be trusted and must be updated per read.
7785 	 */
7786 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7787 if (error) {
7788 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7789 __func__);
7790 flash_bank = 0;
7791 }
7792
7793 	/*
7794 	 * Compute the byte offset of the start of the selected bank; the
7795 	 * bank size is counted in 16-bit words, hence the * 2.
7796 	 */
7797 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7798
7799 error = wm_get_swfwhw_semaphore(sc);
7800 if (error) {
7801 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7802 __func__);
7803 return error;
7804 }
7805
7806 for (i = 0; i < words; i++) {
7807 /* The NVM part needs a byte offset, hence * 2 */
7808 act_offset = bank_offset + ((offset + i) * 2);
7809 error = wm_read_ich8_word(sc, act_offset, &word);
7810 if (error) {
7811 aprint_error_dev(sc->sc_dev,
7812 "%s: failed to read NVM\n", __func__);
7813 break;
7814 }
7815 data[i] = word;
7816 }
7817
7818 wm_put_swfwhw_semaphore(sc);
7819 return error;
7820 }
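
/*
 * Example (illustrative fragment, not compiled): reading one NVM word via
 * the routine above.  EEPROM_OFF_CFG2 is an existing offset macro in this
 * driver; the error message is a sketch.
 */
#if 0
	uint16_t cfg2;

	if (wm_read_eeprom_ich8(sc, EEPROM_OFF_CFG2, 1, &cfg2) != 0)
		aprint_error_dev(sc->sc_dev, "NVM read failed\n");
#endif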
7821
7822 /******************************************************************************
7823 * This function does initial flash setup so that a new read/write/erase cycle
7824 * can be started.
7825 *
7826 * sc - The pointer to the hw structure
7827 ****************************************************************************/
7828 static int32_t
7829 wm_ich8_cycle_init(struct wm_softc *sc)
7830 {
7831 uint16_t hsfsts;
7832 int32_t error = 1;
7833 int32_t i = 0;
7834
7835 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7836
7837 	/* Check that the Flash Descriptor Valid bit is set in HW status */
7838 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
7839 		return error;
7840 
7841 	/* Clear FCERR and DAEL in HW status by writing 1s */
7842 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7845
7846 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7847
7848 	/*
7849 	 * Either a hardware "SPI cycle in progress" bit must be available to
7850 	 * check before starting a new cycle, or the FDONE bit must be set by
7851 	 * hardware after reset so that it can be used to tell whether a
7852 	 * cycle is in progress or has completed.  There should also be a
7853 	 * software semaphore guarding FDONE / the cycle-in-progress bit, so
7854 	 * that accesses from two threads are serialized and two threads
7855 	 * don't start a cycle at the same time.
7856 	 */
7858
7859 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7860 /*
7861 * There is no cycle running at present, so we can start a
7862 * cycle
7863 */
7864
7865 /* Begin by setting Flash Cycle Done. */
7866 hsfsts |= HSFSTS_DONE;
7867 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7868 error = 0;
7869 } else {
7870 		/*
7871 		 * Otherwise, poll for a while so the current cycle has a
7872 		 * chance to end before giving up.
7873 		 */
7874 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7875 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7876 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7877 error = 0;
7878 break;
7879 }
7880 delay(1);
7881 }
7882 if (error == 0) {
7883 			/*
7884 			 * The previous cycle finished within the timeout;
7885 			 * now set the Flash Cycle Done bit.
7886 			 */
7887 hsfsts |= HSFSTS_DONE;
7888 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7889 }
7890 }
7891 return error;
7892 }
7893
7894 /******************************************************************************
7895 * This function starts a flash cycle and waits for its completion
7896 *
7897 * sc - The pointer to the hw structure
7898 ****************************************************************************/
7899 static int32_t
7900 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7901 {
7902 uint16_t hsflctl;
7903 uint16_t hsfsts;
7904 int32_t error = 1;
7905 uint32_t i = 0;
7906
7907 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7908 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7909 hsflctl |= HSFCTL_GO;
7910 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7911
7912 /* wait till FDONE bit is set to 1 */
7913 do {
7914 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7915 if (hsfsts & HSFSTS_DONE)
7916 break;
7917 delay(1);
7918 i++;
7919 } while (i < timeout);
7920 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
7921 error = 0;
7922
7923 return error;
7924 }
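
/*
 * Example (illustrative fragment, not compiled): the init/program/go
 * sequence that the two routines above implement, as used for real by
 * wm_read_ich8_data() below.
 */
#if 0
	if (wm_ich8_cycle_init(sc) == 0) {
		/* ... program HSFCTL, FADDR (and FDATA0 for a write) ... */
		if (wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT) == 0) {
			/* cycle completed without FCERR */
		}
	}
#endif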
7925
7926 /******************************************************************************
7927 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7928 *
7929 * sc - The pointer to the hw structure
7930 * index - The index of the byte or word to read.
7931 * size - Size of data to read, 1=byte 2=word
7932 * data - Pointer to the word to store the value read.
7933 *****************************************************************************/
7934 static int32_t
7935 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7936 uint32_t size, uint16_t* data)
7937 {
7938 uint16_t hsfsts;
7939 uint16_t hsflctl;
7940 uint32_t flash_linear_address;
7941 uint32_t flash_data = 0;
7942 int32_t error = 1;
7943 int32_t count = 0;
7944
7945 	if (size < 1 || size > 2 || data == NULL
7946 	    || index > ICH_FLASH_LINEAR_ADDR_MASK)
7947 return error;
7948
7949 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7950 sc->sc_ich8_flash_base;
7951
7952 do {
7953 delay(1);
7954 /* Steps */
7955 error = wm_ich8_cycle_init(sc);
7956 if (error)
7957 break;
7958
7959 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7960 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7961 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7962 & HSFCTL_BCOUNT_MASK;
7963 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7964 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7965
7966 /*
7967 * Write the last 24 bits of index into Flash Linear address
7968 * field in Flash Address
7969 */
7970 		/* TODO: maybe check the index against the size of the flash */
7971
7972 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7973
7974 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7975
7976 		/*
7977 		 * Check if FCERR is set.  If it is, clear it and retry the
7978 		 * whole sequence (up to ICH_FLASH_CYCLE_REPEAT_COUNT times);
7979 		 * otherwise read in (shift in) Flash Data0, least
7980 		 * significant byte first.
7981 		 */
7982 if (error == 0) {
7983 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7984 if (size == 1)
7985 *data = (uint8_t)(flash_data & 0x000000FF);
7986 else if (size == 2)
7987 *data = (uint16_t)(flash_data & 0x0000FFFF);
7988 break;
7989 } else {
7990 /*
7991 * If we've gotten here, then things are probably
7992 * completely hosed, but if the error condition is
7993 * detected, it won't hurt to give it another try...
7994 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7995 */
7996 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7997 if (hsfsts & HSFSTS_ERR) {
7998 /* Repeat for some time before giving up. */
7999 continue;
8000 } else if ((hsfsts & HSFSTS_DONE) == 0)
8001 break;
8002 }
8003 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8004
8005 return error;
8006 }
8007
8008 /******************************************************************************
8009 * Reads a single byte from the NVM using the ICH8 flash access registers.
8010 *
8011 * sc - pointer to wm_hw structure
8012 * index - The index of the byte to read.
8013 * data - Pointer to a byte to store the value read.
8014 *****************************************************************************/
8015 static int32_t
8016 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8017 {
8018 int32_t status;
8019 uint16_t word = 0;
8020
8021 status = wm_read_ich8_data(sc, index, 1, &word);
8022 if (status == 0)
8023 *data = (uint8_t)word;
8024 else
8025 *data = 0;
8026
8027 return status;
8028 }
8029
8030 /******************************************************************************
8031 * Reads a word from the NVM using the ICH8 flash access registers.
8032 *
8033 * sc - pointer to wm_hw structure
8034 * index - The starting byte index of the word to read.
8035 * data - Pointer to a word to store the value read.
8036 *****************************************************************************/
8037 static int32_t
8038 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8039 {
8040 int32_t status;
8041
8042 status = wm_read_ich8_data(sc, index, 2, data);
8043 return status;
8044 }
8045
8046 static int
8047 wm_check_mng_mode(struct wm_softc *sc)
8048 {
8049 int rv;
8050
8051 switch (sc->sc_type) {
8052 case WM_T_ICH8:
8053 case WM_T_ICH9:
8054 case WM_T_ICH10:
8055 case WM_T_PCH:
8056 case WM_T_PCH2:
8057 case WM_T_PCH_LPT:
8058 rv = wm_check_mng_mode_ich8lan(sc);
8059 break;
8060 case WM_T_82574:
8061 case WM_T_82583:
8062 rv = wm_check_mng_mode_82574(sc);
8063 break;
8064 case WM_T_82571:
8065 case WM_T_82572:
8066 case WM_T_82573:
8067 case WM_T_80003:
8068 rv = wm_check_mng_mode_generic(sc);
8069 break;
8070 default:
8071 		/* nothing to do */
8072 rv = 0;
8073 break;
8074 }
8075
8076 return rv;
8077 }
8078
8079 static int
8080 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8081 {
8082 uint32_t fwsm;
8083
8084 fwsm = CSR_READ(sc, WMREG_FWSM);
8085
8086 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8087 return 1;
8088
8089 return 0;
8090 }
8091
8092 static int
8093 wm_check_mng_mode_82574(struct wm_softc *sc)
8094 {
8095 uint16_t data;
8096
8097 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8098
8099 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8100 return 1;
8101
8102 return 0;
8103 }
8104
8105 static int
8106 wm_check_mng_mode_generic(struct wm_softc *sc)
8107 {
8108 uint32_t fwsm;
8109
8110 fwsm = CSR_READ(sc, WMREG_FWSM);
8111
8112 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8113 return 1;
8114
8115 return 0;
8116 }
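
/*
 * Example (illustrative fragment, not compiled): the FWSM checks above all
 * compare the mode field against a mode value shifted into place;
 * equivalently, the field can be extracted first.
 */
#if 0
	uint32_t mode;

	mode = (CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) >> FWSM_MODE_SHIFT;
	if (mode == MNG_IAMT_MODE) {
		/* manageability (iAMT) firmware is active */
	}
#endif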
8117
8118 static int
8119 wm_enable_mng_pass_thru(struct wm_softc *sc)
8120 {
8121 uint32_t manc, fwsm, factps;
8122
8123 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8124 return 0;
8125
8126 manc = CSR_READ(sc, WMREG_MANC);
8127
8128 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8129 device_xname(sc->sc_dev), manc));
8130 if ((manc & MANC_RECV_TCO_EN) == 0)
8131 return 0;
8132
8133 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8134 fwsm = CSR_READ(sc, WMREG_FWSM);
8135 factps = CSR_READ(sc, WMREG_FACTPS);
8136 if (((factps & FACTPS_MNGCG) == 0)
8137 && ((fwsm & FWSM_MODE_MASK)
8138 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8139 return 1;
8140 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
8141 uint16_t data;
8142
8143 factps = CSR_READ(sc, WMREG_FACTPS);
8144 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8145 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8146 device_xname(sc->sc_dev), factps, data));
8147 if (((factps & FACTPS_MNGCG) == 0)
8148 && ((data & EEPROM_CFG2_MNGM_MASK)
8149 == (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8150 return 1;
8151 } else if (((manc & MANC_SMBUS_EN) != 0)
8152 && ((manc & MANC_ASF_EN) == 0))
8153 return 1;
8154
8155 return 0;
8156 }
8157
8158 static int
8159 wm_check_reset_block(struct wm_softc *sc)
8160 {
8161 uint32_t reg;
8162
8163 switch (sc->sc_type) {
8164 case WM_T_ICH8:
8165 case WM_T_ICH9:
8166 case WM_T_ICH10:
8167 case WM_T_PCH:
8168 case WM_T_PCH2:
8169 case WM_T_PCH_LPT:
8170 reg = CSR_READ(sc, WMREG_FWSM);
8171 if ((reg & FWSM_RSPCIPHY) != 0)
8172 return 0;
8173 else
8174 return -1;
8175 break;
8176 case WM_T_82571:
8177 case WM_T_82572:
8178 case WM_T_82573:
8179 case WM_T_82574:
8180 case WM_T_82583:
8181 case WM_T_80003:
8182 reg = CSR_READ(sc, WMREG_MANC);
8183 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8184 return -1;
8185 else
8186 return 0;
8187 break;
8188 default:
8189 /* no problem */
8190 break;
8191 }
8192
8193 return 0;
8194 }
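
/*
 * Example (illustrative fragment, not compiled): callers are expected to
 * guard PHY resets with the check above, since firmware may own the PHY;
 * the message is a sketch.
 */
#if 0
	if (wm_check_reset_block(sc) == 0)
		wm_gmii_reset(sc);
	else
		aprint_verbose_dev(sc->sc_dev, "PHY reset is blocked\n");
#endif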
8195
8196 static void
8197 wm_get_hw_control(struct wm_softc *sc)
8198 {
8199 uint32_t reg;
8200
8201 switch (sc->sc_type) {
8202 case WM_T_82573:
8203 reg = CSR_READ(sc, WMREG_SWSM);
8204 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8205 break;
8206 case WM_T_82571:
8207 case WM_T_82572:
8208 case WM_T_82574:
8209 case WM_T_82583:
8210 case WM_T_80003:
8211 case WM_T_ICH8:
8212 case WM_T_ICH9:
8213 case WM_T_ICH10:
8214 case WM_T_PCH:
8215 case WM_T_PCH2:
8216 case WM_T_PCH_LPT:
8217 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8218 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8219 break;
8220 default:
8221 break;
8222 }
8223 }
8224
8225 static void
8226 wm_release_hw_control(struct wm_softc *sc)
8227 {
8228 uint32_t reg;
8229
8230 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8231 return;
8232
8233 if (sc->sc_type == WM_T_82573) {
8234 reg = CSR_READ(sc, WMREG_SWSM);
8235 reg &= ~SWSM_DRV_LOAD;
8236 		CSR_WRITE(sc, WMREG_SWSM, reg);
8237 } else {
8238 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8239 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8240 }
8241 }
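
/*
 * Example (illustrative fragment, not compiled): DRV_LOAD ownership is
 * taken while the driver is active and dropped again on detach or suspend,
 * as wm_suspend() below does.
 */
#if 0
	wm_get_hw_control(sc);		/* init/attach path */
	/* ... the driver owns the hardware ... */
	wm_release_hw_control(sc);	/* detach/suspend path */
#endif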
8242
8243 /* XXX Currently TBI only */
8244 static int
8245 wm_check_for_link(struct wm_softc *sc)
8246 {
8247 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8248 uint32_t rxcw;
8249 uint32_t ctrl;
8250 uint32_t status;
8251 uint32_t sig;
8252
8253 rxcw = CSR_READ(sc, WMREG_RXCW);
8254 ctrl = CSR_READ(sc, WMREG_CTRL);
8255 status = CSR_READ(sc, WMREG_STATUS);
8256
8257 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8258
8259 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8260 device_xname(sc->sc_dev), __func__,
8261 ((ctrl & CTRL_SWDPIN(1)) == sig),
8262 ((status & STATUS_LU) != 0),
8263 ((rxcw & RXCW_C) != 0)
8264 ));
8265
8266 /*
8267 * SWDPIN LU RXCW
8268 * 0 0 0
8269 * 0 0 1 (should not happen)
8270 * 0 1 0 (should not happen)
8271 * 0 1 1 (should not happen)
8272 * 1 0 0 Disable autonego and force linkup
8273 * 1 0 1 got /C/ but not linkup yet
8274 * 1 1 0 (linkup)
8275 * 1 1 1 If IFM_AUTO, back to autonego
8276 *
8277 */
8278 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8279 && ((status & STATUS_LU) == 0)
8280 && ((rxcw & RXCW_C) == 0)) {
8281 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8282 __func__));
8283 sc->sc_tbi_linkup = 0;
8284 /* Disable auto-negotiation in the TXCW register */
8285 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8286
8287 		/*
8288 		 * Force link-up and also force full-duplex.
8289 		 *
8290 		 * NOTE: hardware may have updated the TFCE and RFCE bits in
8291 		 * CTRL automatically, so refresh sc->sc_ctrl from ctrl.
8292 		 */
8293 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8294 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8295 } else if (((status & STATUS_LU) != 0)
8296 && ((rxcw & RXCW_C) != 0)
8297 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8298 sc->sc_tbi_linkup = 1;
8299 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8300 __func__));
8301 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8302 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8303 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8304 && ((rxcw & RXCW_C) != 0)) {
8305 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8306 } else {
8307 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8308 status));
8309 }
8310
8311 return 0;
8312 }
8313
8314 /* Work-around for 82566 Kumeran PCS lock loss */
8315 static void
8316 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8317 {
8318 int miistatus, active, i;
8319 int reg;
8320
8321 miistatus = sc->sc_mii.mii_media_status;
8322
8323 	/* If the link is not up, do nothing */
8324 	if ((miistatus & IFM_ACTIVE) == 0)
8325 return;
8326
8327 active = sc->sc_mii.mii_media_active;
8328
8329 /* Nothing to do if the link is other than 1Gbps */
8330 if (IFM_SUBTYPE(active) != IFM_1000_T)
8331 return;
8332
8333 for (i = 0; i < 10; i++) {
8334 /* read twice */
8335 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8336 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8337 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
8338 goto out; /* GOOD! */
8339
8340 /* Reset the PHY */
8341 wm_gmii_reset(sc);
8342 delay(5*1000);
8343 }
8344
8345 /* Disable GigE link negotiation */
8346 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8347 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8348 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8349
8350 /*
8351 * Call gig speed drop workaround on Gig disable before accessing
8352 * any PHY registers.
8353 */
8354 wm_gig_downshift_workaround_ich8lan(sc);
8355
8356 out:
8357 return;
8358 }
8359
8360 /* Workaround: WOL from S5 stops working without this */
8361 static void
8362 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8363 {
8364 uint16_t kmrn_reg;
8365
8366 /* Only for igp3 */
8367 if (sc->sc_phytype == WMPHY_IGP_3) {
8368 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8369 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8370 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8371 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8372 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8373 }
8374 }
8375
8376 #ifdef WM_WOL
8377 /* Power down workaround on D3 */
8378 static void
8379 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8380 {
8381 uint32_t reg;
8382 int i;
8383
8384 for (i = 0; i < 2; i++) {
8385 /* Disable link */
8386 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8387 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8388 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8389
8390 /*
8391 * Call gig speed drop workaround on Gig disable before
8392 * accessing any PHY registers
8393 */
8394 if (sc->sc_type == WM_T_ICH8)
8395 wm_gig_downshift_workaround_ich8lan(sc);
8396
8397 /* Write VR power-down enable */
8398 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8399 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8400 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8401 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8402
8403 /* Read it back and test */
8404 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8405 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8406 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8407 break;
8408
8409 /* Issue PHY reset and repeat at most one more time */
8410 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8411 }
8412 }
8413 #endif /* WM_WOL */
8414
8415 /*
8416 * Workaround for pch's PHYs
8417 * XXX should be moved to new PHY driver?
8418 */
8419 static void
8420 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8421 {
8422 if (sc->sc_phytype == WMPHY_82577)
8423 wm_set_mdio_slow_mode_hv(sc);
8424
8425 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8426
8427 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8428
8429 /* 82578 */
8430 if (sc->sc_phytype == WMPHY_82578) {
8431 /* PCH rev. < 3 */
8432 if (sc->sc_rev < 3) {
8433 /* XXX 6 bit shift? Why? Is it page2? */
8434 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8435 0x66c0);
8436 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8437 0xffff);
8438 }
8439
8440 /* XXX phy rev. < 2 */
8441 }
8442
8443 /* Select page 0 */
8444
8445 /* XXX acquire semaphore */
8446 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8447 /* XXX release semaphore */
8448
8449 /*
8450 * Configure the K1 Si workaround during phy reset assuming there is
8451 * link so that it disables K1 if link is in 1Gbps.
8452 */
8453 wm_k1_gig_workaround_hv(sc, 1);
8454 }
8455
8456 static void
8457 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8458 {
8459
8460 wm_set_mdio_slow_mode_hv(sc);
8461 }
8462
8463 static void
8464 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8465 {
8466 int k1_enable = sc->sc_nvm_k1_enabled;
8467
8468 /* XXX acquire semaphore */
8469
8470 if (link) {
8471 k1_enable = 0;
8472
8473 /* Link stall fix for link up */
8474 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8475 } else {
8476 /* Link stall fix for link down */
8477 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8478 }
8479
8480 wm_configure_k1_ich8lan(sc, k1_enable);
8481
8482 /* XXX release semaphore */
8483 }
8484
8485 static void
8486 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8487 {
8488 uint32_t reg;
8489
8490 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8491 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8492 reg | HV_KMRN_MDIO_SLOW);
8493 }
8494
8495 static void
8496 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8497 {
8498 uint32_t ctrl, ctrl_ext, tmp;
8499 uint16_t kmrn_reg;
8500
8501 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8502
8503 if (k1_enable)
8504 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8505 else
8506 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8507
8508 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8509
8510 delay(20);
8511
8512 ctrl = CSR_READ(sc, WMREG_CTRL);
8513 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8514
8515 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8516 tmp |= CTRL_FRCSPD;
8517
8518 CSR_WRITE(sc, WMREG_CTRL, tmp);
8519 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8520 CSR_WRITE_FLUSH(sc);
8521 delay(20);
8522
8523 CSR_WRITE(sc, WMREG_CTRL, ctrl);
8524 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8525 CSR_WRITE_FLUSH(sc);
8526 delay(20);
8527 }
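
/*
 * Example (illustrative fragment, not compiled): the K1 configuration
 * above is driven by link state; passing 1 as the second argument, as
 * wm_hv_phy_workaround_ich8lan() above does, disables K1 while a 1Gbps
 * link is assumed up.
 */
#if 0
	wm_k1_gig_workaround_hv(sc,
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_LU) != 0);
#endif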
8528
8529 static void
8530 wm_smbustopci(struct wm_softc *sc)
8531 {
8532 uint32_t fwsm;
8533
8534 fwsm = CSR_READ(sc, WMREG_FWSM);
8535 if (((fwsm & FWSM_FW_VALID) == 0)
8536 && ((wm_check_reset_block(sc) == 0))) {
8537 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8538 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8539 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8540 CSR_WRITE_FLUSH(sc);
8541 delay(10);
8542 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8543 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8544 CSR_WRITE_FLUSH(sc);
8545 delay(50*1000);
8546
8547 /*
8548 * Gate automatic PHY configuration by hardware on non-managed
8549 * 82579
8550 */
8551 if (sc->sc_type == WM_T_PCH2)
8552 wm_gate_hw_phy_config_ich8lan(sc, 1);
8553 }
8554 }
8555
8556 static void
8557 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8558 {
8559 uint32_t gcr;
8560 pcireg_t ctrl2;
8561
8562 gcr = CSR_READ(sc, WMREG_GCR);
8563
8564 /* Only take action if timeout value is defaulted to 0 */
8565 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8566 goto out;
8567
8568 if ((gcr & GCR_CAP_VER2) == 0) {
8569 gcr |= GCR_CMPL_TMOUT_10MS;
8570 goto out;
8571 }
8572
8573 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8574 sc->sc_pcixe_capoff + PCIE_DCSR2);
8575 ctrl2 |= WM_PCIE_DCSR2_16MS;
8576 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8577 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8578
8579 out:
8580 /* Disable completion timeout resend */
8581 gcr &= ~GCR_CMPL_TMOUT_RESEND;
8582
8583 CSR_WRITE(sc, WMREG_GCR, gcr);
8584 }
8585
8586 /* Special case - the 82575 needs manual init ... */
8587 static void
8588 wm_reset_init_script_82575(struct wm_softc *sc)
8589 {
8590 	/*
8591 	 * Remark: this is untested code - we have no board without EEPROM.
8592 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
8593 	 */
8594
8595 /* SerDes configuration via SERDESCTRL */
8596 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8597 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8598 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8599 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8600
8601 /* CCM configuration via CCMCTL register */
8602 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8603 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8604
8605 /* PCIe lanes configuration */
8606 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8607 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8608 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8609 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8610
8611 /* PCIe PLL Configuration */
8612 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8613 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8614 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8615 }
8616
8617 static void
8618 wm_init_manageability(struct wm_softc *sc)
8619 {
8620
8621 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8622 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8623 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8624
8625 		/* disable hardware interception of ARP */
8626 manc &= ~MANC_ARP_EN;
8627
8628 /* enable receiving management packets to the host */
8629 if (sc->sc_type >= WM_T_82571) {
8630 manc |= MANC_EN_MNG2HOST;
8631 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8632 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8633 		}
8635
8636 CSR_WRITE(sc, WMREG_MANC, manc);
8637 }
8638 }
8639
8640 static void
8641 wm_release_manageability(struct wm_softc *sc)
8642 {
8643
8644 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8645 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8646
8647 manc |= MANC_ARP_EN;
8648 if (sc->sc_type >= WM_T_82571)
8649 manc &= ~MANC_EN_MNG2HOST;
8650
8651 CSR_WRITE(sc, WMREG_MANC, manc);
8652 }
8653 }
8654
8655 static void
8656 wm_get_wakeup(struct wm_softc *sc)
8657 {
8658
8659 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8660 switch (sc->sc_type) {
8661 case WM_T_82573:
8662 case WM_T_82583:
8663 sc->sc_flags |= WM_F_HAS_AMT;
8664 /* FALLTHROUGH */
8665 case WM_T_80003:
8666 case WM_T_82541:
8667 case WM_T_82547:
8668 case WM_T_82571:
8669 case WM_T_82572:
8670 case WM_T_82574:
8671 case WM_T_82575:
8672 case WM_T_82576:
8673 case WM_T_82580:
8674 case WM_T_82580ER:
8675 case WM_T_I350:
8676 case WM_T_I354:
8677 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8678 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8679 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8680 break;
8681 case WM_T_ICH8:
8682 case WM_T_ICH9:
8683 case WM_T_ICH10:
8684 case WM_T_PCH:
8685 case WM_T_PCH2:
8686 case WM_T_PCH_LPT:
8687 sc->sc_flags |= WM_F_HAS_AMT;
8688 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8689 break;
8690 default:
8691 break;
8692 }
8693
8694 /* 1: HAS_MANAGE */
8695 if (wm_enable_mng_pass_thru(sc) != 0)
8696 sc->sc_flags |= WM_F_HAS_MANAGE;
8697
8698 #ifdef WM_DEBUG
8699 printf("\n");
8700 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8701 printf("HAS_AMT,");
8702 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8703 printf("ARC_SUBSYS_VALID,");
8704 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8705 printf("ASF_FIRMWARE_PRES,");
8706 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8707 printf("HAS_MANAGE,");
8708 printf("\n");
8709 #endif
8710 	/*
8711 	 * Note that the WOL flags are set after the EEPROM handling has
8712 	 * completed.
8713 	 */
8714 }
8715
8716 #ifdef WM_WOL
8717 /* WOL in the newer chipset interfaces (pchlan) */
8718 static void
8719 wm_enable_phy_wakeup(struct wm_softc *sc)
8720 {
8721 #if 0
8722 uint16_t preg;
8723
8724 /* Copy MAC RARs to PHY RARs */
8725
8726 /* Copy MAC MTA to PHY MTA */
8727
8728 /* Configure PHY Rx Control register */
8729
8730 /* Enable PHY wakeup in MAC register */
8731
8732 /* Configure and enable PHY wakeup in PHY registers */
8733
8734 /* Activate PHY wakeup */
8735
8736 /* XXX */
8737 #endif
8738 }
8739
8740 static void
8741 wm_enable_wakeup(struct wm_softc *sc)
8742 {
8743 uint32_t reg, pmreg;
8744 pcireg_t pmode;
8745
8746 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8747 &pmreg, NULL) == 0)
8748 return;
8749
8750 /* Advertise the wakeup capability */
8751 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8752 | CTRL_SWDPIN(3));
8753 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8754
8755 /* ICH workaround */
8756 switch (sc->sc_type) {
8757 case WM_T_ICH8:
8758 case WM_T_ICH9:
8759 case WM_T_ICH10:
8760 case WM_T_PCH:
8761 case WM_T_PCH2:
8762 case WM_T_PCH_LPT:
8763 /* Disable gig during WOL */
8764 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8765 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8766 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8767 if (sc->sc_type == WM_T_PCH)
8768 wm_gmii_reset(sc);
8769
8770 /* Power down workaround */
8771 if (sc->sc_phytype == WMPHY_82577) {
8772 struct mii_softc *child;
8773
8774 /* Assume that the PHY is copper */
8775 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8776 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
8777 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8778 (768 << 5) | 25, 0x0444); /* magic num */
8779 }
8780 break;
8781 default:
8782 break;
8783 }
8784
8785 /* Keep the laser running on fiber adapters */
8786 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8787 	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
8788 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8789 reg |= CTRL_EXT_SWDPIN(3);
8790 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8791 }
8792
8793 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8794 #if 0 /* for the multicast packet */
8795 reg |= WUFC_MC;
8796 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8797 #endif
8798
8799 if (sc->sc_type == WM_T_PCH) {
8800 wm_enable_phy_wakeup(sc);
8801 } else {
8802 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8803 CSR_WRITE(sc, WMREG_WUFC, reg);
8804 }
8805
8806 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8807 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8808 || (sc->sc_type == WM_T_PCH2))
8809 && (sc->sc_phytype == WMPHY_IGP_3))
8810 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8811
8812 /* Request PME */
8813 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8814 #if 0
8815 /* Disable WOL */
8816 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8817 #else
8818 /* For WOL */
8819 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8820 #endif
8821 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8822 }
8823 #endif /* WM_WOL */
8824
8825 static bool
8826 wm_suspend(device_t self, const pmf_qual_t *qual)
8827 {
8828 struct wm_softc *sc = device_private(self);
8829
8830 wm_release_manageability(sc);
8831 wm_release_hw_control(sc);
8832 #ifdef WM_WOL
8833 wm_enable_wakeup(sc);
8834 #endif
8835
8836 return true;
8837 }
8838
8839 static bool
8840 wm_resume(device_t self, const pmf_qual_t *qual)
8841 {
8842 struct wm_softc *sc = device_private(self);
8843
8844 wm_init_manageability(sc);
8845
8846 return true;
8847 }
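
/*
 * Example (illustrative fragment, not compiled): the two hooks above are
 * registered with the NetBSD pmf(9) framework from the attach path,
 * roughly as below; the error message is a sketch.
 */
#if 0
	if (!pmf_device_register(self, wm_suspend, wm_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
#endif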
8848
8849 static void
8850 wm_set_eee_i350(struct wm_softc *sc)
8851 {
8852 uint32_t ipcnfg, eeer;
8853
8854 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8855 eeer = CSR_READ(sc, WMREG_EEER);
8856
8857 if ((sc->sc_flags & WM_F_EEE) != 0) {
8858 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8859 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8860 | EEER_LPI_FC);
8861 } else {
8862 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8863 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8864 | EEER_LPI_FC);
8865 }
8866
8867 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8868 CSR_WRITE(sc, WMREG_EEER, eeer);
8869 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8870 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8871 }
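
/*
 * Example (illustrative fragment, not compiled): EEE on the I350 follows
 * the WM_F_EEE flag, so turning it off amounts to clearing the flag and
 * reapplying the setting.
 */
#if 0
	sc->sc_flags &= ~WM_F_EEE;
	wm_set_eee_i350(sc);
#endif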
8872