/*	$NetBSD: if_wm.c,v 1.249 2013/06/02 09:36:22 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.249 2013/06/02 09:36:22 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
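
/*
 * Illustrative note (not from the original sources): because the ring
 * sizes above are powers of two, the modular index arithmetic reduces
 * to a bitwise AND.  With WM_NTXDESC(sc) == 4096 the mask is 0xfff, so
 * WM_NEXTTX(sc, 4095) == (4095 + 1) & 0xfff == 0, i.e. the index wraps
 * back to the start of the ring without a division or a branch.
 */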

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
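
/*
 * Illustrative arithmetic for the comment above: a 9018-byte jumbo
 * frame split across 2k (MCLBYTES) buffers needs ceil(9018 / 2048) = 5
 * descriptors, so 256 descriptors leave room for roughly 256 / 5 ~= 50
 * jumbo packets.
 */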

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
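
/*
 * Example (illustrative only): WM_CDTXOFF(3) expands to
 * offsetof(struct wm_control_data_82544, wdc_u.wcdu_txdescs[3]), a
 * compile-time byte offset into the control-data clump.  Adding it to
 * the clump's base DMA address (see WM_CDTXADDR below) yields the bus
 * address of an individual descriptor without needing a separate DMA
 * map per descriptor.
 */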

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
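
/*
 * Illustrative note: sc_rxtailp always points at the m_next field of
 * the last mbuf in the Rx chain (or at sc_rxhead when the chain is
 * empty), so WM_RXCHAIN_LINK() appends in O(1): it stores the new mbuf
 * through the tail pointer, then advances the pointer to the new
 * mbuf's m_next.  No list walk is ever required.
 */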

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
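
/*
 * Example (illustrative only): with a 64-bit bus_addr_t, a descriptor
 * address of 0x123456789 splits into _LO == 0x23456789 and _HI == 0x1
 * for the chip's 32-bit address register pair; on 32-bit platforms the
 * high word is simply 0.
 */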

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
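
/*
 * Illustrative example: with a 4096-entry ring, WM_CDTXSYNC(sc, 4094,
 * 4, ops) issues two bus_dmamap_sync() calls, one covering descriptors
 * 4094-4095 and one covering descriptors 0-1, because a single sync
 * range may not run past the end of the descriptor array.
 */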

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
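
/*
 * Illustrative note on the 2-byte "scoot" above: the Ethernet header
 * is 14 bytes, so with sc_align_tweak == 2 the IP header starts at
 * buffer offset 2 + 14 == 16, which satisfies the 4-byte alignment
 * required on strict-alignment platforms.
 */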

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
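
/*
 * Illustrative note: the I/O window is an indirect register pair
 * (offset 0 latches a CSR offset, offset 4 transfers the data; the
 * pair is conventionally called IOADDR/IODATA), so wm_io_write(sc,
 * WMREG_CTRL, val) has the same effect as CSR_WRITE(sc, WMREG_CTRL,
 * val), only through I/O space instead of memory space.
 */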

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
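
/*
 * Usage note (illustrative): WM_INIT_RXDESC() above uses this helper
 * to store a buffer's bus address into the two little-endian 32-bit
 * words of a descriptor.  The htole32() conversions keep the layout
 * correct on big-endian hosts, and the high word degenerates to 0
 * when bus_addr_t is only 32 bits wide.
 */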

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's fine, because those newer chips don't have
			 * the bugs this access method works around.
			 *
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
				    PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
				    PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
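	/*
	 * Illustrative note: the bus_dmamem_alloc() call below enforces
	 * this by passing 0x100000000ULL as its boundary argument, which
	 * requires the allocated segment not to cross a 4G address
	 * boundary; keeping both descriptor arrays in one clump then
	 * satisfies the constraint for Tx and Rx at once.
	 */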
1457 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1458 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1459 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1460 sizeof(struct wm_control_data_82542) :
1461 sizeof(struct wm_control_data_82544);
1462 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1463 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1464 &sc->sc_cd_rseg, 0)) != 0) {
1465 aprint_error_dev(sc->sc_dev,
1466 "unable to allocate control data, error = %d\n",
1467 error);
1468 goto fail_0;
1469 }
1470
1471 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1472 sc->sc_cd_rseg, sc->sc_cd_size,
1473 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1474 aprint_error_dev(sc->sc_dev,
1475 "unable to map control data, error = %d\n", error);
1476 goto fail_1;
1477 }
1478
1479 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1480 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1481 aprint_error_dev(sc->sc_dev,
1482 "unable to create control data DMA map, error = %d\n",
1483 error);
1484 goto fail_2;
1485 }
1486
1487 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1488 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1489 aprint_error_dev(sc->sc_dev,
1490 "unable to load control data DMA map, error = %d\n",
1491 error);
1492 goto fail_3;
1493 }
1494
1495 /*
1496 * Create the transmit buffer DMA maps.
1497 */
1498 WM_TXQUEUELEN(sc) =
1499 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1500 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1501 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1502 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1503 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1504 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1505 aprint_error_dev(sc->sc_dev,
1506 "unable to create Tx DMA map %d, error = %d\n",
1507 i, error);
1508 goto fail_4;
1509 }
1510 }
1511
1512 /*
1513 * Create the receive buffer DMA maps.
1514 */
1515 for (i = 0; i < WM_NRXDESC; i++) {
1516 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1517 MCLBYTES, 0, 0,
1518 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1519 aprint_error_dev(sc->sc_dev,
1520 "unable to create Rx DMA map %d error = %d\n",
1521 i, error);
1522 goto fail_5;
1523 }
1524 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1525 }
1526
1527 /* clear interesting stat counters */
1528 CSR_READ(sc, WMREG_COLC);
1529 CSR_READ(sc, WMREG_RXERRC);
1530
1531 /* get PHY control from SMBus to PCIe */
1532 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1533 || (sc->sc_type == WM_T_PCH_LPT))
1534 wm_smbustopci(sc);
1535
1536 /*
1537 * Reset the chip to a known state.
1538 */
1539 wm_reset(sc);
1540
1541 switch (sc->sc_type) {
1542 case WM_T_82571:
1543 case WM_T_82572:
1544 case WM_T_82573:
1545 case WM_T_82574:
1546 case WM_T_82583:
1547 case WM_T_80003:
1548 case WM_T_ICH8:
1549 case WM_T_ICH9:
1550 case WM_T_ICH10:
1551 case WM_T_PCH:
1552 case WM_T_PCH2:
1553 case WM_T_PCH_LPT:
1554 if (wm_check_mng_mode(sc) != 0)
1555 wm_get_hw_control(sc);
1556 break;
1557 default:
1558 break;
1559 }
1560
1561 /*
1562 * Get some information about the EEPROM.
1563 */
1564 switch (sc->sc_type) {
1565 case WM_T_82542_2_0:
1566 case WM_T_82542_2_1:
1567 case WM_T_82543:
1568 case WM_T_82544:
1569 /* Microwire */
1570 sc->sc_ee_addrbits = 6;
1571 break;
1572 case WM_T_82540:
1573 case WM_T_82545:
1574 case WM_T_82545_3:
1575 case WM_T_82546:
1576 case WM_T_82546_3:
1577 /* Microwire */
1578 reg = CSR_READ(sc, WMREG_EECD);
1579 if (reg & EECD_EE_SIZE)
1580 sc->sc_ee_addrbits = 8;
1581 else
1582 sc->sc_ee_addrbits = 6;
1583 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1584 break;
1585 case WM_T_82541:
1586 case WM_T_82541_2:
1587 case WM_T_82547:
1588 case WM_T_82547_2:
1589 reg = CSR_READ(sc, WMREG_EECD);
1590 if (reg & EECD_EE_TYPE) {
1591 /* SPI */
1592 wm_set_spiaddrbits(sc);
1593 } else
1594 /* Microwire */
1595 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1596 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1597 break;
1598 case WM_T_82571:
1599 case WM_T_82572:
1600 /* SPI */
1601 wm_set_spiaddrbits(sc);
1602 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1603 break;
1604 case WM_T_82573:
1605 case WM_T_82574:
1606 case WM_T_82583:
1607 if (wm_is_onboard_nvm_eeprom(sc) == 0)
1608 sc->sc_flags |= WM_F_EEPROM_FLASH;
1609 else {
1610 /* SPI */
1611 wm_set_spiaddrbits(sc);
1612 }
1613 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1614 break;
1615 case WM_T_82575:
1616 case WM_T_82576:
1617 case WM_T_82580:
1618 case WM_T_82580ER:
1619 case WM_T_I350:
1620 case WM_T_80003:
1621 /* SPI */
1622 wm_set_spiaddrbits(sc);
1623 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1624 break;
1625 case WM_T_ICH8:
1626 case WM_T_ICH9:
1627 case WM_T_ICH10:
1628 case WM_T_PCH:
1629 case WM_T_PCH2:
1630 case WM_T_PCH_LPT:
1631 /* FLASH */
1632 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1633 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1634 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1635 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1636 aprint_error_dev(sc->sc_dev,
1637 "can't map FLASH registers\n");
1638 return;
1639 }
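		/*
		 * GFPREG describes the flash region in units of
		 * ICH_FLASH_SECTOR_SIZE sectors: the low bits hold the
		 * first sector, bits 16 and up the last.  The region
		 * contains two NVM banks, so halve it and convert from
		 * bytes to 16-bit words to get the per-bank size used
		 * by the flash read code.
		 */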
1640 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1641 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1642 ICH_FLASH_SECTOR_SIZE;
1643 sc->sc_ich8_flash_bank_size =
1644 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1645 sc->sc_ich8_flash_bank_size -=
1646 (reg & ICH_GFPREG_BASE_MASK);
1647 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1648 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1649 break;
1650 case WM_T_I210:
1651 case WM_T_I211:
1652 #if 1
1653 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1654 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1655 #endif
1656 break;
1657 default:
1658 break;
1659 }
1660
1661 /*
1662 	 * Defer printing the EEPROM type until after verifying the checksum.
1663 * This allows the EEPROM type to be printed correctly in the case
1664 * that no EEPROM is attached.
1665 */
1666 /*
1667 * Validate the EEPROM checksum. If the checksum fails, flag
1668 * this for later, so we can fail future reads from the EEPROM.
1669 */
1670 if (wm_validate_eeprom_checksum(sc)) {
1671 /*
1672 		 * Validate again, because some PCI-e parts fail the
1673 * first check due to the link being in sleep state.
1674 */
1675 if (wm_validate_eeprom_checksum(sc))
1676 sc->sc_flags |= WM_F_EEPROM_INVALID;
1677 }
1678
1679 /* Set device properties (macflags) */
1680 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1681
1682 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1683 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1684 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1685 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1686 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1687 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1688 } else {
1689 if (sc->sc_flags & WM_F_EEPROM_SPI)
1690 eetype = "SPI";
1691 else
1692 eetype = "MicroWire";
1693 aprint_verbose_dev(sc->sc_dev,
1694 "%u word (%d address bits) %s EEPROM\n",
1695 1U << sc->sc_ee_addrbits,
1696 sc->sc_ee_addrbits, eetype);
1697 }
1698
1699 /*
1700 	 * Read the Ethernet address from device properties if present,
1701 	 * otherwise from the EEPROM.
1702 */
1703 ea = prop_dictionary_get(dict, "mac-address");
1704 if (ea != NULL) {
1705 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1706 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1707 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1708 } else {
1709 if (wm_read_mac_addr(sc, enaddr) != 0) {
1710 aprint_error_dev(sc->sc_dev,
1711 "unable to read Ethernet address\n");
1712 return;
1713 }
1714 }
1715
1716 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1717 ether_sprintf(enaddr));
1718
1719 /*
1720 * Read the config info from the EEPROM, and set up various
1721 * bits in the control registers based on their contents.
1722 */
1723 pn = prop_dictionary_get(dict, "i82543-cfg1");
1724 if (pn != NULL) {
1725 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1726 cfg1 = (uint16_t) prop_number_integer_value(pn);
1727 } else {
1728 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1729 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1730 return;
1731 }
1732 }
1733
1734 pn = prop_dictionary_get(dict, "i82543-cfg2");
1735 if (pn != NULL) {
1736 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1737 cfg2 = (uint16_t) prop_number_integer_value(pn);
1738 } else {
1739 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1740 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1741 return;
1742 }
1743 }
1744
1745 /* check for WM_F_WOL */
1746 switch (sc->sc_type) {
1747 case WM_T_82542_2_0:
1748 case WM_T_82542_2_1:
1749 case WM_T_82543:
1750 /* dummy? */
1751 eeprom_data = 0;
1752 apme_mask = EEPROM_CFG3_APME;
1753 break;
1754 case WM_T_82544:
1755 apme_mask = EEPROM_CFG2_82544_APM_EN;
1756 eeprom_data = cfg2;
1757 break;
1758 case WM_T_82546:
1759 case WM_T_82546_3:
1760 case WM_T_82571:
1761 case WM_T_82572:
1762 case WM_T_82573:
1763 case WM_T_82574:
1764 case WM_T_82583:
1765 case WM_T_80003:
1766 default:
1767 apme_mask = EEPROM_CFG3_APME;
1768 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1769 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1770 break;
1771 case WM_T_82575:
1772 case WM_T_82576:
1773 case WM_T_82580:
1774 case WM_T_82580ER:
1775 case WM_T_I350:
1776 case WM_T_ICH8:
1777 case WM_T_ICH9:
1778 case WM_T_ICH10:
1779 case WM_T_PCH:
1780 case WM_T_PCH2:
1781 case WM_T_PCH_LPT:
1782 /* XXX The funcid should be checked on some devices */
1783 apme_mask = WUC_APME;
1784 eeprom_data = CSR_READ(sc, WMREG_WUC);
1785 break;
1786 }
1787
1788 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1789 if ((eeprom_data & apme_mask) != 0)
1790 sc->sc_flags |= WM_F_WOL;
1791 #ifdef WM_DEBUG
1792 if ((sc->sc_flags & WM_F_WOL) != 0)
1793 printf("WOL\n");
1794 #endif
1795
1796 /*
1797 * XXX need special handling for some multiple port cards
1798 	 * to disable a particular port.
1799 */
1800
1801 if (sc->sc_type >= WM_T_82544) {
1802 pn = prop_dictionary_get(dict, "i82543-swdpin");
1803 if (pn != NULL) {
1804 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1805 swdpin = (uint16_t) prop_number_integer_value(pn);
1806 } else {
1807 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1808 aprint_error_dev(sc->sc_dev,
1809 "unable to read SWDPIN\n");
1810 return;
1811 }
1812 }
1813 }
1814
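	/*
	 * Seed the CTRL register image from the EEPROM: ILOS inverts the
	 * loss-of-signal polarity, and the software-definable pin (SWDP)
	 * direction/value fields come from either the SWDPIN word
	 * (i82544 and later) or CFG1 (older chips).
	 */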
1815 if (cfg1 & EEPROM_CFG1_ILOS)
1816 sc->sc_ctrl |= CTRL_ILOS;
1817 if (sc->sc_type >= WM_T_82544) {
1818 sc->sc_ctrl |=
1819 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1820 CTRL_SWDPIO_SHIFT;
1821 sc->sc_ctrl |=
1822 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1823 CTRL_SWDPINS_SHIFT;
1824 } else {
1825 sc->sc_ctrl |=
1826 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1827 CTRL_SWDPIO_SHIFT;
1828 }
1829
1830 #if 0
1831 if (sc->sc_type >= WM_T_82544) {
1832 if (cfg1 & EEPROM_CFG1_IPS0)
1833 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1834 if (cfg1 & EEPROM_CFG1_IPS1)
1835 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1836 sc->sc_ctrl_ext |=
1837 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1838 CTRL_EXT_SWDPIO_SHIFT;
1839 sc->sc_ctrl_ext |=
1840 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1841 CTRL_EXT_SWDPINS_SHIFT;
1842 } else {
1843 sc->sc_ctrl_ext |=
1844 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1845 CTRL_EXT_SWDPIO_SHIFT;
1846 }
1847 #endif
1848
1849 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1850 #if 0
1851 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1852 #endif
1853
1854 /*
1855 * Set up some register offsets that are different between
1856 * the i82542 and the i82543 and later chips.
1857 */
1858 if (sc->sc_type < WM_T_82543) {
1859 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1860 sc->sc_tdt_reg = WMREG_OLD_TDT;
1861 } else {
1862 sc->sc_rdt_reg = WMREG_RDT;
1863 sc->sc_tdt_reg = WMREG_TDT;
1864 }
1865
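	/*
	 * K1 is a power-saving link state on PCH PHYs; remember whether
	 * the NVM enables it so later PHY configuration can preserve the
	 * setting.
	 */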
1866 if (sc->sc_type == WM_T_PCH) {
1867 uint16_t val;
1868
1869 /* Save the NVM K1 bit setting */
1870 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1871
1872 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1873 sc->sc_nvm_k1_enabled = 1;
1874 else
1875 sc->sc_nvm_k1_enabled = 0;
1876 }
1877
1878 /*
1879 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
1880 * media structures accordingly.
1881 */
1882 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1883 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1884 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
1885 || sc->sc_type == WM_T_82573
1886 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1887 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1888 wm_gmii_mediainit(sc, wmp->wmp_product);
1889 } else if (sc->sc_type < WM_T_82543 ||
1890 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1891 if (wmp->wmp_flags & WMP_F_1000T)
1892 aprint_error_dev(sc->sc_dev,
1893 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1894 wm_tbi_mediainit(sc);
1895 } else {
1896 switch (sc->sc_type) {
1897 case WM_T_82575:
1898 case WM_T_82576:
1899 case WM_T_82580:
1900 case WM_T_82580ER:
1901 case WM_T_I350:
1902 case WM_T_I210:
1903 case WM_T_I211:
1904 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1905 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1906 case CTRL_EXT_LINK_MODE_SGMII:
1907 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1908 sc->sc_flags |= WM_F_SGMII;
1909 CSR_WRITE(sc, WMREG_CTRL_EXT,
1910 reg | CTRL_EXT_I2C_ENA);
1911 wm_gmii_mediainit(sc, wmp->wmp_product);
1912 break;
1913 case CTRL_EXT_LINK_MODE_1000KX:
1914 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1915 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1916 CSR_WRITE(sc, WMREG_CTRL_EXT,
1917 reg | CTRL_EXT_I2C_ENA);
1918 				panic("not supported yet");
1919 break;
1920 case CTRL_EXT_LINK_MODE_GMII:
1921 default:
1922 CSR_WRITE(sc, WMREG_CTRL_EXT,
1923 reg & ~CTRL_EXT_I2C_ENA);
1924 wm_gmii_mediainit(sc, wmp->wmp_product);
1925 break;
1926 }
1927 break;
1928 default:
1929 if (wmp->wmp_flags & WMP_F_1000X)
1930 aprint_error_dev(sc->sc_dev,
1931 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1932 wm_gmii_mediainit(sc, wmp->wmp_product);
1933 }
1934 }
1935
1936 ifp = &sc->sc_ethercom.ec_if;
1937 xname = device_xname(sc->sc_dev);
1938 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1939 ifp->if_softc = sc;
1940 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1941 ifp->if_ioctl = wm_ioctl;
1942 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1943 ifp->if_start = wm_nq_start;
1944 else
1945 ifp->if_start = wm_start;
1946 ifp->if_watchdog = wm_watchdog;
1947 ifp->if_init = wm_init;
1948 ifp->if_stop = wm_stop;
1949 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1950 IFQ_SET_READY(&ifp->if_snd);
1951
1952 /* Check for jumbo frame */
1953 switch (sc->sc_type) {
1954 case WM_T_82573:
1955 /* XXX limited to 9234 if ASPM is disabled */
1956 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1957 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1958 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1959 break;
1960 case WM_T_82571:
1961 case WM_T_82572:
1962 case WM_T_82574:
1963 case WM_T_82575:
1964 case WM_T_82576:
1965 case WM_T_82580:
1966 case WM_T_82580ER:
1967 case WM_T_I350:
1968 case WM_T_I210:
1969 case WM_T_I211:
1970 case WM_T_80003:
1971 case WM_T_ICH9:
1972 case WM_T_ICH10:
1973 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1974 case WM_T_PCH_LPT:
1975 /* XXX limited to 9234 */
1976 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1977 break;
1978 case WM_T_PCH:
1979 /* XXX limited to 4096 */
1980 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1981 break;
1982 case WM_T_82542_2_0:
1983 case WM_T_82542_2_1:
1984 case WM_T_82583:
1985 case WM_T_ICH8:
1986 /* No support for jumbo frame */
1987 break;
1988 default:
1989 /* ETHER_MAX_LEN_JUMBO */
1990 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1991 break;
1992 }
1993
1994 /*
1995 	 * If we're an i82543 or greater, we can support VLANs.
1996 */
1997 if (sc->sc_type >= WM_T_82543)
1998 sc->sc_ethercom.ec_capabilities |=
1999 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2000
2001 /*
2002 	 * We can perform TCPv4 and UDPv4 checksums inbound. Only
2003 * on i82543 and later.
2004 */
2005 if (sc->sc_type >= WM_T_82543) {
2006 ifp->if_capabilities |=
2007 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2008 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2009 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2010 IFCAP_CSUM_TCPv6_Tx |
2011 IFCAP_CSUM_UDPv6_Tx;
2012 }
2013
2014 /*
2015 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2016 *
2017 * 82541GI (8086:1076) ... no
2018 * 82572EI (8086:10b9) ... yes
2019 */
2020 if (sc->sc_type >= WM_T_82571) {
2021 ifp->if_capabilities |=
2022 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2023 }
2024
2025 /*
2026 	 * If we're an i82544 or greater (except i82547), we can do
2027 * TCP segmentation offload.
2028 */
2029 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2030 ifp->if_capabilities |= IFCAP_TSOv4;
2031 }
2032
2033 if (sc->sc_type >= WM_T_82571) {
2034 ifp->if_capabilities |= IFCAP_TSOv6;
2035 }
2036
2037 /*
2038 * Attach the interface.
2039 */
2040 if_attach(ifp);
2041 ether_ifattach(ifp, enaddr);
2042 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2043 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2044
2045 #ifdef WM_EVENT_COUNTERS
2046 /* Attach event counters. */
2047 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2048 NULL, xname, "txsstall");
2049 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2050 NULL, xname, "txdstall");
2051 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2052 NULL, xname, "txfifo_stall");
2053 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2054 NULL, xname, "txdw");
2055 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2056 NULL, xname, "txqe");
2057 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2058 NULL, xname, "rxintr");
2059 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2060 NULL, xname, "linkintr");
2061
2062 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2063 NULL, xname, "rxipsum");
2064 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2065 NULL, xname, "rxtusum");
2066 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2067 NULL, xname, "txipsum");
2068 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2069 NULL, xname, "txtusum");
2070 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2071 NULL, xname, "txtusum6");
2072
2073 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2074 NULL, xname, "txtso");
2075 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2076 NULL, xname, "txtso6");
2077 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2078 NULL, xname, "txtsopain");
2079
2080 for (i = 0; i < WM_NTXSEGS; i++) {
2081 		snprintf(wm_txseg_evcnt_names[i],
		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2082 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2083 NULL, xname, wm_txseg_evcnt_names[i]);
2084 }
2085
2086 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2087 NULL, xname, "txdrop");
2088
2089 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2090 NULL, xname, "tu");
2091
2092 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2093 NULL, xname, "tx_xoff");
2094 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2095 NULL, xname, "tx_xon");
2096 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2097 NULL, xname, "rx_xoff");
2098 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2099 NULL, xname, "rx_xon");
2100 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2101 NULL, xname, "rx_macctl");
2102 #endif /* WM_EVENT_COUNTERS */
2103
2104 if (pmf_device_register(self, wm_suspend, wm_resume))
2105 pmf_class_network_register(self, ifp);
2106 else
2107 aprint_error_dev(self, "couldn't establish power handler\n");
2108
2109 return;
2110
2111 /*
2112 * Free any resources we've allocated during the failed attach
2113 * attempt. Do this in reverse order and fall through.
2114 */
2115 fail_5:
2116 for (i = 0; i < WM_NRXDESC; i++) {
2117 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2118 bus_dmamap_destroy(sc->sc_dmat,
2119 sc->sc_rxsoft[i].rxs_dmamap);
2120 }
2121 fail_4:
2122 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2123 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2124 bus_dmamap_destroy(sc->sc_dmat,
2125 sc->sc_txsoft[i].txs_dmamap);
2126 }
2127 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2128 fail_3:
2129 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2130 fail_2:
2131 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2132 sc->sc_cd_size);
2133 fail_1:
2134 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2135 fail_0:
2136 return;
2137 }
2138
2139 static int
2140 wm_detach(device_t self, int flags __unused)
2141 {
2142 struct wm_softc *sc = device_private(self);
2143 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2144 int i, s;
2145
2146 s = splnet();
2147 /* Stop the interface. Callouts are stopped in it. */
2148 wm_stop(ifp, 1);
2149 splx(s);
2150
2151 pmf_device_deregister(self);
2152
2153 /* Tell the firmware about the release */
2154 wm_release_manageability(sc);
2155 wm_release_hw_control(sc);
2156
2157 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2158
2159 /* Delete all remaining media. */
2160 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2161
2162 ether_ifdetach(ifp);
2163 if_detach(ifp);
2164
2166 /* Unload RX dmamaps and free mbufs */
2167 wm_rxdrain(sc);
2168
2169 /* Free dmamap. It's the same as the end of the wm_attach() function */
2170 for (i = 0; i < WM_NRXDESC; i++) {
2171 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2172 bus_dmamap_destroy(sc->sc_dmat,
2173 sc->sc_rxsoft[i].rxs_dmamap);
2174 }
2175 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2176 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2177 bus_dmamap_destroy(sc->sc_dmat,
2178 sc->sc_txsoft[i].txs_dmamap);
2179 }
2180 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2181 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2182 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2183 sc->sc_cd_size);
2184 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2185
2186 /* Disestablish the interrupt handler */
2187 if (sc->sc_ih != NULL) {
2188 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2189 sc->sc_ih = NULL;
2190 }
2191
2192 /* Unmap the registers */
2193 if (sc->sc_ss) {
2194 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2195 sc->sc_ss = 0;
2196 }
2197
2198 if (sc->sc_ios) {
2199 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2200 sc->sc_ios = 0;
2201 }
2202
2203 return 0;
2204 }
2205
2206 /*
2207 * wm_tx_offload:
2208 *
2209 * Set up TCP/IP checksumming parameters for the
2210 * specified packet.
2211 */
2212 static int
2213 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2214 uint8_t *fieldsp)
2215 {
2216 struct mbuf *m0 = txs->txs_mbuf;
2217 struct livengood_tcpip_ctxdesc *t;
2218 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2219 uint32_t ipcse;
2220 struct ether_header *eh;
2221 int offset, iphl;
2222 uint8_t fields;
2223
2224 /*
2225 * XXX It would be nice if the mbuf pkthdr had offset
2226 * fields for the protocol headers.
2227 */
2228
2229 eh = mtod(m0, struct ether_header *);
2230 switch (htons(eh->ether_type)) {
2231 case ETHERTYPE_IP:
2232 case ETHERTYPE_IPV6:
2233 offset = ETHER_HDR_LEN;
2234 break;
2235
2236 case ETHERTYPE_VLAN:
2237 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2238 break;
2239
2240 default:
2241 /*
2242 * Don't support this protocol or encapsulation.
2243 */
2244 *fieldsp = 0;
2245 *cmdp = 0;
2246 return 0;
2247 }
2248
2249 if ((m0->m_pkthdr.csum_flags &
2250 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2251 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2252 } else {
2253 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2254 }
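	/* IPCSE is the offset of the last byte of the IP header. */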
2255 ipcse = offset + iphl - 1;
2256
2257 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2258 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2259 seg = 0;
2260 fields = 0;
2261
2262 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2263 int hlen = offset + iphl;
2264 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2265
2266 if (__predict_false(m0->m_len <
2267 (hlen + sizeof(struct tcphdr)))) {
2268 /*
2269 * TCP/IP headers are not in the first mbuf; we need
2270 * to do this the slow and painful way. Let's just
2271 * hope this doesn't happen very often.
2272 */
2273 struct tcphdr th;
2274
2275 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2276
2277 m_copydata(m0, hlen, sizeof(th), &th);
2278 if (v4) {
2279 struct ip ip;
2280
2281 m_copydata(m0, offset, sizeof(ip), &ip);
2282 ip.ip_len = 0;
2283 m_copyback(m0,
2284 offset + offsetof(struct ip, ip_len),
2285 sizeof(ip.ip_len), &ip.ip_len);
2286 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2287 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2288 } else {
2289 struct ip6_hdr ip6;
2290
2291 m_copydata(m0, offset, sizeof(ip6), &ip6);
2292 ip6.ip6_plen = 0;
2293 m_copyback(m0,
2294 offset + offsetof(struct ip6_hdr, ip6_plen),
2295 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2296 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2297 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2298 }
2299 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2300 sizeof(th.th_sum), &th.th_sum);
2301
2302 hlen += th.th_off << 2;
2303 } else {
2304 /*
2305 * TCP/IP headers are in the first mbuf; we can do
2306 * this the easy way.
2307 */
2308 struct tcphdr *th;
2309
2310 if (v4) {
2311 struct ip *ip =
2312 (void *)(mtod(m0, char *) + offset);
2313 th = (void *)(mtod(m0, char *) + hlen);
2314
2315 ip->ip_len = 0;
2316 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2317 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2318 } else {
2319 struct ip6_hdr *ip6 =
2320 (void *)(mtod(m0, char *) + offset);
2321 th = (void *)(mtod(m0, char *) + hlen);
2322
2323 ip6->ip6_plen = 0;
2324 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2325 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2326 }
2327 hlen += th->th_off << 2;
2328 }
2329
2330 if (v4) {
2331 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2332 cmdlen |= WTX_TCPIP_CMD_IP;
2333 } else {
2334 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
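			/* IPv6 has no header checksum, so IPCSE is unused. */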
2335 ipcse = 0;
2336 }
2337 cmd |= WTX_TCPIP_CMD_TSE;
2338 cmdlen |= WTX_TCPIP_CMD_TSE |
2339 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2340 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2341 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2342 }
2343
2344 /*
2345 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2346 * offload feature, if we load the context descriptor, we
2347 * MUST provide valid values for IPCSS and TUCSS fields.
2348 */
2349
2350 ipcs = WTX_TCPIP_IPCSS(offset) |
2351 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2352 WTX_TCPIP_IPCSE(ipcse);
2353 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2354 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2355 fields |= WTX_IXSM;
2356 }
2357
2358 offset += iphl;
2359
2360 if (m0->m_pkthdr.csum_flags &
2361 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2362 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2363 fields |= WTX_TXSM;
2364 tucs = WTX_TCPIP_TUCSS(offset) |
2365 WTX_TCPIP_TUCSO(offset +
2366 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2367 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2368 } else if ((m0->m_pkthdr.csum_flags &
2369 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2370 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2371 fields |= WTX_TXSM;
2372 tucs = WTX_TCPIP_TUCSS(offset) |
2373 WTX_TCPIP_TUCSO(offset +
2374 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2375 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2376 } else {
2377 /* Just initialize it to a valid TCP context. */
2378 tucs = WTX_TCPIP_TUCSS(offset) |
2379 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2380 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2381 }
2382
2383 /* Fill in the context descriptor. */
2384 t = (struct livengood_tcpip_ctxdesc *)
2385 &sc->sc_txdescs[sc->sc_txnext];
2386 t->tcpip_ipcs = htole32(ipcs);
2387 t->tcpip_tucs = htole32(tucs);
2388 t->tcpip_cmdlen = htole32(cmdlen);
2389 t->tcpip_seg = htole32(seg);
2390 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2391
2392 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2393 txs->txs_ndesc++;
2394
2395 *cmdp = cmd;
2396 *fieldsp = fields;
2397
2398 return 0;
2399 }
2400
2401 static void
2402 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2403 {
2404 struct mbuf *m;
2405 int i;
2406
2407 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2408 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2409 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2410 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2411 m->m_data, m->m_len, m->m_flags);
2412 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2413 i, i == 1 ? "" : "s");
2414 }
2415
2416 /*
2417 * wm_82547_txfifo_stall:
2418 *
2419 * Callout used to wait for the 82547 Tx FIFO to drain,
2420 * reset the FIFO pointers, and restart packet transmission.
2421 */
2422 static void
2423 wm_82547_txfifo_stall(void *arg)
2424 {
2425 struct wm_softc *sc = arg;
2426 int s;
2427
2428 s = splnet();
2429
2430 if (sc->sc_txfifo_stall) {
2431 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2432 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2433 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2434 /*
2435 * Packets have drained. Stop transmitter, reset
2436 * FIFO pointers, restart transmitter, and kick
2437 * the packet queue.
2438 */
2439 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2440 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2441 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2442 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2443 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2444 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2445 CSR_WRITE(sc, WMREG_TCTL, tctl);
2446 CSR_WRITE_FLUSH(sc);
2447
2448 sc->sc_txfifo_head = 0;
2449 sc->sc_txfifo_stall = 0;
2450 wm_start(&sc->sc_ethercom.ec_if);
2451 } else {
2452 /*
2453 * Still waiting for packets to drain; try again in
2454 * another tick.
2455 */
2456 callout_schedule(&sc->sc_txfifo_ch, 1);
2457 }
2458 }
2459
2460 splx(s);
2461 }
2462
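/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 *	Gate or un-gate automatic PHY configuration by hardware on
 *	ICH8-family parts by toggling EXTCNFCTR_GATE_PHY_CFG.
 */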
2463 static void
2464 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2465 {
2466 uint32_t reg;
2467
2468 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2469
2470 if (on != 0)
2471 reg |= EXTCNFCTR_GATE_PHY_CFG;
2472 else
2473 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2474
2475 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2476 }
2477
2478 /*
2479 * wm_82547_txfifo_bugchk:
2480 *
2481 * Check for bug condition in the 82547 Tx FIFO. We need to
2482 * prevent enqueueing a packet that would wrap around the end
2483  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
2484 *
2485 * We do this by checking the amount of space before the end
2486 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2487 * the Tx FIFO, wait for all remaining packets to drain, reset
2488 * the internal FIFO pointers to the beginning, and restart
2489 * transmission on the interface.
2490 */
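/*
 * The FIFO accounting below charges each packet a WM_FIFO_HDR
 * (16-byte) header and rounds it up to a WM_FIFO_HDR boundary;
 * e.g. a 1514-byte frame occupies roundup(1514 + 16, 16) = 1536
 * bytes of FIFO space.
 */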
2491 #define WM_FIFO_HDR 0x10
2492 #define WM_82547_PAD_LEN 0x3e0
2493 static int
2494 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2495 {
2496 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2497 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2498
2499 /* Just return if already stalled. */
2500 if (sc->sc_txfifo_stall)
2501 return 1;
2502
2503 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2504 /* Stall only occurs in half-duplex mode. */
2505 goto send_packet;
2506 }
2507
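	/*
	 * Stall if the packet's FIFO footprint reaches WM_82547_PAD_LEN
	 * bytes beyond the space left before the FIFO's wrap point; this
	 * matches the erratum workaround used by other e1000 drivers.
	 */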
2508 if (len >= WM_82547_PAD_LEN + space) {
2509 sc->sc_txfifo_stall = 1;
2510 callout_schedule(&sc->sc_txfifo_ch, 1);
2511 return 1;
2512 }
2513
2514 send_packet:
2515 sc->sc_txfifo_head += len;
2516 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2517 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2518
2519 return 0;
2520 }
2521
2522 /*
2523 * wm_start: [ifnet interface function]
2524 *
2525 * Start packet transmission on the interface.
2526 */
2527 static void
2528 wm_start(struct ifnet *ifp)
2529 {
2530 struct wm_softc *sc = ifp->if_softc;
2531 struct mbuf *m0;
2532 struct m_tag *mtag;
2533 struct wm_txsoft *txs;
2534 bus_dmamap_t dmamap;
2535 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2536 bus_addr_t curaddr;
2537 bus_size_t seglen, curlen;
2538 uint32_t cksumcmd;
2539 uint8_t cksumfields;
2540
2541 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2542 return;
2543
2544 /*
2545 * Remember the previous number of free descriptors.
2546 */
2547 ofree = sc->sc_txfree;
2548
2549 /*
2550 * Loop through the send queue, setting up transmit descriptors
2551 * until we drain the queue, or use up all available transmit
2552 * descriptors.
2553 */
2554 for (;;) {
2555 /* Grab a packet off the queue. */
2556 IFQ_POLL(&ifp->if_snd, m0);
2557 if (m0 == NULL)
2558 break;
2559
2560 DPRINTF(WM_DEBUG_TX,
2561 ("%s: TX: have packet to transmit: %p\n",
2562 device_xname(sc->sc_dev), m0));
2563
2564 /* Get a work queue entry. */
2565 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2566 wm_txintr(sc);
2567 if (sc->sc_txsfree == 0) {
2568 DPRINTF(WM_DEBUG_TX,
2569 ("%s: TX: no free job descriptors\n",
2570 device_xname(sc->sc_dev)));
2571 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2572 break;
2573 }
2574 }
2575
2576 txs = &sc->sc_txsoft[sc->sc_txsnext];
2577 dmamap = txs->txs_dmamap;
2578
2579 use_tso = (m0->m_pkthdr.csum_flags &
2580 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2581
2582 /*
2583 * So says the Linux driver:
2584 * The controller does a simple calculation to make sure
2585 * there is enough room in the FIFO before initiating the
2586 * DMA for each buffer. The calc is:
2587 * 4 = ceil(buffer len / MSS)
2588 * To make sure we don't overrun the FIFO, adjust the max
2589 * buffer len if the MSS drops.
2590 */
2591 dmamap->dm_maxsegsz =
2592 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2593 ? m0->m_pkthdr.segsz << 2
2594 : WTX_MAX_LEN;
2595
2596 /*
2597 * Load the DMA map. If this fails, the packet either
2598 * didn't fit in the allotted number of segments, or we
2599 * were short on resources. For the too-many-segments
2600 * case, we simply report an error and drop the packet,
2601 * since we can't sanely copy a jumbo packet to a single
2602 * buffer.
2603 */
2604 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2605 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2606 if (error) {
2607 if (error == EFBIG) {
2608 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2609 log(LOG_ERR, "%s: Tx packet consumes too many "
2610 "DMA segments, dropping...\n",
2611 device_xname(sc->sc_dev));
2612 IFQ_DEQUEUE(&ifp->if_snd, m0);
2613 wm_dump_mbuf_chain(sc, m0);
2614 m_freem(m0);
2615 continue;
2616 }
2617 /*
2618 * Short on resources, just stop for now.
2619 */
2620 DPRINTF(WM_DEBUG_TX,
2621 ("%s: TX: dmamap load failed: %d\n",
2622 device_xname(sc->sc_dev), error));
2623 break;
2624 }
2625
2626 segs_needed = dmamap->dm_nsegs;
2627 if (use_tso) {
2628 /* For sentinel descriptor; see below. */
2629 segs_needed++;
2630 }
2631
2632 /*
2633 * Ensure we have enough descriptors free to describe
2634 * the packet. Note, we always reserve one descriptor
2635 * at the end of the ring due to the semantics of the
2636 * TDT register, plus one more in the event we need
2637 * to load offload context.
2638 */
2639 if (segs_needed > sc->sc_txfree - 2) {
2640 /*
2641 * Not enough free descriptors to transmit this
2642 * packet. We haven't committed anything yet,
2643 * so just unload the DMA map, put the packet
2644 			 * back on the queue, and punt. Notify the upper
2645 * layer that there are no more slots left.
2646 */
2647 DPRINTF(WM_DEBUG_TX,
2648 ("%s: TX: need %d (%d) descriptors, have %d\n",
2649 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2650 segs_needed, sc->sc_txfree - 1));
2651 ifp->if_flags |= IFF_OACTIVE;
2652 bus_dmamap_unload(sc->sc_dmat, dmamap);
2653 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2654 break;
2655 }
2656
2657 /*
2658 * Check for 82547 Tx FIFO bug. We need to do this
2659 * once we know we can transmit the packet, since we
2660 * do some internal FIFO space accounting here.
2661 */
2662 if (sc->sc_type == WM_T_82547 &&
2663 wm_82547_txfifo_bugchk(sc, m0)) {
2664 DPRINTF(WM_DEBUG_TX,
2665 ("%s: TX: 82547 Tx FIFO bug detected\n",
2666 device_xname(sc->sc_dev)));
2667 ifp->if_flags |= IFF_OACTIVE;
2668 bus_dmamap_unload(sc->sc_dmat, dmamap);
2669 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2670 break;
2671 }
2672
2673 IFQ_DEQUEUE(&ifp->if_snd, m0);
2674
2675 /*
2676 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2677 */
2678
2679 DPRINTF(WM_DEBUG_TX,
2680 ("%s: TX: packet has %d (%d) DMA segments\n",
2681 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2682
2683 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2684
2685 /*
2686 * Store a pointer to the packet so that we can free it
2687 * later.
2688 *
2689 * Initially, we consider the number of descriptors the
2690 		 * packet uses to be the number of DMA segments. This may be
2691 * incremented by 1 if we do checksum offload (a descriptor
2692 * is used to set the checksum context).
2693 */
2694 txs->txs_mbuf = m0;
2695 txs->txs_firstdesc = sc->sc_txnext;
2696 txs->txs_ndesc = segs_needed;
2697
2698 /* Set up offload parameters for this packet. */
2699 if (m0->m_pkthdr.csum_flags &
2700 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2701 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2702 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2703 if (wm_tx_offload(sc, txs, &cksumcmd,
2704 &cksumfields) != 0) {
2705 /* Error message already displayed. */
2706 bus_dmamap_unload(sc->sc_dmat, dmamap);
2707 continue;
2708 }
2709 } else {
2710 cksumcmd = 0;
2711 cksumfields = 0;
2712 }
2713
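		/*
		 * WTX_CMD_IFCS makes the chip append the Ethernet FCS;
		 * WTX_CMD_IDE enables the transmit interrupt delay so
		 * completions can be coalesced.
		 */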
2714 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2715
2716 /* Sync the DMA map. */
2717 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2718 BUS_DMASYNC_PREWRITE);
2719
2720 /*
2721 * Initialize the transmit descriptor.
2722 */
2723 for (nexttx = sc->sc_txnext, seg = 0;
2724 seg < dmamap->dm_nsegs; seg++) {
2725 for (seglen = dmamap->dm_segs[seg].ds_len,
2726 curaddr = dmamap->dm_segs[seg].ds_addr;
2727 seglen != 0;
2728 curaddr += curlen, seglen -= curlen,
2729 nexttx = WM_NEXTTX(sc, nexttx)) {
2730 curlen = seglen;
2731
2732 /*
2733 * So says the Linux driver:
2734 * Work around for premature descriptor
2735 * write-backs in TSO mode. Append a
2736 * 4-byte sentinel descriptor.
2737 */
2738 if (use_tso &&
2739 seg == dmamap->dm_nsegs - 1 &&
2740 curlen > 8)
2741 curlen -= 4;
2742
2743 wm_set_dma_addr(
2744 &sc->sc_txdescs[nexttx].wtx_addr,
2745 curaddr);
2746 sc->sc_txdescs[nexttx].wtx_cmdlen =
2747 htole32(cksumcmd | curlen);
2748 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2749 0;
2750 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2751 cksumfields;
2752 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2753 lasttx = nexttx;
2754
2755 DPRINTF(WM_DEBUG_TX,
2756 ("%s: TX: desc %d: low %#" PRIx64 ", "
2757 "len %#04zx\n",
2758 device_xname(sc->sc_dev), nexttx,
2759 (uint64_t)curaddr, curlen));
2760 }
2761 }
2762
2763 KASSERT(lasttx != -1);
2764
2765 /*
2766 * Set up the command byte on the last descriptor of
2767 * the packet. If we're in the interrupt delay window,
2768 * delay the interrupt.
2769 */
2770 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2771 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2772
2773 /*
2774 * If VLANs are enabled and the packet has a VLAN tag, set
2775 * up the descriptor to encapsulate the packet for us.
2776 *
2777 * This is only valid on the last descriptor of the packet.
2778 */
2779 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2780 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2781 htole32(WTX_CMD_VLE);
2782 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2783 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2784 }
2785
2786 txs->txs_lastdesc = lasttx;
2787
2788 DPRINTF(WM_DEBUG_TX,
2789 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2790 device_xname(sc->sc_dev),
2791 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2792
2793 /* Sync the descriptors we're using. */
2794 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2795 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2796
2797 /* Give the packet to the chip. */
2798 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2799
2800 DPRINTF(WM_DEBUG_TX,
2801 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2802
2803 DPRINTF(WM_DEBUG_TX,
2804 ("%s: TX: finished transmitting packet, job %d\n",
2805 device_xname(sc->sc_dev), sc->sc_txsnext));
2806
2807 /* Advance the tx pointer. */
2808 sc->sc_txfree -= txs->txs_ndesc;
2809 sc->sc_txnext = nexttx;
2810
2811 sc->sc_txsfree--;
2812 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2813
2814 /* Pass the packet to any BPF listeners. */
2815 bpf_mtap(ifp, m0);
2816 }
2817
2818 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2819 /* No more slots; notify upper layer. */
2820 ifp->if_flags |= IFF_OACTIVE;
2821 }
2822
2823 if (sc->sc_txfree != ofree) {
2824 /* Set a watchdog timer in case the chip flakes out. */
2825 ifp->if_timer = 5;
2826 }
2827 }
2828
2829 /*
2830 * wm_nq_tx_offload:
2831 *
2832 * Set up TCP/IP checksumming parameters for the
2833  *	specified packet, for NEWQUEUE devices.
2834 */
2835 static int
2836 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2837 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2838 {
2839 struct mbuf *m0 = txs->txs_mbuf;
2840 struct m_tag *mtag;
2841 uint32_t vl_len, mssidx, cmdc;
2842 struct ether_header *eh;
2843 int offset, iphl;
2844
2845 /*
2846 * XXX It would be nice if the mbuf pkthdr had offset
2847 * fields for the protocol headers.
2848 */
2849 *cmdlenp = 0;
2850 *fieldsp = 0;
2851
2852 eh = mtod(m0, struct ether_header *);
2853 switch (htons(eh->ether_type)) {
2854 case ETHERTYPE_IP:
2855 case ETHERTYPE_IPV6:
2856 offset = ETHER_HDR_LEN;
2857 break;
2858
2859 case ETHERTYPE_VLAN:
2860 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2861 break;
2862
2863 default:
2864 /*
2865 * Don't support this protocol or encapsulation.
2866 */
2867 *do_csum = false;
2868 return 0;
2869 }
2870 *do_csum = true;
2871 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2872 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2873
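	/*
	 * The advanced context descriptor packs the MAC header length,
	 * IP header length and VLAN tag together into the VL_LEN word.
	 */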
2874 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2875 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2876
2877 if ((m0->m_pkthdr.csum_flags &
2878 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2879 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2880 } else {
2881 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2882 }
2883 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2884 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
2885
2886 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2887 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2888 << NQTXC_VLLEN_VLAN_SHIFT);
2889 *cmdlenp |= NQTX_CMD_VLE;
2890 }
2891
2892 mssidx = 0;
2893
2894 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2895 int hlen = offset + iphl;
2896 int tcp_hlen;
2897 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2898
2899 if (__predict_false(m0->m_len <
2900 (hlen + sizeof(struct tcphdr)))) {
2901 /*
2902 * TCP/IP headers are not in the first mbuf; we need
2903 * to do this the slow and painful way. Let's just
2904 * hope this doesn't happen very often.
2905 */
2906 struct tcphdr th;
2907
2908 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2909
2910 m_copydata(m0, hlen, sizeof(th), &th);
2911 if (v4) {
2912 struct ip ip;
2913
2914 m_copydata(m0, offset, sizeof(ip), &ip);
2915 ip.ip_len = 0;
2916 m_copyback(m0,
2917 offset + offsetof(struct ip, ip_len),
2918 sizeof(ip.ip_len), &ip.ip_len);
2919 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2920 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2921 } else {
2922 struct ip6_hdr ip6;
2923
2924 m_copydata(m0, offset, sizeof(ip6), &ip6);
2925 ip6.ip6_plen = 0;
2926 m_copyback(m0,
2927 offset + offsetof(struct ip6_hdr, ip6_plen),
2928 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2929 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2930 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2931 }
2932 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2933 sizeof(th.th_sum), &th.th_sum);
2934
2935 tcp_hlen = th.th_off << 2;
2936 } else {
2937 /*
2938 * TCP/IP headers are in the first mbuf; we can do
2939 * this the easy way.
2940 */
2941 struct tcphdr *th;
2942
2943 if (v4) {
2944 struct ip *ip =
2945 (void *)(mtod(m0, char *) + offset);
2946 th = (void *)(mtod(m0, char *) + hlen);
2947
2948 ip->ip_len = 0;
2949 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2950 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2951 } else {
2952 struct ip6_hdr *ip6 =
2953 (void *)(mtod(m0, char *) + offset);
2954 th = (void *)(mtod(m0, char *) + hlen);
2955
2956 ip6->ip6_plen = 0;
2957 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2958 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2959 }
2960 tcp_hlen = th->th_off << 2;
2961 }
2962 hlen += tcp_hlen;
2963 *cmdlenp |= NQTX_CMD_TSE;
2964
2965 if (v4) {
2966 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2967 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2968 } else {
2969 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2970 *fieldsp |= NQTXD_FIELDS_TUXSM;
2971 }
2972 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2973 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2974 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2975 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2976 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2977 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2978 } else {
2979 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2980 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2981 }
2982
2983 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2984 *fieldsp |= NQTXD_FIELDS_IXSM;
2985 cmdc |= NQTXC_CMD_IP4;
2986 }
2987
2988 if (m0->m_pkthdr.csum_flags &
2989 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2990 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2991 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2992 cmdc |= NQTXC_CMD_TCP;
2993 } else {
2994 cmdc |= NQTXC_CMD_UDP;
2995 }
2996 cmdc |= NQTXC_CMD_IP4;
2997 *fieldsp |= NQTXD_FIELDS_TUXSM;
2998 }
2999 if (m0->m_pkthdr.csum_flags &
3000 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3001 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
3002 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3003 cmdc |= NQTXC_CMD_TCP;
3004 } else {
3005 cmdc |= NQTXC_CMD_UDP;
3006 }
3007 cmdc |= NQTXC_CMD_IP6;
3008 *fieldsp |= NQTXD_FIELDS_TUXSM;
3009 }
3010
3011 /* Fill in the context descriptor. */
3012 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3013 htole32(vl_len);
3014 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3015 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3016 htole32(cmdc);
3017 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3018 htole32(mssidx);
3019 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3020 DPRINTF(WM_DEBUG_TX,
3021 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3022 sc->sc_txnext, 0, vl_len));
3023 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3024 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3025 txs->txs_ndesc++;
3026 return 0;
3027 }
3028
3029 /*
3030 * wm_nq_start: [ifnet interface function]
3031 *
3032  *	Start packet transmission on the interface for NEWQUEUE devices.
3033 */
3034 static void
3035 wm_nq_start(struct ifnet *ifp)
3036 {
3037 struct wm_softc *sc = ifp->if_softc;
3038 struct mbuf *m0;
3039 struct m_tag *mtag;
3040 struct wm_txsoft *txs;
3041 bus_dmamap_t dmamap;
3042 int error, nexttx, lasttx = -1, seg, segs_needed;
3043 bool do_csum, sent;
3044
3045 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3046 return;
3047
3048 sent = false;
3049
3050 /*
3051 * Loop through the send queue, setting up transmit descriptors
3052 * until we drain the queue, or use up all available transmit
3053 * descriptors.
3054 */
3055 for (;;) {
3056 /* Grab a packet off the queue. */
3057 IFQ_POLL(&ifp->if_snd, m0);
3058 if (m0 == NULL)
3059 break;
3060
3061 DPRINTF(WM_DEBUG_TX,
3062 ("%s: TX: have packet to transmit: %p\n",
3063 device_xname(sc->sc_dev), m0));
3064
3065 /* Get a work queue entry. */
3066 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3067 wm_txintr(sc);
3068 if (sc->sc_txsfree == 0) {
3069 DPRINTF(WM_DEBUG_TX,
3070 ("%s: TX: no free job descriptors\n",
3071 device_xname(sc->sc_dev)));
3072 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3073 break;
3074 }
3075 }
3076
3077 txs = &sc->sc_txsoft[sc->sc_txsnext];
3078 dmamap = txs->txs_dmamap;
3079
3080 /*
3081 * Load the DMA map. If this fails, the packet either
3082 * didn't fit in the allotted number of segments, or we
3083 * were short on resources. For the too-many-segments
3084 * case, we simply report an error and drop the packet,
3085 * since we can't sanely copy a jumbo packet to a single
3086 * buffer.
3087 */
3088 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3089 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3090 if (error) {
3091 if (error == EFBIG) {
3092 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3093 log(LOG_ERR, "%s: Tx packet consumes too many "
3094 "DMA segments, dropping...\n",
3095 device_xname(sc->sc_dev));
3096 IFQ_DEQUEUE(&ifp->if_snd, m0);
3097 wm_dump_mbuf_chain(sc, m0);
3098 m_freem(m0);
3099 continue;
3100 }
3101 /*
3102 * Short on resources, just stop for now.
3103 */
3104 DPRINTF(WM_DEBUG_TX,
3105 ("%s: TX: dmamap load failed: %d\n",
3106 device_xname(sc->sc_dev), error));
3107 break;
3108 }
3109
3110 segs_needed = dmamap->dm_nsegs;
3111
3112 /*
3113 * Ensure we have enough descriptors free to describe
3114 * the packet. Note, we always reserve one descriptor
3115 * at the end of the ring due to the semantics of the
3116 * TDT register, plus one more in the event we need
3117 * to load offload context.
3118 */
3119 if (segs_needed > sc->sc_txfree - 2) {
3120 /*
3121 * Not enough free descriptors to transmit this
3122 * packet. We haven't committed anything yet,
3123 * so just unload the DMA map, put the packet
3124 			 * back on the queue, and punt. Notify the upper
3125 * layer that there are no more slots left.
3126 */
3127 DPRINTF(WM_DEBUG_TX,
3128 ("%s: TX: need %d (%d) descriptors, have %d\n",
3129 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3130 segs_needed, sc->sc_txfree - 1));
3131 ifp->if_flags |= IFF_OACTIVE;
3132 bus_dmamap_unload(sc->sc_dmat, dmamap);
3133 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3134 break;
3135 }
3136
3137 IFQ_DEQUEUE(&ifp->if_snd, m0);
3138
3139 /*
3140 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3141 */
3142
3143 DPRINTF(WM_DEBUG_TX,
3144 ("%s: TX: packet has %d (%d) DMA segments\n",
3145 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3146
3147 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3148
3149 /*
3150 * Store a pointer to the packet so that we can free it
3151 * later.
3152 *
3153 * Initially, we consider the number of descriptors the
3154 		 * packet uses to be the number of DMA segments. This may be
3155 * incremented by 1 if we do checksum offload (a descriptor
3156 * is used to set the checksum context).
3157 */
3158 txs->txs_mbuf = m0;
3159 txs->txs_firstdesc = sc->sc_txnext;
3160 txs->txs_ndesc = segs_needed;
3161
3162 /* Set up offload parameters for this packet. */
3163 uint32_t cmdlen, fields, dcmdlen;
3164 if (m0->m_pkthdr.csum_flags &
3165 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3166 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3167 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3168 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3169 &do_csum) != 0) {
3170 /* Error message already displayed. */
3171 bus_dmamap_unload(sc->sc_dmat, dmamap);
3172 continue;
3173 }
3174 } else {
3175 do_csum = false;
3176 cmdlen = 0;
3177 fields = 0;
3178 }
3179
3180 /* Sync the DMA map. */
3181 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3182 BUS_DMASYNC_PREWRITE);
3183
3184 /*
3185 * Initialize the first transmit descriptor.
3186 */
3187 nexttx = sc->sc_txnext;
3188 if (!do_csum) {
3189 /* setup a legacy descriptor */
3190 wm_set_dma_addr(
3191 &sc->sc_txdescs[nexttx].wtx_addr,
3192 dmamap->dm_segs[0].ds_addr);
3193 sc->sc_txdescs[nexttx].wtx_cmdlen =
3194 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3195 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3196 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3197 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3198 NULL) {
3199 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3200 htole32(WTX_CMD_VLE);
3201 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3202 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3203 } else {
3204 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3205 }
3206 dcmdlen = 0;
3207 } else {
3208 /* setup an advanced data descriptor */
3209 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3210 htole64(dmamap->dm_segs[0].ds_addr);
3211 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3212 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3213 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3214 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3215 htole32(fields);
3216 DPRINTF(WM_DEBUG_TX,
3217 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3218 device_xname(sc->sc_dev), nexttx,
3219 (uint64_t)dmamap->dm_segs[0].ds_addr));
3220 DPRINTF(WM_DEBUG_TX,
3221 ("\t 0x%08x%08x\n", fields,
3222 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3223 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3224 }
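		/*
		 * dcmdlen is OR'd into each remaining segment's
		 * descriptor: zero keeps the legacy layout, while
		 * NQTX_DTYP_D | NQTX_CMD_DEXT selects the advanced
		 * data descriptor format.
		 */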
3225
3226 lasttx = nexttx;
3227 nexttx = WM_NEXTTX(sc, nexttx);
3228 /*
3229 		 * Fill in the next descriptors. The legacy and advanced
3230 		 * formats are the same from here on.
3231 */
3232 for (seg = 1; seg < dmamap->dm_nsegs;
3233 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3234 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3235 htole64(dmamap->dm_segs[seg].ds_addr);
3236 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3237 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3238 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3239 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3240 lasttx = nexttx;
3241
3242 DPRINTF(WM_DEBUG_TX,
3243 ("%s: TX: desc %d: %#" PRIx64 ", "
3244 "len %#04zx\n",
3245 device_xname(sc->sc_dev), nexttx,
3246 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3247 dmamap->dm_segs[seg].ds_len));
3248 }
3249
3250 KASSERT(lasttx != -1);
3251
3252 /*
3253 * Set up the command byte on the last descriptor of
3254 * the packet. If we're in the interrupt delay window,
3255 * delay the interrupt.
3256 */
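		/*
		 * The legacy and advanced EOP/RS command bits share the
		 * same encoding (asserted below), so updating the legacy
		 * cmdlen field works for both descriptor formats.
		 */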
3257 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3258 (NQTX_CMD_EOP | NQTX_CMD_RS));
3259 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3260 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3261
3262 txs->txs_lastdesc = lasttx;
3263
3264 DPRINTF(WM_DEBUG_TX,
3265 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3266 device_xname(sc->sc_dev),
3267 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3268
3269 /* Sync the descriptors we're using. */
3270 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3271 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3272
3273 /* Give the packet to the chip. */
3274 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3275 sent = true;
3276
3277 DPRINTF(WM_DEBUG_TX,
3278 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3279
3280 DPRINTF(WM_DEBUG_TX,
3281 ("%s: TX: finished transmitting packet, job %d\n",
3282 device_xname(sc->sc_dev), sc->sc_txsnext));
3283
3284 /* Advance the tx pointer. */
3285 sc->sc_txfree -= txs->txs_ndesc;
3286 sc->sc_txnext = nexttx;
3287
3288 sc->sc_txsfree--;
3289 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3290
3291 /* Pass the packet to any BPF listeners. */
3292 bpf_mtap(ifp, m0);
3293 }
3294
3295 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3296 /* No more slots; notify upper layer. */
3297 ifp->if_flags |= IFF_OACTIVE;
3298 }
3299
3300 if (sent) {
3301 /* Set a watchdog timer in case the chip flakes out. */
3302 ifp->if_timer = 5;
3303 }
3304 }
3305
3306 /*
3307 * wm_watchdog: [ifnet interface function]
3308 *
3309 * Watchdog timer handler.
3310 */
3311 static void
3312 wm_watchdog(struct ifnet *ifp)
3313 {
3314 struct wm_softc *sc = ifp->if_softc;
3315
3316 /*
3317 * Since we're using delayed interrupts, sweep up
3318 * before we report an error.
3319 */
3320 wm_txintr(sc);
3321
3322 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3323 #ifdef WM_DEBUG
3324 int i, j;
3325 struct wm_txsoft *txs;
3326 #endif
3327 log(LOG_ERR,
3328 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3329 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3330 sc->sc_txnext);
3331 ifp->if_oerrors++;
3332 #ifdef WM_DEBUG
3333 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3334 i = WM_NEXTTXS(sc, i)) {
3335 txs = &sc->sc_txsoft[i];
3336 printf("txs %d tx %d -> %d\n",
3337 i, txs->txs_firstdesc, txs->txs_lastdesc);
3338 for (j = txs->txs_firstdesc; ;
3339 j = WM_NEXTTX(sc, j)) {
3340 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3341 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3342 printf("\t %#08x%08x\n",
3343 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3344 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3345 if (j == txs->txs_lastdesc)
3346 break;
3347 }
3348 }
3349 #endif
3350 /* Reset the interface. */
3351 (void) wm_init(ifp);
3352 }
3353
3354 /* Try to get more packets going. */
3355 ifp->if_start(ifp);
3356 }
3357
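/*
 * wm_ifflags_cb:
 *
 *	Callback for interface flag changes; returns ENETRESET when
 *	the change requires a full reinitialization.
 */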
3358 static int
3359 wm_ifflags_cb(struct ethercom *ec)
3360 {
3361 struct ifnet *ifp = &ec->ec_if;
3362 struct wm_softc *sc = ifp->if_softc;
3363 int change = ifp->if_flags ^ sc->sc_if_flags;
3364
3365 if (change != 0)
3366 sc->sc_if_flags = ifp->if_flags;
3367
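	/*
	 * Flag changes we can handle here are promiscuous/allmulti (Rx
	 * filter update) and debug; anything else returns ENETRESET so
	 * the caller reinitializes the interface.
	 */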
3368 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3369 return ENETRESET;
3370
3371 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3372 wm_set_filter(sc);
3373
3374 wm_set_vlan(sc);
3375
3376 return 0;
3377 }
3378
3379 /*
3380 * wm_ioctl: [ifnet interface function]
3381 *
3382 * Handle control requests from the operator.
3383 */
3384 static int
3385 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3386 {
3387 struct wm_softc *sc = ifp->if_softc;
3388 struct ifreq *ifr = (struct ifreq *) data;
3389 struct ifaddr *ifa = (struct ifaddr *)data;
3390 struct sockaddr_dl *sdl;
3391 int s, error;
3392
3393 s = splnet();
3394
3395 switch (cmd) {
3396 case SIOCSIFMEDIA:
3397 case SIOCGIFMEDIA:
3398 /* Flow control requires full-duplex mode. */
3399 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3400 (ifr->ifr_media & IFM_FDX) == 0)
3401 ifr->ifr_media &= ~IFM_ETH_FMASK;
3402 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3403 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3404 /* We can do both TXPAUSE and RXPAUSE. */
3405 ifr->ifr_media |=
3406 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3407 }
3408 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3409 }
3410 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3411 break;
3412 case SIOCINITIFADDR:
3413 if (ifa->ifa_addr->sa_family == AF_LINK) {
3414 sdl = satosdl(ifp->if_dl->ifa_addr);
3415 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3416 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3417 /* unicast address is first multicast entry */
3418 wm_set_filter(sc);
3419 error = 0;
3420 break;
3421 }
3422 /*FALLTHROUGH*/
3423 default:
3424 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3425 break;
3426
3427 error = 0;
3428
3429 if (cmd == SIOCSIFCAP)
3430 error = (*ifp->if_init)(ifp);
3431 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3432 ;
3433 else if (ifp->if_flags & IFF_RUNNING) {
3434 /*
3435 * Multicast list has changed; set the hardware filter
3436 * accordingly.
3437 */
3438 wm_set_filter(sc);
3439 }
3440 break;
3441 }
3442
3443 /* Try to get more packets going. */
3444 ifp->if_start(ifp);
3445
3446 splx(s);
3447 return error;
3448 }
3449
3450 /*
3451 * wm_intr:
3452 *
3453 * Interrupt service routine.
3454 */
3455 static int
3456 wm_intr(void *arg)
3457 {
3458 struct wm_softc *sc = arg;
3459 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3460 uint32_t icr;
3461 int handled = 0;
3462
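	/*
	 * Reading ICR acknowledges and clears the asserted causes, so
	 * keep looping until no interrupt cause we care about remains.
	 */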
3463 while (1 /* CONSTCOND */) {
3464 icr = CSR_READ(sc, WMREG_ICR);
3465 if ((icr & sc->sc_icr) == 0)
3466 break;
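		/* Stir the interrupt status into the entropy pool. */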
3467 rnd_add_uint32(&sc->rnd_source, icr);
3468
3469 handled = 1;
3470
3471 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3472 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3473 DPRINTF(WM_DEBUG_RX,
3474 ("%s: RX: got Rx intr 0x%08x\n",
3475 device_xname(sc->sc_dev),
3476 icr & (ICR_RXDMT0|ICR_RXT0)));
3477 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3478 }
3479 #endif
3480 wm_rxintr(sc);
3481
3482 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3483 if (icr & ICR_TXDW) {
3484 DPRINTF(WM_DEBUG_TX,
3485 ("%s: TX: got TXDW interrupt\n",
3486 device_xname(sc->sc_dev)));
3487 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3488 }
3489 #endif
3490 wm_txintr(sc);
3491
3492 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3493 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3494 wm_linkintr(sc, icr);
3495 }
3496
3497 if (icr & ICR_RXO) {
3498 #if defined(WM_DEBUG)
3499 log(LOG_WARNING, "%s: Receive overrun\n",
3500 device_xname(sc->sc_dev));
3501 #endif /* defined(WM_DEBUG) */
3502 }
3503 }
3504
3505 if (handled) {
3506 /* Try to get more packets going. */
3507 ifp->if_start(ifp);
3508 }
3509
3510 return handled;
3511 }
3512
3513 /*
3514 * wm_txintr:
3515 *
3516 * Helper; handle transmit interrupts.
3517 */
3518 static void
3519 wm_txintr(struct wm_softc *sc)
3520 {
3521 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3522 struct wm_txsoft *txs;
3523 uint8_t status;
3524 int i;
3525
3526 ifp->if_flags &= ~IFF_OACTIVE;
3527
3528 /*
3529 * Go through the Tx list and free mbufs for those
3530 * frames which have been transmitted.
3531 */
3532 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3533 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3534 txs = &sc->sc_txsoft[i];
3535
3536 DPRINTF(WM_DEBUG_TX,
3537 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3538
3539 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3540 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3541
3542 status =
3543 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3544 if ((status & WTX_ST_DD) == 0) {
3545 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3546 BUS_DMASYNC_PREREAD);
3547 break;
3548 }
3549
3550 DPRINTF(WM_DEBUG_TX,
3551 ("%s: TX: job %d done: descs %d..%d\n",
3552 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3553 txs->txs_lastdesc));
3554
3555 /*
3556 * XXX We should probably be using the statistics
3557 * XXX registers, but I don't know if they exist
3558 * XXX on chips before the i82544.
3559 */
3560
3561 #ifdef WM_EVENT_COUNTERS
3562 if (status & WTX_ST_TU)
3563 WM_EVCNT_INCR(&sc->sc_ev_tu);
3564 #endif /* WM_EVENT_COUNTERS */
3565
3566 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3567 ifp->if_oerrors++;
3568 if (status & WTX_ST_LC)
3569 log(LOG_WARNING, "%s: late collision\n",
3570 device_xname(sc->sc_dev));
3571 else if (status & WTX_ST_EC) {
3572 ifp->if_collisions += 16;
3573 log(LOG_WARNING, "%s: excessive collisions\n",
3574 device_xname(sc->sc_dev));
3575 }
3576 } else
3577 ifp->if_opackets++;
3578
3579 sc->sc_txfree += txs->txs_ndesc;
3580 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3581 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3582 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3583 m_freem(txs->txs_mbuf);
3584 txs->txs_mbuf = NULL;
3585 }
3586
3587 /* Update the dirty transmit buffer pointer. */
3588 sc->sc_txsdirty = i;
3589 DPRINTF(WM_DEBUG_TX,
3590 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3591
3592 /*
3593 * If there are no more pending transmissions, cancel the watchdog
3594 * timer.
3595 */
3596 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3597 ifp->if_timer = 0;
3598 }
3599
3600 /*
3601 * wm_rxintr:
3602 *
3603 * Helper; handle receive interrupts.
3604 */
3605 static void
3606 wm_rxintr(struct wm_softc *sc)
3607 {
3608 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3609 struct wm_rxsoft *rxs;
3610 struct mbuf *m;
3611 int i, len;
3612 uint8_t status, errors;
3613 uint16_t vlantag;
3614
3615 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3616 rxs = &sc->sc_rxsoft[i];
3617
3618 DPRINTF(WM_DEBUG_RX,
3619 ("%s: RX: checking descriptor %d\n",
3620 device_xname(sc->sc_dev), i));
3621
3622 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3623
3624 status = sc->sc_rxdescs[i].wrx_status;
3625 errors = sc->sc_rxdescs[i].wrx_errors;
3626 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3627 vlantag = sc->sc_rxdescs[i].wrx_special;
3628
3629 if ((status & WRX_ST_DD) == 0) {
3630 /*
3631 * We have processed all of the receive descriptors.
3632 */
3633 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3634 break;
3635 }
3636
3637 if (__predict_false(sc->sc_rxdiscard)) {
3638 DPRINTF(WM_DEBUG_RX,
3639 ("%s: RX: discarding contents of descriptor %d\n",
3640 device_xname(sc->sc_dev), i));
3641 WM_INIT_RXDESC(sc, i);
3642 if (status & WRX_ST_EOP) {
3643 /* Reset our state. */
3644 DPRINTF(WM_DEBUG_RX,
3645 ("%s: RX: resetting rxdiscard -> 0\n",
3646 device_xname(sc->sc_dev)));
3647 sc->sc_rxdiscard = 0;
3648 }
3649 continue;
3650 }
3651
3652 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3653 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3654
3655 m = rxs->rxs_mbuf;
3656
3657 /*
3658 * Add a new receive buffer to the ring, unless of
3659 * course the length is zero. Treat the latter as a
3660 * failed mapping.
3661 */
3662 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3663 /*
3664 * Failed, throw away what we've done so
3665 * far, and discard the rest of the packet.
3666 */
3667 ifp->if_ierrors++;
3668 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3669 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3670 WM_INIT_RXDESC(sc, i);
3671 if ((status & WRX_ST_EOP) == 0)
3672 sc->sc_rxdiscard = 1;
3673 if (sc->sc_rxhead != NULL)
3674 m_freem(sc->sc_rxhead);
3675 WM_RXCHAIN_RESET(sc);
3676 DPRINTF(WM_DEBUG_RX,
3677 ("%s: RX: Rx buffer allocation failed, "
3678 "dropping packet%s\n", device_xname(sc->sc_dev),
3679 sc->sc_rxdiscard ? " (discard)" : ""));
3680 continue;
3681 }
3682
3683 m->m_len = len;
3684 sc->sc_rxlen += len;
3685 DPRINTF(WM_DEBUG_RX,
3686 ("%s: RX: buffer at %p len %d\n",
3687 device_xname(sc->sc_dev), m->m_data, len));
3688
3689 /*
3690 * If this is not the end of the packet, keep
3691 * looking.
3692 */
3693 if ((status & WRX_ST_EOP) == 0) {
3694 WM_RXCHAIN_LINK(sc, m);
3695 DPRINTF(WM_DEBUG_RX,
3696 ("%s: RX: not yet EOP, rxlen -> %d\n",
3697 device_xname(sc->sc_dev), sc->sc_rxlen));
3698 continue;
3699 }
3700
3701			/*
3702			 * Okay, we have the entire packet now. The chip is
3703			 * configured to include the FCS on all chips except the
3704			 * I350 and I21[01] (not every chip can be told to strip
3705			 * it), so we need to trim it off. We may also need to
3706			 * adjust the length of the previous mbuf in the chain
3707			 * if the current mbuf is too short.
3708			 * Due to a hardware erratum, the RCTL_SECRC bit in the
3709			 * RCTL register is always set on the I350, so we don't
3710			 * trim the FCS there.
			 */
3711 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I210)
3712 && (sc->sc_type != WM_T_I211)) {
3713 if (m->m_len < ETHER_CRC_LEN) {
3714 sc->sc_rxtail->m_len
3715 -= (ETHER_CRC_LEN - m->m_len);
3716 m->m_len = 0;
3717 } else
3718 m->m_len -= ETHER_CRC_LEN;
3719 len = sc->sc_rxlen - ETHER_CRC_LEN;
3720 } else
3721 len = sc->sc_rxlen;
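		/*
		 * Illustrative example of the adjustment above: with
		 * ETHER_CRC_LEN == 4, if the final mbuf holds only 1 byte,
		 * the remaining 3 CRC bytes sit at the end of the previous
		 * mbuf, so sc_rxtail->m_len is shortened by 3 and the final
		 * mbuf is emptied.
		 */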
3722
3723 WM_RXCHAIN_LINK(sc, m);
3724
3725 *sc->sc_rxtailp = NULL;
3726 m = sc->sc_rxhead;
3727
3728 WM_RXCHAIN_RESET(sc);
3729
3730 DPRINTF(WM_DEBUG_RX,
3731 ("%s: RX: have entire packet, len -> %d\n",
3732 device_xname(sc->sc_dev), len));
3733
3734 /*
3735 * If an error occurred, update stats and drop the packet.
3736 */
3737 if (errors &
3738 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3739 if (errors & WRX_ER_SE)
3740 log(LOG_WARNING, "%s: symbol error\n",
3741 device_xname(sc->sc_dev));
3742 else if (errors & WRX_ER_SEQ)
3743 log(LOG_WARNING, "%s: receive sequence error\n",
3744 device_xname(sc->sc_dev));
3745 else if (errors & WRX_ER_CE)
3746 log(LOG_WARNING, "%s: CRC error\n",
3747 device_xname(sc->sc_dev));
3748 m_freem(m);
3749 continue;
3750 }
3751
3752 /*
3753 * No errors. Receive the packet.
3754 */
3755 m->m_pkthdr.rcvif = ifp;
3756 m->m_pkthdr.len = len;
3757
3758 /*
3759 * If VLANs are enabled, VLAN packets have been unwrapped
3760 * for us. Associate the tag with the packet.
3761 */
3762 if ((status & WRX_ST_VP) != 0) {
3763 VLAN_INPUT_TAG(ifp, m,
3764 le16toh(vlantag),
3765 continue);
3766 }
3767
3768 /*
3769 * Set up checksum info for this packet.
3770 */
3771 if ((status & WRX_ST_IXSM) == 0) {
3772 if (status & WRX_ST_IPCS) {
3773 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3774 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3775 if (errors & WRX_ER_IPE)
3776 m->m_pkthdr.csum_flags |=
3777 M_CSUM_IPv4_BAD;
3778 }
3779 if (status & WRX_ST_TCPCS) {
3780 /*
3781 * Note: we don't know if this was TCP or UDP,
3782 * so we just set both bits, and expect the
3783 * upper layers to deal.
3784 */
3785 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3786 m->m_pkthdr.csum_flags |=
3787 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3788 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3789 if (errors & WRX_ER_TCPE)
3790 m->m_pkthdr.csum_flags |=
3791 M_CSUM_TCP_UDP_BAD;
3792 }
3793 }
3794
3795 ifp->if_ipackets++;
3796
3797 /* Pass this up to any BPF listeners. */
3798 bpf_mtap(ifp, m);
3799
3800 /* Pass it on. */
3801 (*ifp->if_input)(ifp, m);
3802 }
3803
3804 /* Update the receive pointer. */
3805 sc->sc_rxptr = i;
3806
3807 DPRINTF(WM_DEBUG_RX,
3808 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3809 }
3810
3811 /*
3812 * wm_linkintr_gmii:
3813 *
3814 * Helper; handle link interrupts for GMII.
3815 */
3816 static void
3817 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3818 {
3819
3820 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3821 __func__));
3822
3823 if (icr & ICR_LSC) {
3824 DPRINTF(WM_DEBUG_LINK,
3825 ("%s: LINK: LSC -> mii_tick\n",
3826 device_xname(sc->sc_dev)));
3827 mii_tick(&sc->sc_mii);
3828 if (sc->sc_type == WM_T_82543) {
3829 int miistatus, active;
3830
3831 /*
3832 * With 82543, we need to force speed and
3833 * duplex on the MAC equal to what the PHY
3834 * speed and duplex configuration is.
3835 */
3836 miistatus = sc->sc_mii.mii_media_status;
3837
3838 if (miistatus & IFM_ACTIVE) {
3839 active = sc->sc_mii.mii_media_active;
3840 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3841 switch (IFM_SUBTYPE(active)) {
3842 case IFM_10_T:
3843 sc->sc_ctrl |= CTRL_SPEED_10;
3844 break;
3845 case IFM_100_TX:
3846 sc->sc_ctrl |= CTRL_SPEED_100;
3847 break;
3848 case IFM_1000_T:
3849 sc->sc_ctrl |= CTRL_SPEED_1000;
3850 break;
3851 default:
3852 /*
3853 * fiber?
3854					 * Should not enter here.
3855 */
3856 printf("unknown media (%x)\n",
3857 active);
3858 break;
3859 }
3860 if (active & IFM_FDX)
3861 sc->sc_ctrl |= CTRL_FD;
3862 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3863 }
3864 } else if ((sc->sc_type == WM_T_ICH8)
3865 && (sc->sc_phytype == WMPHY_IGP_3)) {
3866 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3867 } else if (sc->sc_type == WM_T_PCH) {
3868 wm_k1_gig_workaround_hv(sc,
3869 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3870 }
3871
3872 if ((sc->sc_phytype == WMPHY_82578)
3873 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3874 == IFM_1000_T)) {
3875
3876 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3877 delay(200*1000); /* XXX too big */
3878
3879 /* Link stall fix for link up */
3880 wm_gmii_hv_writereg(sc->sc_dev, 1,
3881 HV_MUX_DATA_CTRL,
3882 HV_MUX_DATA_CTRL_GEN_TO_MAC
3883 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3884 wm_gmii_hv_writereg(sc->sc_dev, 1,
3885 HV_MUX_DATA_CTRL,
3886 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3887 }
3888 }
3889 } else if (icr & ICR_RXSEQ) {
3890 DPRINTF(WM_DEBUG_LINK,
3891 ("%s: LINK Receive sequence error\n",
3892 device_xname(sc->sc_dev)));
3893 }
3894 }
3895
3896 /*
3897 * wm_linkintr_tbi:
3898 *
3899 * Helper; handle link interrupts for TBI mode.
3900 */
3901 static void
3902 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3903 {
3904 uint32_t status;
3905
3906 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3907 __func__));
3908
3909 status = CSR_READ(sc, WMREG_STATUS);
3910 if (icr & ICR_LSC) {
3911 if (status & STATUS_LU) {
3912 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3913 device_xname(sc->sc_dev),
3914 (status & STATUS_FD) ? "FDX" : "HDX"));
3915 /*
3916			 * NOTE: the hardware updates TFCE and RFCE in CTRL
3917			 * automatically, so we must re-read it into sc->sc_ctrl.
3918 */
3919
3920 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3921 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3922 sc->sc_fcrtl &= ~FCRTL_XONE;
3923 if (status & STATUS_FD)
3924 sc->sc_tctl |=
3925 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3926 else
3927 sc->sc_tctl |=
3928 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3929 if (sc->sc_ctrl & CTRL_TFCE)
3930 sc->sc_fcrtl |= FCRTL_XONE;
3931 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3932 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3933 WMREG_OLD_FCRTL : WMREG_FCRTL,
3934 sc->sc_fcrtl);
3935 sc->sc_tbi_linkup = 1;
3936 } else {
3937 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3938 device_xname(sc->sc_dev)));
3939 sc->sc_tbi_linkup = 0;
3940 }
3941 wm_tbi_set_linkled(sc);
3942 } else if (icr & ICR_RXCFG) {
3943 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3944 device_xname(sc->sc_dev)));
3945 sc->sc_tbi_nrxcfg++;
3946 wm_check_for_link(sc);
3947 } else if (icr & ICR_RXSEQ) {
3948 DPRINTF(WM_DEBUG_LINK,
3949 ("%s: LINK: Receive sequence error\n",
3950 device_xname(sc->sc_dev)));
3951 }
3952 }
3953
3954 /*
3955 * wm_linkintr:
3956 *
3957 * Helper; handle link interrupts.
3958 */
3959 static void
3960 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3961 {
3962
3963 if (sc->sc_flags & WM_F_HAS_MII)
3964 wm_linkintr_gmii(sc, icr);
3965 else
3966 wm_linkintr_tbi(sc, icr);
3967 }
3968
3969 /*
3970 * wm_tick:
3971 *
3972 * One second timer, used to check link status, sweep up
3973 * completed transmit jobs, etc.
3974 */
3975 static void
3976 wm_tick(void *arg)
3977 {
3978 struct wm_softc *sc = arg;
3979 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3980 int s;
3981
3982 s = splnet();
3983
3984 if (sc->sc_type >= WM_T_82542_2_1) {
3985 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3986 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3987 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3988 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3989 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3990 }
3991
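	/*
	 * The statistics registers read below clear on read, so adding
	 * them to the interface counters once per tick does not
	 * double-count.
	 */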
3992 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3993 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3994 + CSR_READ(sc, WMREG_CRCERRS)
3995 + CSR_READ(sc, WMREG_ALGNERRC)
3996 + CSR_READ(sc, WMREG_SYMERRC)
3997 + CSR_READ(sc, WMREG_RXERRC)
3998 + CSR_READ(sc, WMREG_SEC)
3999 + CSR_READ(sc, WMREG_CEXTERR)
4000 + CSR_READ(sc, WMREG_RLEC);
4001 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4002
4003 if (sc->sc_flags & WM_F_HAS_MII)
4004 mii_tick(&sc->sc_mii);
4005 else
4006 wm_tbi_check_link(sc);
4007
4008 splx(s);
4009
4010 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4011 }
4012
4013 /*
4014 * wm_reset:
4015 *
4016 * Reset the i82542 chip.
4017 */
4018 static void
4019 wm_reset(struct wm_softc *sc)
4020 {
4021 int phy_reset = 0;
4022 uint32_t reg, mask;
4023 int i;
4024
4025 /*
4026 * Allocate on-chip memory according to the MTU size.
4027 * The Packet Buffer Allocation register must be written
4028 * before the chip is reset.
4029 */
4030 switch (sc->sc_type) {
4031 case WM_T_82547:
4032 case WM_T_82547_2:
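		/*
		 * Note: the 82547 has a single 40 KB packet buffer shared
		 * between receive (sc_pba) and the transmit FIFO, which is
		 * why the TX FIFO size below is PBA_40K minus sc_pba.
		 */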
4033 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4034 PBA_22K : PBA_30K;
4035 sc->sc_txfifo_head = 0;
4036 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4037 sc->sc_txfifo_size =
4038 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4039 sc->sc_txfifo_stall = 0;
4040 break;
4041 case WM_T_82571:
4042 case WM_T_82572:
4043	case WM_T_82575:	/* XXX need special handling for jumbo frames */
4044 case WM_T_I350:
4045 case WM_T_80003:
4046 sc->sc_pba = PBA_32K;
4047 break;
4048 case WM_T_82580:
4049 case WM_T_82580ER:
4050 sc->sc_pba = PBA_35K;
4051 break;
4052 case WM_T_I210:
4053 case WM_T_I211:
4054 sc->sc_pba = PBA_34K;
4055 break;
4056 case WM_T_82576:
4057 sc->sc_pba = PBA_64K;
4058 break;
4059 case WM_T_82573:
4060 sc->sc_pba = PBA_12K;
4061 break;
4062 case WM_T_82574:
4063 case WM_T_82583:
4064 sc->sc_pba = PBA_20K;
4065 break;
4066 case WM_T_ICH8:
4067 sc->sc_pba = PBA_8K;
4068 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4069 break;
4070 case WM_T_ICH9:
4071 case WM_T_ICH10:
4072 sc->sc_pba = PBA_10K;
4073 break;
4074 case WM_T_PCH:
4075 case WM_T_PCH2:
4076 case WM_T_PCH_LPT:
4077 sc->sc_pba = PBA_26K;
4078 break;
4079 default:
4080 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4081 PBA_40K : PBA_48K;
4082 break;
4083 }
4084 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4085
4086 /* Prevent the PCI-E bus from sticking */
4087 if (sc->sc_flags & WM_F_PCIE) {
4088 int timeout = 800;
4089
4090 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4091 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4092
4093 while (timeout--) {
4094 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4095 == 0)
4096 break;
4097 delay(100);
4098 }
4099 }
4100
4101	/* Set the PCIe completion timeout for the interface */
4102 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4103 || (sc->sc_type == WM_T_I350))
4104 wm_set_pcie_completion_timeout(sc);
4105
4106 /* Clear interrupt */
4107 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4108
4109 /* Stop the transmit and receive processes. */
4110 CSR_WRITE(sc, WMREG_RCTL, 0);
4111 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4112 sc->sc_rctl &= ~RCTL_EN;
4113
4114 /* XXX set_tbi_sbp_82543() */
4115
4116 delay(10*1000);
4117
4118 /* Must acquire the MDIO ownership before MAC reset */
4119 switch (sc->sc_type) {
4120 case WM_T_82573:
4121 case WM_T_82574:
4122 case WM_T_82583:
4123 i = 0;
4124 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4125 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4126 do {
4127 CSR_WRITE(sc, WMREG_EXTCNFCTR,
4128 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4129 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4130 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4131 break;
4132 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4133 delay(2*1000);
4134 i++;
4135 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4136 break;
4137 default:
4138 break;
4139 }
4140
4141 /*
4142 * 82541 Errata 29? & 82547 Errata 28?
4143 * See also the description about PHY_RST bit in CTRL register
4144 * in 8254x_GBe_SDM.pdf.
4145 */
4146 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4147 CSR_WRITE(sc, WMREG_CTRL,
4148 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4149 delay(5000);
4150 }
4151
4152 switch (sc->sc_type) {
4153 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4154 case WM_T_82541:
4155 case WM_T_82541_2:
4156 case WM_T_82547:
4157 case WM_T_82547_2:
4158 /*
4159 * On some chipsets, a reset through a memory-mapped write
4160 * cycle can cause the chip to reset before completing the
4161		 * write cycle. This causes a major headache that can be
4162 * avoided by issuing the reset via indirect register writes
4163 * through I/O space.
4164 *
4165 * So, if we successfully mapped the I/O BAR at attach time,
4166 * use that. Otherwise, try our luck with a memory-mapped
4167 * reset.
4168 */
4169 if (sc->sc_flags & WM_F_IOH_VALID)
4170 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4171 else
4172 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4173 break;
4174 case WM_T_82545_3:
4175 case WM_T_82546_3:
4176 /* Use the shadow control register on these chips. */
4177 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4178 break;
4179 case WM_T_80003:
4180 mask = swfwphysem[sc->sc_funcid];
4181 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4182 wm_get_swfw_semaphore(sc, mask);
4183 CSR_WRITE(sc, WMREG_CTRL, reg);
4184 wm_put_swfw_semaphore(sc, mask);
4185 break;
4186 case WM_T_ICH8:
4187 case WM_T_ICH9:
4188 case WM_T_ICH10:
4189 case WM_T_PCH:
4190 case WM_T_PCH2:
4191 case WM_T_PCH_LPT:
4192 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4193 if (wm_check_reset_block(sc) == 0) {
4194 /*
4195 * Gate automatic PHY configuration by hardware on
4196 * non-managed 82579
4197 */
4198 if ((sc->sc_type == WM_T_PCH2)
4199 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4200 != 0))
4201 wm_gate_hw_phy_config_ich8lan(sc, 1);
4202
4204 reg |= CTRL_PHY_RESET;
4205 phy_reset = 1;
4206 }
4207 wm_get_swfwhw_semaphore(sc);
4208 CSR_WRITE(sc, WMREG_CTRL, reg);
4209 delay(20*1000);
4210 wm_put_swfwhw_semaphore(sc);
4211 break;
4212 case WM_T_82542_2_0:
4213 case WM_T_82542_2_1:
4214 case WM_T_82543:
4215 case WM_T_82540:
4216 case WM_T_82545:
4217 case WM_T_82546:
4218 case WM_T_82571:
4219 case WM_T_82572:
4220 case WM_T_82573:
4221 case WM_T_82574:
4222 case WM_T_82575:
4223 case WM_T_82576:
4224 case WM_T_82580:
4225 case WM_T_82580ER:
4226 case WM_T_82583:
4227 case WM_T_I350:
4228 case WM_T_I210:
4229 case WM_T_I211:
4230 default:
4231 /* Everything else can safely use the documented method. */
4232 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4233 break;
4234 }
4235
4236 if (phy_reset != 0)
4237 wm_get_cfg_done(sc);
4238
4239 /* reload EEPROM */
4240 switch (sc->sc_type) {
4241 case WM_T_82542_2_0:
4242 case WM_T_82542_2_1:
4243 case WM_T_82543:
4244 case WM_T_82544:
4245 delay(10);
4246 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4247 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4248 delay(2000);
4249 break;
4250 case WM_T_82540:
4251 case WM_T_82545:
4252 case WM_T_82545_3:
4253 case WM_T_82546:
4254 case WM_T_82546_3:
4255 delay(5*1000);
4256 /* XXX Disable HW ARPs on ASF enabled adapters */
4257 break;
4258 case WM_T_82541:
4259 case WM_T_82541_2:
4260 case WM_T_82547:
4261 case WM_T_82547_2:
4262 delay(20000);
4263 /* XXX Disable HW ARPs on ASF enabled adapters */
4264 break;
4265 case WM_T_82571:
4266 case WM_T_82572:
4267 case WM_T_82573:
4268 case WM_T_82574:
4269 case WM_T_82583:
4270 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4271 delay(10);
4272 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4273 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4274 }
4275 /* check EECD_EE_AUTORD */
4276 wm_get_auto_rd_done(sc);
4277 /*
4278		 * PHY configuration from the NVM starts just after
4279		 * EECD_EE_AUTORD is set.
4280 */
4281 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4282 || (sc->sc_type == WM_T_82583))
4283 delay(25*1000);
4284 break;
4285 case WM_T_82575:
4286 case WM_T_82576:
4287 case WM_T_82580:
4288 case WM_T_82580ER:
4289 case WM_T_I350:
4290 case WM_T_I210:
4291 case WM_T_I211:
4292 case WM_T_80003:
4293 case WM_T_ICH8:
4294 case WM_T_ICH9:
4295 /* check EECD_EE_AUTORD */
4296 wm_get_auto_rd_done(sc);
4297 break;
4298 case WM_T_ICH10:
4299 case WM_T_PCH:
4300 case WM_T_PCH2:
4301 case WM_T_PCH_LPT:
4302 wm_lan_init_done(sc);
4303 break;
4304 default:
4305 panic("%s: unknown type\n", __func__);
4306 }
4307
4308 /* Check whether EEPROM is present or not */
4309 switch (sc->sc_type) {
4310 case WM_T_82575:
4311 case WM_T_82576:
4312 #if 0 /* XXX */
4313 case WM_T_82580:
4314 case WM_T_82580ER:
4315 #endif
4316 case WM_T_I350:
4317 case WM_T_ICH8:
4318 case WM_T_ICH9:
4319 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4320 /* Not found */
4321 sc->sc_flags |= WM_F_EEPROM_INVALID;
4322 if ((sc->sc_type == WM_T_82575)
4323 || (sc->sc_type == WM_T_82576)
4324 || (sc->sc_type == WM_T_82580)
4325 || (sc->sc_type == WM_T_82580ER)
4326 || (sc->sc_type == WM_T_I350))
4327 wm_reset_init_script_82575(sc);
4328 }
4329 break;
4330 default:
4331 break;
4332 }
4333
4334 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4335 || (sc->sc_type == WM_T_I350)) {
4336 /* clear global device reset status bit */
4337 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4338 }
4339
4340 /* Clear any pending interrupt events. */
4341 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4342 reg = CSR_READ(sc, WMREG_ICR);
4343
4344 /* reload sc_ctrl */
4345 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4346
4347 if (sc->sc_type == WM_T_I350)
4348 wm_set_eee_i350(sc);
4349
4350 /* dummy read from WUC */
4351 if (sc->sc_type == WM_T_PCH)
4352 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4353 /*
4354 * For PCH, this write will make sure that any noise will be detected
4355 * as a CRC error and be dropped rather than show up as a bad packet
4356 * to the DMA engine
4357 */
4358 if (sc->sc_type == WM_T_PCH)
4359 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4360
4361 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4362 CSR_WRITE(sc, WMREG_WUC, 0);
4363
4364 /* XXX need special handling for 82580 */
4365 }
4366
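/*
 * wm_set_vlan:
 *
 *	Enable or disable hardware VLAN tagging (CTRL_VME) to match
 *	the ethercom state.
 */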
4367 static void
4368 wm_set_vlan(struct wm_softc *sc)
4369 {
4370 /* Deal with VLAN enables. */
4371 if (VLAN_ATTACHED(&sc->sc_ethercom))
4372 sc->sc_ctrl |= CTRL_VME;
4373 else
4374 sc->sc_ctrl &= ~CTRL_VME;
4375
4376 /* Write the control registers. */
4377 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4378 }
4379
4380 /*
4381 * wm_init: [ifnet interface function]
4382 *
4383 * Initialize the interface. Must be called at splnet().
4384 */
4385 static int
4386 wm_init(struct ifnet *ifp)
4387 {
4388 struct wm_softc *sc = ifp->if_softc;
4389 struct wm_rxsoft *rxs;
4390 int i, j, trynum, error = 0;
4391 uint32_t reg;
4392
4393 /*
4394	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4395	 * There is a small but measurable benefit to avoiding the adjustment
4396 * of the descriptor so that the headers are aligned, for normal mtu,
4397 * on such platforms. One possibility is that the DMA itself is
4398 * slightly more efficient if the front of the entire packet (instead
4399 * of the front of the headers) is aligned.
4400 *
4401 * Note we must always set align_tweak to 0 if we are using
4402 * jumbo frames.
4403 */
4404 #ifdef __NO_STRICT_ALIGNMENT
4405 sc->sc_align_tweak = 0;
4406 #else
4407 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4408 sc->sc_align_tweak = 0;
4409 else
4410 sc->sc_align_tweak = 2;
4411 #endif /* __NO_STRICT_ALIGNMENT */
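	/*
	 * Example: with a 2-byte tweak the 14-byte Ethernet header ends
	 * on a 4-byte boundary, so the IP header that follows is 32-bit
	 * aligned for strict-alignment CPUs.
	 */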
4412
4413 /* Cancel any pending I/O. */
4414 wm_stop(ifp, 0);
4415
4416 /* update statistics before reset */
4417 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4418 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4419
4420 /* Reset the chip to a known state. */
4421 wm_reset(sc);
4422
4423 switch (sc->sc_type) {
4424 case WM_T_82571:
4425 case WM_T_82572:
4426 case WM_T_82573:
4427 case WM_T_82574:
4428 case WM_T_82583:
4429 case WM_T_80003:
4430 case WM_T_ICH8:
4431 case WM_T_ICH9:
4432 case WM_T_ICH10:
4433 case WM_T_PCH:
4434 case WM_T_PCH2:
4435 case WM_T_PCH_LPT:
4436 if (wm_check_mng_mode(sc) != 0)
4437 wm_get_hw_control(sc);
4438 break;
4439 default:
4440 break;
4441 }
4442
4443 /* Reset the PHY. */
4444 if (sc->sc_flags & WM_F_HAS_MII)
4445 wm_gmii_reset(sc);
4446
4447 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4448 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4449 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
4450 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4451
4452 /* Initialize the transmit descriptor ring. */
4453 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4454 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4455 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4456 sc->sc_txfree = WM_NTXDESC(sc);
4457 sc->sc_txnext = 0;
4458
4459 if (sc->sc_type < WM_T_82543) {
4460 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4461 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4462 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4463 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4464 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4465 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4466 } else {
4467 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4468 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4469 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4470 CSR_WRITE(sc, WMREG_TDH, 0);
4471 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4472 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4473
4474 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4475 /*
4476 * Don't write TDT before TCTL.EN is set.
4477			 * See the documentation.
4478 */
4479 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4480 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4481 | TXDCTL_WTHRESH(0));
4482 else {
4483 CSR_WRITE(sc, WMREG_TDT, 0);
4484 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4485 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4486 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4487 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4488 }
4489 }
4490 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4491 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4492
4493 /* Initialize the transmit job descriptors. */
4494 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4495 sc->sc_txsoft[i].txs_mbuf = NULL;
4496 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4497 sc->sc_txsnext = 0;
4498 sc->sc_txsdirty = 0;
4499
4500 /*
4501 * Initialize the receive descriptor and receive job
4502 * descriptor rings.
4503 */
4504 if (sc->sc_type < WM_T_82543) {
4505 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4506 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4507 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4508 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4509 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4510 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4511
4512 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4513 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4514 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4515 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4516 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4517 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4518 } else {
4519 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4520 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4521 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4522 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4523 CSR_WRITE(sc, WMREG_EITR(0), 450);
4524 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4525				panic("%s: MCLBYTES %d unsupported on 82575 or newer\n", __func__, MCLBYTES);
4526 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4527 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
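			/*
			 * SRRCTL expresses the buffer size in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes (presumably
			 * 1 KB), hence the multiple-of-unit panic above;
			 * with 2048-byte clusters the field value is 2.
			 */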
4528 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4529 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4530 | RXDCTL_WTHRESH(1));
4531 } else {
4532 CSR_WRITE(sc, WMREG_RDH, 0);
4533 CSR_WRITE(sc, WMREG_RDT, 0);
4534 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4535 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4536 }
4537 }
4538 for (i = 0; i < WM_NRXDESC; i++) {
4539 rxs = &sc->sc_rxsoft[i];
4540 if (rxs->rxs_mbuf == NULL) {
4541 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4542 log(LOG_ERR, "%s: unable to allocate or map "
4543 "rx buffer %d, error = %d\n",
4544 device_xname(sc->sc_dev), i, error);
4545 /*
4546 * XXX Should attempt to run with fewer receive
4547 * XXX buffers instead of just failing.
4548 */
4549 wm_rxdrain(sc);
4550 goto out;
4551 }
4552 } else {
4553 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4554 WM_INIT_RXDESC(sc, i);
4555 /*
4556			 * On 82575 and newer devices, the RX descriptors
4557			 * must be initialized after RCTL.EN is set in
4558			 * wm_set_filter().
4559 */
4560 }
4561 }
4562 sc->sc_rxptr = 0;
4563 sc->sc_rxdiscard = 0;
4564 WM_RXCHAIN_RESET(sc);
4565
4566 /*
4567 * Clear out the VLAN table -- we don't use it (yet).
4568 */
4569 CSR_WRITE(sc, WMREG_VET, 0);
4570 if (sc->sc_type == WM_T_I350)
4571 trynum = 10; /* Due to hw errata */
4572 else
4573 trynum = 1;
4574 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4575 for (j = 0; j < trynum; j++)
4576 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4577
4578 /*
4579 * Set up flow-control parameters.
4580 *
4581 * XXX Values could probably stand some tuning.
4582 */
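	/*
	 * FCAL/FCAH hold the 802.3x PAUSE destination address
	 * (01:80:c2:00:00:01) and FCT its Ethertype (0x8808), so the
	 * MAC can recognize incoming flow-control frames.
	 */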
4583 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4584 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4585 && (sc->sc_type != WM_T_PCH2)) {
4586 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4587 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4588 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4589 }
4590
4591 sc->sc_fcrtl = FCRTL_DFLT;
4592 if (sc->sc_type < WM_T_82543) {
4593 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4594 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4595 } else {
4596 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4597 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4598 }
4599
4600 if (sc->sc_type == WM_T_80003)
4601 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4602 else
4603 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4604
4605	/* Write the control register. */
4606 wm_set_vlan(sc);
4607
4608 if (sc->sc_flags & WM_F_HAS_MII) {
4609 int val;
4610
4611 switch (sc->sc_type) {
4612 case WM_T_80003:
4613 case WM_T_ICH8:
4614 case WM_T_ICH9:
4615 case WM_T_ICH10:
4616 case WM_T_PCH:
4617 case WM_T_PCH2:
4618 case WM_T_PCH_LPT:
4619 /*
4620			 * Set the MAC to wait the maximum time between each
4621			 * iteration and to increase the maximum number of
4622			 * iterations when polling the PHY; this fixes
4623			 * erroneous timeouts at 10Mbps.
4624 */
4625 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4626 0xFFFF);
4627 val = wm_kmrn_readreg(sc,
4628 KUMCTRLSTA_OFFSET_INB_PARAM);
4629 val |= 0x3F;
4630 wm_kmrn_writereg(sc,
4631 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4632 break;
4633 default:
4634 break;
4635 }
4636
4637 if (sc->sc_type == WM_T_80003) {
4638 val = CSR_READ(sc, WMREG_CTRL_EXT);
4639 val &= ~CTRL_EXT_LINK_MODE_MASK;
4640 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4641
4642 /* Bypass RX and TX FIFO's */
4643 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4644 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4645 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4646 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4647 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4648 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4649 }
4650 }
4651 #if 0
4652 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4653 #endif
4654
4655 /*
4656 * Set up checksum offload parameters.
4657 */
4658 reg = CSR_READ(sc, WMREG_RXCSUM);
4659 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4660 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4661 reg |= RXCSUM_IPOFL;
4662 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4663 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4664 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4665 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4666 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4667
4668 /* Reset TBI's RXCFG count */
4669 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4670
4671 /*
4672 * Set up the interrupt registers.
4673 */
4674 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4675 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4676 ICR_RXO | ICR_RXT0;
4677 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4678 sc->sc_icr |= ICR_RXCFG;
4679 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4680
4681 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4682 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4683 || (sc->sc_type == WM_T_PCH2)) {
4684 reg = CSR_READ(sc, WMREG_KABGTXD);
4685 reg |= KABGTXD_BGSQLBIAS;
4686 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4687 }
4688
4689 /* Set up the inter-packet gap. */
4690 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4691
4692 if (sc->sc_type >= WM_T_82543) {
4693 /*
4694		 * Set up the interrupt throttling register (units of 256ns).
4695		 * Note that a footnote in Intel's documentation says this
4696		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4697		 * or 10Mbit mode. Empirically, it appears that this is also
4698		 * true for the 1024ns units of the other
4699 * interrupt-related timer registers -- so, really, we ought
4700 * to divide this value by 4 when the link speed is low.
4701 *
4702 * XXX implement this division at link speed change!
4703 */
4704
4705 /*
4706 * For N interrupts/sec, set this value to:
4707 * 1000000000 / (N * 256). Note that we set the
4708 * absolute and packet timer values to this value
4709 * divided by 4 to get "simple timer" behavior.
4710 */
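		/*
		 * Worked example: with sc_itr = 1500 below,
		 * 1000000000 / (1500 * 256) = ~2604 interrupts/sec,
		 * matching the comment on the assignment.
		 */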
4711
4712 sc->sc_itr = 1500; /* 2604 ints/sec */
4713 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4714 }
4715
4716 /* Set the VLAN ethernetype. */
4717 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4718
4719 /*
4720 * Set up the transmit control register; we start out with
4721	 * a collision distance suitable for FDX, but update it when
4722 * we resolve the media type.
4723 */
4724 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4725 | TCTL_CT(TX_COLLISION_THRESHOLD)
4726 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4727 if (sc->sc_type >= WM_T_82571)
4728 sc->sc_tctl |= TCTL_MULR;
4729 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4730
4731 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4732 /*
4733 * Write TDT after TCTL.EN is set.
4734		 * See the documentation.
4735 */
4736 CSR_WRITE(sc, WMREG_TDT, 0);
4737 }
4738
4739 if (sc->sc_type == WM_T_80003) {
4740 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4741 reg &= ~TCTL_EXT_GCEX_MASK;
4742 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4743 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4744 }
4745
4746 /* Set the media. */
4747 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4748 goto out;
4749
4750 /* Configure for OS presence */
4751 wm_init_manageability(sc);
4752
4753 /*
4754 * Set up the receive control register; we actually program
4755 * the register when we set the receive filter. Use multicast
4756 * address offset type 0.
4757 *
4758 * Only the i82544 has the ability to strip the incoming
4759 * CRC, so we don't enable that feature.
4760 */
4761 sc->sc_mchash_type = 0;
4762 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4763 | RCTL_MO(sc->sc_mchash_type);
4764
4765 /*
4766	 * The I350 has a bug where it always strips the CRC whether asked
4767	 * to or not, so ask for a stripped CRC here and cope in wm_rxintr().
4768 */
4769 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210))
4770 sc->sc_rctl |= RCTL_SECRC;
4771
4772 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4773 && (ifp->if_mtu > ETHERMTU)) {
4774 sc->sc_rctl |= RCTL_LPE;
4775 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4776 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4777 }
4778
4779 if (MCLBYTES == 2048) {
4780 sc->sc_rctl |= RCTL_2k;
4781 } else {
4782 if (sc->sc_type >= WM_T_82543) {
4783 switch (MCLBYTES) {
4784 case 4096:
4785 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4786 break;
4787 case 8192:
4788 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4789 break;
4790 case 16384:
4791 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4792 break;
4793 default:
4794 panic("wm_init: MCLBYTES %d unsupported",
4795 MCLBYTES);
4796 break;
4797 }
4798 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4799 }
4800
4801 /* Set the receive filter. */
4802 wm_set_filter(sc);
4803
4804 /* On 575 and later set RDT only if RX enabled */
4805 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4806 for (i = 0; i < WM_NRXDESC; i++)
4807 WM_INIT_RXDESC(sc, i);
4808
4809 /* Start the one second link check clock. */
4810 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4811
4812 /* ...all done! */
4813 ifp->if_flags |= IFF_RUNNING;
4814 ifp->if_flags &= ~IFF_OACTIVE;
4815
4816 out:
4817 sc->sc_if_flags = ifp->if_flags;
4818 if (error)
4819 log(LOG_ERR, "%s: interface not running\n",
4820 device_xname(sc->sc_dev));
4821 return error;
4822 }
4823
4824 /*
4825 * wm_rxdrain:
4826 *
4827 * Drain the receive queue.
4828 */
4829 static void
4830 wm_rxdrain(struct wm_softc *sc)
4831 {
4832 struct wm_rxsoft *rxs;
4833 int i;
4834
4835 for (i = 0; i < WM_NRXDESC; i++) {
4836 rxs = &sc->sc_rxsoft[i];
4837 if (rxs->rxs_mbuf != NULL) {
4838 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4839 m_freem(rxs->rxs_mbuf);
4840 rxs->rxs_mbuf = NULL;
4841 }
4842 }
4843 }
4844
4845 /*
4846 * wm_stop: [ifnet interface function]
4847 *
4848 * Stop transmission on the interface.
4849 */
4850 static void
4851 wm_stop(struct ifnet *ifp, int disable)
4852 {
4853 struct wm_softc *sc = ifp->if_softc;
4854 struct wm_txsoft *txs;
4855 int i;
4856
4857 /* Stop the one second clock. */
4858 callout_stop(&sc->sc_tick_ch);
4859
4860 /* Stop the 82547 Tx FIFO stall check timer. */
4861 if (sc->sc_type == WM_T_82547)
4862 callout_stop(&sc->sc_txfifo_ch);
4863
4864 if (sc->sc_flags & WM_F_HAS_MII) {
4865 /* Down the MII. */
4866 mii_down(&sc->sc_mii);
4867 } else {
4868 #if 0
4869 /* Should we clear PHY's status properly? */
4870 wm_reset(sc);
4871 #endif
4872 }
4873
4874 /* Stop the transmit and receive processes. */
4875 CSR_WRITE(sc, WMREG_TCTL, 0);
4876 CSR_WRITE(sc, WMREG_RCTL, 0);
4877 sc->sc_rctl &= ~RCTL_EN;
4878
4879 /*
4880 * Clear the interrupt mask to ensure the device cannot assert its
4881 * interrupt line.
4882 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4883 * any currently pending or shared interrupt.
4884 */
4885 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4886 sc->sc_icr = 0;
4887
4888 /* Release any queued transmit buffers. */
4889 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4890 txs = &sc->sc_txsoft[i];
4891 if (txs->txs_mbuf != NULL) {
4892 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4893 m_freem(txs->txs_mbuf);
4894 txs->txs_mbuf = NULL;
4895 }
4896 }
4897
4898 /* Mark the interface as down and cancel the watchdog timer. */
4899 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4900 ifp->if_timer = 0;
4901
4902 if (disable)
4903 wm_rxdrain(sc);
4904
4905 #if 0 /* notyet */
4906 if (sc->sc_type >= WM_T_82544)
4907 CSR_WRITE(sc, WMREG_WUC, 0);
4908 #endif
4909 }
4910
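/*
 * wm_get_auto_rd_done:
 *
 *	Wait for the EEPROM auto-read to complete after a reset.
 */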
4911 void
4912 wm_get_auto_rd_done(struct wm_softc *sc)
4913 {
4914 int i;
4915
4916 /* wait for eeprom to reload */
4917 switch (sc->sc_type) {
4918 case WM_T_82571:
4919 case WM_T_82572:
4920 case WM_T_82573:
4921 case WM_T_82574:
4922 case WM_T_82583:
4923 case WM_T_82575:
4924 case WM_T_82576:
4925 case WM_T_82580:
4926 case WM_T_82580ER:
4927 case WM_T_I350:
4928 case WM_T_I210:
4929 case WM_T_I211:
4930 case WM_T_80003:
4931 case WM_T_ICH8:
4932 case WM_T_ICH9:
4933 for (i = 0; i < 10; i++) {
4934 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4935 break;
4936 delay(1000);
4937 }
4938 if (i == 10) {
4939 log(LOG_ERR, "%s: auto read from eeprom failed to "
4940 "complete\n", device_xname(sc->sc_dev));
4941 }
4942 break;
4943 default:
4944 break;
4945 }
4946 }
4947
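/*
 * wm_lan_init_done:
 *
 *	Wait for the STATUS_LAN_INIT_DONE indication after a reset
 *	(ICH10 and newer).
 */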
4948 void
4949 wm_lan_init_done(struct wm_softc *sc)
4950 {
4951 uint32_t reg = 0;
4952 int i;
4953
4954 /* wait for eeprom to reload */
4955 switch (sc->sc_type) {
4956 case WM_T_ICH10:
4957 case WM_T_PCH:
4958 case WM_T_PCH2:
4959 case WM_T_PCH_LPT:
4960 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4961 reg = CSR_READ(sc, WMREG_STATUS);
4962 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4963 break;
4964 delay(100);
4965 }
4966 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4967 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4968 "complete\n", device_xname(sc->sc_dev), __func__);
4969 }
4970 break;
4971 default:
4972 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4973 __func__);
4974 break;
4975 }
4976
4977 reg &= ~STATUS_LAN_INIT_DONE;
4978 CSR_WRITE(sc, WMREG_STATUS, reg);
4979 }
4980
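/*
 * wm_get_cfg_done:
 *
 *	Wait for the hardware/PHY configuration to complete after a reset.
 */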
4981 void
4982 wm_get_cfg_done(struct wm_softc *sc)
4983 {
4984 int mask;
4985 uint32_t reg;
4986 int i;
4987
4988 /* wait for eeprom to reload */
4989 switch (sc->sc_type) {
4990 case WM_T_82542_2_0:
4991 case WM_T_82542_2_1:
4992 /* null */
4993 break;
4994 case WM_T_82543:
4995 case WM_T_82544:
4996 case WM_T_82540:
4997 case WM_T_82545:
4998 case WM_T_82545_3:
4999 case WM_T_82546:
5000 case WM_T_82546_3:
5001 case WM_T_82541:
5002 case WM_T_82541_2:
5003 case WM_T_82547:
5004 case WM_T_82547_2:
5005 case WM_T_82573:
5006 case WM_T_82574:
5007 case WM_T_82583:
5008 /* generic */
5009 delay(10*1000);
5010 break;
5011 case WM_T_80003:
5012 case WM_T_82571:
5013 case WM_T_82572:
5014 case WM_T_82575:
5015 case WM_T_82576:
5016 case WM_T_82580:
5017 case WM_T_82580ER:
5018 case WM_T_I350:
5019 case WM_T_I210:
5020 case WM_T_I211:
5021 if (sc->sc_type == WM_T_82571) {
5022 /* Only 82571 shares port 0 */
5023 mask = EEMNGCTL_CFGDONE_0;
5024 } else
5025 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5026 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5027 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5028 break;
5029 delay(1000);
5030 }
5031 if (i >= WM_PHY_CFG_TIMEOUT) {
5032 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5033 device_xname(sc->sc_dev), __func__));
5034 }
5035 break;
5036 case WM_T_ICH8:
5037 case WM_T_ICH9:
5038 case WM_T_ICH10:
5039 case WM_T_PCH:
5040 case WM_T_PCH2:
5041 case WM_T_PCH_LPT:
5042 if (sc->sc_type >= WM_T_PCH) {
5043 reg = CSR_READ(sc, WMREG_STATUS);
5044 if ((reg & STATUS_PHYRA) != 0)
5045 CSR_WRITE(sc, WMREG_STATUS,
5046 reg & ~STATUS_PHYRA);
5047 }
5048 delay(10*1000);
5049 break;
5050 default:
5051 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5052 __func__);
5053 break;
5054 }
5055 }
5056
5057 /*
5058 * wm_acquire_eeprom:
5059 *
5060 * Perform the EEPROM handshake required on some chips.
5061 */
5062 static int
5063 wm_acquire_eeprom(struct wm_softc *sc)
5064 {
5065 uint32_t reg;
5066 int x;
5067 int ret = 0;
5068
5069	/* Flash-based EEPROM needs no handshake; always succeeds. */
5070 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5071 return 0;
5072
5073 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5074 ret = wm_get_swfwhw_semaphore(sc);
5075 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5076 /* this will also do wm_get_swsm_semaphore() if needed */
5077 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5078 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5079 ret = wm_get_swsm_semaphore(sc);
5080 }
5081
5082 if (ret) {
5083 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5084 __func__);
5085 return 1;
5086 }
5087
5088 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5089 reg = CSR_READ(sc, WMREG_EECD);
5090
5091 /* Request EEPROM access. */
5092 reg |= EECD_EE_REQ;
5093 CSR_WRITE(sc, WMREG_EECD, reg);
5094
5095 /* ..and wait for it to be granted. */
5096 for (x = 0; x < 1000; x++) {
5097 reg = CSR_READ(sc, WMREG_EECD);
5098 if (reg & EECD_EE_GNT)
5099 break;
5100 delay(5);
5101 }
5102 if ((reg & EECD_EE_GNT) == 0) {
5103 aprint_error_dev(sc->sc_dev,
5104 "could not acquire EEPROM GNT\n");
5105 reg &= ~EECD_EE_REQ;
5106 CSR_WRITE(sc, WMREG_EECD, reg);
5107 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5108 wm_put_swfwhw_semaphore(sc);
5109 if (sc->sc_flags & WM_F_SWFW_SYNC)
5110 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5111 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5112 wm_put_swsm_semaphore(sc);
5113 return 1;
5114 }
5115 }
5116
5117 return 0;
5118 }
5119
5120 /*
5121 * wm_release_eeprom:
5122 *
5123 * Release the EEPROM mutex.
5124 */
5125 static void
5126 wm_release_eeprom(struct wm_softc *sc)
5127 {
5128 uint32_t reg;
5129
5130	/* Flash-based EEPROM needs no handshake; nothing to release. */
5131 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5132 return;
5133
5134 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5135 reg = CSR_READ(sc, WMREG_EECD);
5136 reg &= ~EECD_EE_REQ;
5137 CSR_WRITE(sc, WMREG_EECD, reg);
5138 }
5139
5140 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5141 wm_put_swfwhw_semaphore(sc);
5142 if (sc->sc_flags & WM_F_SWFW_SYNC)
5143 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5144 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5145 wm_put_swsm_semaphore(sc);
5146 }
5147
5148 /*
5149 * wm_eeprom_sendbits:
5150 *
5151 * Send a series of bits to the EEPROM.
5152 */
5153 static void
5154 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5155 {
5156 uint32_t reg;
5157 int x;
5158
5159 reg = CSR_READ(sc, WMREG_EECD);
5160
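	/* Present each bit on DI, MSB first, and clock it in with an SK pulse. */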
5161 for (x = nbits; x > 0; x--) {
5162 if (bits & (1U << (x - 1)))
5163 reg |= EECD_DI;
5164 else
5165 reg &= ~EECD_DI;
5166 CSR_WRITE(sc, WMREG_EECD, reg);
5167 delay(2);
5168 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5169 delay(2);
5170 CSR_WRITE(sc, WMREG_EECD, reg);
5171 delay(2);
5172 }
5173 }
5174
5175 /*
5176 * wm_eeprom_recvbits:
5177 *
5178 * Receive a series of bits from the EEPROM.
5179 */
5180 static void
5181 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5182 {
5183 uint32_t reg, val;
5184 int x;
5185
5186 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5187
5188 val = 0;
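	/* Pulse SK high and sample DO while it is high, MSB first. */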
5189 for (x = nbits; x > 0; x--) {
5190 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5191 delay(2);
5192 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5193 val |= (1U << (x - 1));
5194 CSR_WRITE(sc, WMREG_EECD, reg);
5195 delay(2);
5196 }
5197 *valp = val;
5198 }
5199
5200 /*
5201 * wm_read_eeprom_uwire:
5202 *
5203 * Read a word from the EEPROM using the MicroWire protocol.
5204 */
5205 static int
5206 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5207 {
5208 uint32_t reg, val;
5209 int i;
5210
5211 for (i = 0; i < wordcnt; i++) {
5212 /* Clear SK and DI. */
5213 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5214 CSR_WRITE(sc, WMREG_EECD, reg);
5215
5216 /*
5217 * XXX: workaround for a bug in qemu-0.12.x and prior
5218 * and Xen.
5219 *
5220 * We use this workaround only for 82540 because qemu's
5221 * e1000 act as 82540.
5222 */
5223 if (sc->sc_type == WM_T_82540) {
5224 reg |= EECD_SK;
5225 CSR_WRITE(sc, WMREG_EECD, reg);
5226 reg &= ~EECD_SK;
5227 CSR_WRITE(sc, WMREG_EECD, reg);
5228 delay(2);
5229 }
5230 /* XXX: end of workaround */
5231
5232 /* Set CHIP SELECT. */
5233 reg |= EECD_CS;
5234 CSR_WRITE(sc, WMREG_EECD, reg);
5235 delay(2);
5236
5237 /* Shift in the READ command. */
5238 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5239
5240 /* Shift in address. */
5241 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5242
5243 /* Shift out the data. */
5244 wm_eeprom_recvbits(sc, &val, 16);
5245 data[i] = val & 0xffff;
5246
5247 /* Clear CHIP SELECT. */
5248 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5249 CSR_WRITE(sc, WMREG_EECD, reg);
5250 delay(2);
5251 }
5252
5253 return 0;
5254 }
5255
5256 /*
5257 * wm_spi_eeprom_ready:
5258 *
5259 * Wait for a SPI EEPROM to be ready for commands.
5260 */
5261 static int
5262 wm_spi_eeprom_ready(struct wm_softc *sc)
5263 {
5264 uint32_t val;
5265 int usec;
5266
5267 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5268 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5269 wm_eeprom_recvbits(sc, &val, 8);
5270 if ((val & SPI_SR_RDY) == 0)
5271 break;
5272 }
5273 if (usec >= SPI_MAX_RETRIES) {
5274 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5275 return 1;
5276 }
5277 return 0;
5278 }
5279
5280 /*
5281 * wm_read_eeprom_spi:
5282 *
5283 *	Read a word from the EEPROM using the SPI protocol.
5284 */
5285 static int
5286 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5287 {
5288 uint32_t reg, val;
5289 int i;
5290 uint8_t opc;
5291
5292 /* Clear SK and CS. */
5293 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5294 CSR_WRITE(sc, WMREG_EECD, reg);
5295 delay(2);
5296
5297 if (wm_spi_eeprom_ready(sc))
5298 return 1;
5299
5300 /* Toggle CS to flush commands. */
5301 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5302 delay(2);
5303 CSR_WRITE(sc, WMREG_EECD, reg);
5304 delay(2);
5305
5306 opc = SPI_OPC_READ;
5307 if (sc->sc_ee_addrbits == 8 && word >= 128)
5308 opc |= SPI_OPC_A8;
5309
5310 wm_eeprom_sendbits(sc, opc, 8);
5311 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
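	/*
	 * The word index becomes a byte address (word << 1); on parts
	 * with 8-bit addressing, the ninth address bit for words >= 128
	 * is carried in the opcode (SPI_OPC_A8), per the usual 25xx
	 * SPI EEPROM convention.
	 */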
5312
5313 for (i = 0; i < wordcnt; i++) {
5314 wm_eeprom_recvbits(sc, &val, 16);
5315 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5316 }
5317
5318 /* Raise CS and clear SK. */
5319 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5320 CSR_WRITE(sc, WMREG_EECD, reg);
5321 delay(2);
5322
5323 return 0;
5324 }
5325
5326 #define NVM_CHECKSUM 0xBABA
5327 #define EEPROM_SIZE 0x0040
5328 #define NVM_COMPAT 0x0003
5329 #define NVM_COMPAT_VALID_CHECKSUM 0x0001
5330 #define NVM_FUTURE_INIT_WORD1 0x0019
5331 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
5332
5333 /*
5334 * wm_validate_eeprom_checksum
5335 *
5336 * The checksum is defined as the sum of the first 64 (16 bit) words.
5337 */
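/*
 * Note: vendors typically program the checksum word (usually word
 * 0x3F) so that the 64-word sum comes out to exactly NVM_CHECKSUM
 * (0xBABA).
 */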
5338 static int
5339 wm_validate_eeprom_checksum(struct wm_softc *sc)
5340 {
5341 uint16_t checksum, valid_checksum;
5342 uint16_t eeprom_data;
5343 uint16_t csum_wordaddr;
5344 int i;
5345
5346 checksum = 0;
5347
5348 /* Don't check for I211 */
5349 if (sc->sc_type == WM_T_I211)
5350 return 0;
5351
5352 if (sc->sc_type == WM_T_PCH_LPT) {
5353 printf("[PCH_LPT]");
5354 csum_wordaddr = NVM_COMPAT;
5355 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5356 } else {
5357 csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5358 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5359 }
5360
5361 #ifdef WM_DEBUG
5362 /* Dump EEPROM image for debug */
5363 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5364 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5365 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5366 wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5367 if ((eeprom_data & valid_checksum) == 0) {
5368 DPRINTF(WM_DEBUG_NVM,
5369 ("%s: NVM need to be updated (%04x != %04x)\n",
5370 device_xname(sc->sc_dev), eeprom_data,
5371 valid_checksum));
5372 }
5373 }
5374
5375 if ((wm_debug & WM_DEBUG_NVM) != 0) {
5376 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5377 for (i = 0; i < EEPROM_SIZE; i++) {
5378 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5379 printf("XX ");
5380 else
5381 printf("%04x ", eeprom_data);
5382 if (i % 8 == 7)
5383 printf("\n");
5384 }
5385 }
5386
5387 #endif /* WM_DEBUG */
5388
5389 for (i = 0; i < EEPROM_SIZE; i++) {
5390 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5391 return 1;
5392 checksum += eeprom_data;
5393 }
5394
5395 if (checksum != (uint16_t) NVM_CHECKSUM) {
5396 #ifdef WM_DEBUG
5397 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5398 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5399 #endif
5400 /*
5401 * XXX quick hack for non-updated NVM.
5402		 * Check only the last 12 bits until wm_write_eeprom()
5403		 * is implemented.
5404 */
5405 if ((checksum & 0x0fff) != ((uint16_t)NVM_CHECKSUM & 0x0fff))
5406 return 1;
5407 }
5408
5409 return 0;
5410 }
5411
5412 /*
5413 * wm_read_eeprom:
5414 *
5415 * Read data from the serial EEPROM.
5416 */
5417 static int
5418 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5419 {
5420 int rv;
5421
5422 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5423 return 1;
5424
5425 if (wm_acquire_eeprom(sc))
5426 return 1;
5427
5428 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5429 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5430 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5431 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5432 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5433 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5434 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5435 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5436 else
5437 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5438
5439 wm_release_eeprom(sc);
5440 return rv;
5441 }
5442
5443 static int
5444 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5445 uint16_t *data)
5446 {
5447 int i, eerd = 0;
5448 int error = 0;
5449
5450 for (i = 0; i < wordcnt; i++) {
5451 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5452
5453 CSR_WRITE(sc, WMREG_EERD, eerd);
5454 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5455 if (error != 0)
5456 break;
5457
5458 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5459 }
5460
5461 return error;
5462 }
5463
5464 static int
5465 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5466 {
5467 uint32_t attempts = 100000;
5468 uint32_t i, reg = 0;
5469 int32_t done = -1;
5470
5471 for (i = 0; i < attempts; i++) {
5472 reg = CSR_READ(sc, rw);
5473
5474 if (reg & EERD_DONE) {
5475 done = 0;
5476 break;
5477 }
5478 delay(5);
5479 }
5480
5481 return done;
5482 }
5483
5484 static int
5485 wm_check_alt_mac_addr(struct wm_softc *sc)
5486 {
5487 uint16_t myea[ETHER_ADDR_LEN / 2];
5488 uint16_t offset = EEPROM_OFF_MACADDR;
5489
5490 /* Try to read alternative MAC address pointer */
5491 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5492 return -1;
5493
5494 /* Check pointer */
5495 if (offset == 0xffff)
5496 return -1;
5497
5498 /*
5499	 * Check whether the alternative MAC address is valid.
5500	 * Some cards have a non-0xffff pointer but don't actually use
5501	 * an alternative MAC address.
5502	 *
5503	 * A valid unicast address must have the multicast (I/G) bit clear.
5504 */
5505 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5506 if (((myea[0] & 0xff) & 0x01) == 0)
5507 return 0; /* found! */
5508
5509 /* not found */
5510 return -1;
5511 }
5512
5513 static int
5514 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5515 {
5516 uint16_t myea[ETHER_ADDR_LEN / 2];
5517 uint16_t offset = EEPROM_OFF_MACADDR;
5518 int do_invert = 0;
5519
5520 switch (sc->sc_type) {
5521 case WM_T_82580:
5522 case WM_T_82580ER:
5523 case WM_T_I350:
5524 switch (sc->sc_funcid) {
5525 case 0:
5526 /* default value (== EEPROM_OFF_MACADDR) */
5527 break;
5528 case 1:
5529 offset = EEPROM_OFF_LAN1;
5530 break;
5531 case 2:
5532 offset = EEPROM_OFF_LAN2;
5533 break;
5534 case 3:
5535 offset = EEPROM_OFF_LAN3;
5536 break;
5537 default:
5538 goto bad;
5539 /* NOTREACHED */
5540 break;
5541 }
5542 break;
5543 case WM_T_82571:
5544 case WM_T_82575:
5545 case WM_T_82576:
5546 case WM_T_80003:
5547 case WM_T_I210:
5548 case WM_T_I211:
5549 if (wm_check_alt_mac_addr(sc) != 0) {
5550 /* reset the offset to LAN0 */
5551 offset = EEPROM_OFF_MACADDR;
5552 if ((sc->sc_funcid & 0x01) == 1)
5553 do_invert = 1;
5554 goto do_read;
5555 }
5556 switch (sc->sc_funcid) {
5557 case 0:
5558 /*
5559 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5560 * itself.
5561 */
5562 break;
5563 case 1:
5564 offset += EEPROM_OFF_MACADDR_LAN1;
5565 break;
5566 case 2:
5567 offset += EEPROM_OFF_MACADDR_LAN2;
5568 break;
5569 case 3:
5570 offset += EEPROM_OFF_MACADDR_LAN3;
5571 break;
5572 default:
5573 goto bad;
5574 /* NOTREACHED */
5575 break;
5576 }
5577 break;
5578 default:
5579 if ((sc->sc_funcid & 0x01) == 1)
5580 do_invert = 1;
5581 break;
5582 }
5583
5584 do_read:
5585 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5586 myea) != 0) {
5587 goto bad;
5588 }
5589
5590 enaddr[0] = myea[0] & 0xff;
5591 enaddr[1] = myea[0] >> 8;
5592 enaddr[2] = myea[1] & 0xff;
5593 enaddr[3] = myea[1] >> 8;
5594 enaddr[4] = myea[2] & 0xff;
5595 enaddr[5] = myea[2] >> 8;
5596
5597 /*
5598 * Toggle the LSB of the MAC address on the second port
5599 * of some dual port cards.
5600 */
5601 if (do_invert != 0)
5602 enaddr[5] ^= 1;
5603
5604 return 0;
5605
5606 bad:
5607 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5608
5609 return -1;
5610 }
5611
5612 /*
5613 * wm_add_rxbuf:
5614 *
5615 *	Add a receive buffer to the indicated descriptor.
5616 */
5617 static int
5618 wm_add_rxbuf(struct wm_softc *sc, int idx)
5619 {
5620 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5621 struct mbuf *m;
5622 int error;
5623
5624 MGETHDR(m, M_DONTWAIT, MT_DATA);
5625 if (m == NULL)
5626 return ENOBUFS;
5627
5628 MCLGET(m, M_DONTWAIT);
5629 if ((m->m_flags & M_EXT) == 0) {
5630 m_freem(m);
5631 return ENOBUFS;
5632 }
5633
5634 if (rxs->rxs_mbuf != NULL)
5635 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5636
5637 rxs->rxs_mbuf = m;
5638
5639 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5640 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5641 BUS_DMA_READ|BUS_DMA_NOWAIT);
5642 if (error) {
5643 /* XXX XXX XXX */
5644 aprint_error_dev(sc->sc_dev,
5645 "unable to load rx DMA map %d, error = %d\n",
5646 idx, error);
5647 panic("wm_add_rxbuf");
5648 }
5649
5650 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5651 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5652
5653 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5654 if ((sc->sc_rctl & RCTL_EN) != 0)
5655 WM_INIT_RXDESC(sc, idx);
5656 } else
5657 WM_INIT_RXDESC(sc, idx);
5658
5659 return 0;
5660 }
5661
5662 /*
5663 * wm_set_ral:
5664 *
5665  *	Set an entry in the receive address list.
5666 */
5667 static void
5668 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5669 {
5670 uint32_t ral_lo, ral_hi;
5671
5672 if (enaddr != NULL) {
5673 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5674 (enaddr[3] << 24);
5675 ral_hi = enaddr[4] | (enaddr[5] << 8);
5676 ral_hi |= RAL_AV;
5677 } else {
5678 ral_lo = 0;
5679 ral_hi = 0;
5680 }
5681
5682 if (sc->sc_type >= WM_T_82544) {
5683 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5684 ral_lo);
5685 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5686 ral_hi);
5687 } else {
5688 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5689 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5690 }
5691 }
5692
5693 /*
5694 * wm_mchash:
5695 *
5696 * Compute the hash of the multicast address for the 4096-bit
5697 * multicast filter.
5698 */
5699 static uint32_t
5700 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5701 {
5702 static const int lo_shift[4] = { 4, 3, 2, 0 };
5703 static const int hi_shift[4] = { 4, 5, 6, 8 };
5704 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5705 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5706 uint32_t hash;
5707
5708 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5709 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5710 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5711 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5712 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5713 return (hash & 0x3ff);
5714 }
5715 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5716 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5717
5718 return (hash & 0xfff);
5719 }
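
/*
 * Worked example for wm_mchash (a sketch; the values follow from the
 * shift tables above, not from any additional hardware documentation):
 * with mchash_type 0 on a non-ICH chip, the multicast address
 * 01:00:5e:00:00:01 gives hash = (0x00 >> 4) | (0x01 << 4) = 0x010,
 * i.e. MTA word 0 (0x010 >> 5), bit 16 (0x010 & 0x1f), matching the
 * reg/bit computation done in wm_set_filter below.
 */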
5720
5721 /*
5722 * wm_set_filter:
5723 *
5724 * Set up the receive filter.
5725 */
5726 static void
5727 wm_set_filter(struct wm_softc *sc)
5728 {
5729 struct ethercom *ec = &sc->sc_ethercom;
5730 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5731 struct ether_multi *enm;
5732 struct ether_multistep step;
5733 bus_addr_t mta_reg;
5734 uint32_t hash, reg, bit;
5735 int i, size;
5736
5737 if (sc->sc_type >= WM_T_82544)
5738 mta_reg = WMREG_CORDOVA_MTA;
5739 else
5740 mta_reg = WMREG_MTA;
5741
5742 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5743
5744 if (ifp->if_flags & IFF_BROADCAST)
5745 sc->sc_rctl |= RCTL_BAM;
5746 if (ifp->if_flags & IFF_PROMISC) {
5747 sc->sc_rctl |= RCTL_UPE;
5748 goto allmulti;
5749 }
5750
5751 /*
5752 * Set the station address in the first RAL slot, and
5753 * clear the remaining slots.
5754 */
5755 if (sc->sc_type == WM_T_ICH8)
5756 		size = WM_RAL_TABSIZE_ICH8 - 1;
5757 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5758 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
5759 || (sc->sc_type == WM_T_PCH_LPT))
5760 size = WM_RAL_TABSIZE_ICH8;
5761 else if (sc->sc_type == WM_T_82575)
5762 size = WM_RAL_TABSIZE_82575;
5763 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5764 size = WM_RAL_TABSIZE_82576;
5765 else if (sc->sc_type == WM_T_I350)
5766 size = WM_RAL_TABSIZE_I350;
5767 else
5768 size = WM_RAL_TABSIZE;
5769 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5770 for (i = 1; i < size; i++)
5771 wm_set_ral(sc, NULL, i);
5772
5773 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5774 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5775 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5776 size = WM_ICH8_MC_TABSIZE;
5777 else
5778 size = WM_MC_TABSIZE;
5779 /* Clear out the multicast table. */
5780 for (i = 0; i < size; i++)
5781 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5782
5783 ETHER_FIRST_MULTI(step, ec, enm);
5784 while (enm != NULL) {
5785 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5786 /*
5787 * We must listen to a range of multicast addresses.
5788 * For now, just accept all multicasts, rather than
5789 * trying to set only those filter bits needed to match
5790 * the range. (At this time, the only use of address
5791 * ranges is for IP multicast routing, for which the
5792 * range is big enough to require all bits set.)
5793 */
5794 goto allmulti;
5795 }
5796
5797 hash = wm_mchash(sc, enm->enm_addrlo);
5798
5799 reg = (hash >> 5);
5800 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5801 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5802 || (sc->sc_type == WM_T_PCH2)
5803 || (sc->sc_type == WM_T_PCH_LPT))
5804 reg &= 0x1f;
5805 else
5806 reg &= 0x7f;
5807 bit = hash & 0x1f;
5808
5809 hash = CSR_READ(sc, mta_reg + (reg << 2));
5810 hash |= 1U << bit;
5811
5812 /* XXX Hardware bug?? */
5813 		if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
5814 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5815 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5816 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5817 } else
5818 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5819
5820 ETHER_NEXT_MULTI(step, enm);
5821 }
5822
5823 ifp->if_flags &= ~IFF_ALLMULTI;
5824 goto setit;
5825
5826 allmulti:
5827 ifp->if_flags |= IFF_ALLMULTI;
5828 sc->sc_rctl |= RCTL_MPE;
5829
5830 setit:
5831 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5832 }
5833
5834 /*
5835 * wm_tbi_mediainit:
5836 *
5837 * Initialize media for use on 1000BASE-X devices.
5838 */
5839 static void
5840 wm_tbi_mediainit(struct wm_softc *sc)
5841 {
5842 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5843 const char *sep = "";
5844
5845 if (sc->sc_type < WM_T_82543)
5846 sc->sc_tipg = TIPG_WM_DFLT;
5847 else
5848 sc->sc_tipg = TIPG_LG_DFLT;
5849
5850 sc->sc_tbi_anegticks = 5;
5851
5852 /* Initialize our media structures */
5853 sc->sc_mii.mii_ifp = ifp;
5854
5855 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5856 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5857 wm_tbi_mediastatus);
5858
5859 /*
5860 * SWD Pins:
5861 *
5862 * 0 = Link LED (output)
5863 * 1 = Loss Of Signal (input)
5864 */
5865 sc->sc_ctrl |= CTRL_SWDPIO(0);
5866 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5867
5868 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5869
5870 #define ADD(ss, mm, dd) \
5871 do { \
5872 aprint_normal("%s%s", sep, ss); \
5873 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5874 sep = ", "; \
5875 } while (/*CONSTCOND*/0)
5876
5877 aprint_normal_dev(sc->sc_dev, "");
5878 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5879 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5880 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5881 aprint_normal("\n");
5882
5883 #undef ADD
5884
5885 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5886 }
5887
5888 /*
5889 * wm_tbi_mediastatus: [ifmedia interface function]
5890 *
5891 * Get the current interface media status on a 1000BASE-X device.
5892 */
5893 static void
5894 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5895 {
5896 struct wm_softc *sc = ifp->if_softc;
5897 uint32_t ctrl, status;
5898
5899 ifmr->ifm_status = IFM_AVALID;
5900 ifmr->ifm_active = IFM_ETHER;
5901
5902 status = CSR_READ(sc, WMREG_STATUS);
5903 if ((status & STATUS_LU) == 0) {
5904 ifmr->ifm_active |= IFM_NONE;
5905 return;
5906 }
5907
5908 ifmr->ifm_status |= IFM_ACTIVE;
5909 ifmr->ifm_active |= IFM_1000_SX;
5910 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5911 ifmr->ifm_active |= IFM_FDX;
5912 ctrl = CSR_READ(sc, WMREG_CTRL);
5913 if (ctrl & CTRL_RFCE)
5914 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5915 if (ctrl & CTRL_TFCE)
5916 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5917 }
5918
5919 /*
5920 * wm_tbi_mediachange: [ifmedia interface function]
5921 *
5922 * Set hardware to newly-selected media on a 1000BASE-X device.
5923 */
5924 static int
5925 wm_tbi_mediachange(struct ifnet *ifp)
5926 {
5927 struct wm_softc *sc = ifp->if_softc;
5928 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5929 uint32_t status;
5930 int i;
5931
5932 sc->sc_txcw = 0;
5933 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5934 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5935 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5936 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5937 sc->sc_txcw |= TXCW_ANE;
5938 } else {
5939 /*
5940 * If autonegotiation is turned off, force link up and turn on
5941 * full duplex
5942 */
5943 sc->sc_txcw &= ~TXCW_ANE;
5944 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5945 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5946 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5947 delay(1000);
5948 }
5949
5950 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5951 device_xname(sc->sc_dev),sc->sc_txcw));
5952 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5953 delay(10000);
5954
5955 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5956 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5957
5958 	/*
5959 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
5960 	 * if the optics detect a signal, and clear if they don't.
5961 	 */
5962 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5963 /* Have signal; wait for the link to come up. */
5964
5965 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5966 /*
5967 * Reset the link, and let autonegotiation do its thing
5968 */
5969 sc->sc_ctrl |= CTRL_LRST;
5970 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5971 delay(1000);
5972 sc->sc_ctrl &= ~CTRL_LRST;
5973 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5974 delay(1000);
5975 }
5976
5977 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5978 delay(10000);
5979 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5980 break;
5981 }
5982
5983 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5984 device_xname(sc->sc_dev),i));
5985
5986 status = CSR_READ(sc, WMREG_STATUS);
5987 DPRINTF(WM_DEBUG_LINK,
5988 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5989 device_xname(sc->sc_dev),status, STATUS_LU));
5990 if (status & STATUS_LU) {
5991 /* Link is up. */
5992 DPRINTF(WM_DEBUG_LINK,
5993 ("%s: LINK: set media -> link up %s\n",
5994 device_xname(sc->sc_dev),
5995 (status & STATUS_FD) ? "FDX" : "HDX"));
5996
5997 /*
5998 * NOTE: CTRL will update TFCE and RFCE automatically,
5999 * so we should update sc->sc_ctrl
6000 */
6001 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6002 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6003 sc->sc_fcrtl &= ~FCRTL_XONE;
6004 if (status & STATUS_FD)
6005 sc->sc_tctl |=
6006 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6007 else
6008 sc->sc_tctl |=
6009 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6010 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6011 sc->sc_fcrtl |= FCRTL_XONE;
6012 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6013 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6014 WMREG_OLD_FCRTL : WMREG_FCRTL,
6015 sc->sc_fcrtl);
6016 sc->sc_tbi_linkup = 1;
6017 } else {
6018 if (i == WM_LINKUP_TIMEOUT)
6019 wm_check_for_link(sc);
6020 /* Link is down. */
6021 DPRINTF(WM_DEBUG_LINK,
6022 ("%s: LINK: set media -> link down\n",
6023 device_xname(sc->sc_dev)));
6024 sc->sc_tbi_linkup = 0;
6025 }
6026 } else {
6027 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6028 device_xname(sc->sc_dev)));
6029 sc->sc_tbi_linkup = 0;
6030 }
6031
6032 wm_tbi_set_linkled(sc);
6033
6034 return 0;
6035 }
6036
6037 /*
6038 * wm_tbi_set_linkled:
6039 *
6040 * Update the link LED on 1000BASE-X devices.
6041 */
6042 static void
6043 wm_tbi_set_linkled(struct wm_softc *sc)
6044 {
6045
6046 if (sc->sc_tbi_linkup)
6047 sc->sc_ctrl |= CTRL_SWDPIN(0);
6048 else
6049 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6050
6051 /* 82540 or newer devices are active low */
6052 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6053
6054 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6055 }
6056
6057 /*
6058 * wm_tbi_check_link:
6059 *
6060 * Check the link on 1000BASE-X devices.
6061 */
6062 static void
6063 wm_tbi_check_link(struct wm_softc *sc)
6064 {
6065 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6066 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6067 uint32_t rxcw, ctrl, status;
6068
6069 status = CSR_READ(sc, WMREG_STATUS);
6070
6071 rxcw = CSR_READ(sc, WMREG_RXCW);
6072 ctrl = CSR_READ(sc, WMREG_CTRL);
6073
6074 /* set link status */
6075 if ((status & STATUS_LU) == 0) {
6076 DPRINTF(WM_DEBUG_LINK,
6077 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6078 sc->sc_tbi_linkup = 0;
6079 } else if (sc->sc_tbi_linkup == 0) {
6080 DPRINTF(WM_DEBUG_LINK,
6081 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6082 (status & STATUS_FD) ? "FDX" : "HDX"));
6083 sc->sc_tbi_linkup = 1;
6084 }
6085
6086 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6087 && ((status & STATUS_LU) == 0)) {
6088 sc->sc_tbi_linkup = 0;
6089 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6090 /* RXCFG storm! */
6091 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6092 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6093 wm_init(ifp);
6094 ifp->if_start(ifp);
6095 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6096 /* If the timer expired, retry autonegotiation */
6097 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6098 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6099 sc->sc_tbi_ticks = 0;
6100 /*
6101 * Reset the link, and let autonegotiation do
6102 * its thing
6103 */
6104 sc->sc_ctrl |= CTRL_LRST;
6105 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6106 delay(1000);
6107 sc->sc_ctrl &= ~CTRL_LRST;
6108 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6109 delay(1000);
6110 CSR_WRITE(sc, WMREG_TXCW,
6111 sc->sc_txcw & ~TXCW_ANE);
6112 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6113 }
6114 }
6115 }
6116
6117 wm_tbi_set_linkled(sc);
6118 }
6119
6120 /*
6121 * wm_gmii_reset:
6122 *
6123 * Reset the PHY.
6124 */
6125 static void
6126 wm_gmii_reset(struct wm_softc *sc)
6127 {
6128 uint32_t reg;
6129 int rv;
6130
6131 /* get phy semaphore */
6132 switch (sc->sc_type) {
6133 case WM_T_82571:
6134 case WM_T_82572:
6135 case WM_T_82573:
6136 case WM_T_82574:
6137 case WM_T_82583:
6138 /* XXX should get sw semaphore, too */
6139 rv = wm_get_swsm_semaphore(sc);
6140 break;
6141 case WM_T_82575:
6142 case WM_T_82576:
6143 case WM_T_82580:
6144 case WM_T_82580ER:
6145 case WM_T_I350:
6146 case WM_T_I210:
6147 case WM_T_I211:
6148 case WM_T_80003:
6149 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6150 break;
6151 case WM_T_ICH8:
6152 case WM_T_ICH9:
6153 case WM_T_ICH10:
6154 case WM_T_PCH:
6155 case WM_T_PCH2:
6156 case WM_T_PCH_LPT:
6157 rv = wm_get_swfwhw_semaphore(sc);
6158 break;
6159 default:
6160 		/* nothing to do */
6161 rv = 0;
6162 break;
6163 }
6164 if (rv != 0) {
6165 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6166 __func__);
6167 return;
6168 }
6169
6170 switch (sc->sc_type) {
6171 case WM_T_82542_2_0:
6172 case WM_T_82542_2_1:
6173 /* null */
6174 break;
6175 case WM_T_82543:
6176 /*
6177 * With 82543, we need to force speed and duplex on the MAC
6178 * equal to what the PHY speed and duplex configuration is.
6179 * In addition, we need to perform a hardware reset on the PHY
6180 * to take it out of reset.
6181 */
6182 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6183 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6184
6185 /* The PHY reset pin is active-low. */
6186 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6187 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6188 CTRL_EXT_SWDPIN(4));
6189 reg |= CTRL_EXT_SWDPIO(4);
6190
6191 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6192 delay(10*1000);
6193
6194 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6195 delay(150);
6196 #if 0
6197 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6198 #endif
6199 delay(20*1000); /* XXX extra delay to get PHY ID? */
6200 break;
6201 case WM_T_82544: /* reset 10000us */
6202 case WM_T_82540:
6203 case WM_T_82545:
6204 case WM_T_82545_3:
6205 case WM_T_82546:
6206 case WM_T_82546_3:
6207 case WM_T_82541:
6208 case WM_T_82541_2:
6209 case WM_T_82547:
6210 case WM_T_82547_2:
6211 case WM_T_82571: /* reset 100us */
6212 case WM_T_82572:
6213 case WM_T_82573:
6214 case WM_T_82574:
6215 case WM_T_82575:
6216 case WM_T_82576:
6217 case WM_T_82580:
6218 case WM_T_82580ER:
6219 case WM_T_I350:
6220 case WM_T_I210:
6221 case WM_T_I211:
6222 case WM_T_82583:
6223 case WM_T_80003:
6224 /* generic reset */
6225 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6226 delay(20000);
6227 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6228 delay(20000);
6229
6230 if ((sc->sc_type == WM_T_82541)
6231 || (sc->sc_type == WM_T_82541_2)
6232 || (sc->sc_type == WM_T_82547)
6233 || (sc->sc_type == WM_T_82547_2)) {
6234 			/* workarounds for igp are done in igp_reset() */
6235 /* XXX add code to set LED after phy reset */
6236 }
6237 break;
6238 case WM_T_ICH8:
6239 case WM_T_ICH9:
6240 case WM_T_ICH10:
6241 case WM_T_PCH:
6242 case WM_T_PCH2:
6243 case WM_T_PCH_LPT:
6244 /* generic reset */
6245 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6246 delay(100);
6247 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6248 delay(150);
6249 break;
6250 default:
6251 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6252 __func__);
6253 break;
6254 }
6255
6256 /* release PHY semaphore */
6257 switch (sc->sc_type) {
6258 case WM_T_82571:
6259 case WM_T_82572:
6260 case WM_T_82573:
6261 case WM_T_82574:
6262 case WM_T_82583:
6263 /* XXX should put sw semaphore, too */
6264 wm_put_swsm_semaphore(sc);
6265 break;
6266 case WM_T_82575:
6267 case WM_T_82576:
6268 case WM_T_82580:
6269 case WM_T_82580ER:
6270 case WM_T_I350:
6271 case WM_T_I210:
6272 case WM_T_I211:
6273 case WM_T_80003:
6274 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6275 break;
6276 case WM_T_ICH8:
6277 case WM_T_ICH9:
6278 case WM_T_ICH10:
6279 case WM_T_PCH:
6280 case WM_T_PCH2:
6281 case WM_T_PCH_LPT:
6282 wm_put_swfwhw_semaphore(sc);
6283 break;
6284 default:
6285 		/* nothing to do */
6286 rv = 0;
6287 break;
6288 }
6289
6290 /* get_cfg_done */
6291 wm_get_cfg_done(sc);
6292
6293 /* extra setup */
6294 switch (sc->sc_type) {
6295 case WM_T_82542_2_0:
6296 case WM_T_82542_2_1:
6297 case WM_T_82543:
6298 case WM_T_82544:
6299 case WM_T_82540:
6300 case WM_T_82545:
6301 case WM_T_82545_3:
6302 case WM_T_82546:
6303 case WM_T_82546_3:
6304 case WM_T_82541_2:
6305 case WM_T_82547_2:
6306 case WM_T_82571:
6307 case WM_T_82572:
6308 case WM_T_82573:
6309 case WM_T_82574:
6310 case WM_T_82575:
6311 case WM_T_82576:
6312 case WM_T_82580:
6313 case WM_T_82580ER:
6314 case WM_T_I350:
6315 case WM_T_I210:
6316 case WM_T_I211:
6317 case WM_T_82583:
6318 case WM_T_80003:
6319 /* null */
6320 break;
6321 case WM_T_82541:
6322 case WM_T_82547:
6323 /* XXX Configure actively LED after PHY reset */
6324 break;
6325 case WM_T_ICH8:
6326 case WM_T_ICH9:
6327 case WM_T_ICH10:
6328 case WM_T_PCH:
6329 case WM_T_PCH2:
6330 case WM_T_PCH_LPT:
6331 		/* Allow time for h/w to get to a quiescent state after reset */
6332 delay(10*1000);
6333
6334 if (sc->sc_type == WM_T_PCH)
6335 wm_hv_phy_workaround_ich8lan(sc);
6336
6337 if (sc->sc_type == WM_T_PCH2)
6338 wm_lv_phy_workaround_ich8lan(sc);
6339
6340 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6341 /*
6342 * dummy read to clear the phy wakeup bit after lcd
6343 * reset
6344 */
6345 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6346 }
6347
6348 /*
6349 		 * XXX Configure the LCD with the extended configuration region
6350 * in NVM
6351 */
6352
6353 /* Configure the LCD with the OEM bits in NVM */
6354 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6355 /*
6356 * Disable LPLU.
6357 * XXX It seems that 82567 has LPLU, too.
6358 */
6359 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6360 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6361 reg |= HV_OEM_BITS_ANEGNOW;
6362 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6363 }
6364 break;
6365 default:
6366 panic("%s: unknown type\n", __func__);
6367 break;
6368 }
6369 }
6370
6371 /*
6372 * wm_gmii_mediainit:
6373 *
6374 * Initialize media for use on 1000BASE-T devices.
6375 */
6376 static void
6377 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6378 {
6379 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6380 struct mii_data *mii = &sc->sc_mii;
6381
6382 /* We have MII. */
6383 sc->sc_flags |= WM_F_HAS_MII;
6384
6385 if (sc->sc_type == WM_T_80003)
6386 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6387 else
6388 sc->sc_tipg = TIPG_1000T_DFLT;
6389
6390 /*
6391 * Let the chip set speed/duplex on its own based on
6392 * signals from the PHY.
6393 * XXXbouyer - I'm not sure this is right for the 80003,
6394 * the em driver only sets CTRL_SLU here - but it seems to work.
6395 */
6396 sc->sc_ctrl |= CTRL_SLU;
6397 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6398
6399 /* Initialize our media structures and probe the GMII. */
6400 mii->mii_ifp = ifp;
6401
6402 /*
6403 * Determine the PHY access method.
6404 *
6405 * For SGMII, use SGMII specific method.
6406 *
6407 * For some devices, we can determine the PHY access method
6408 * from sc_type.
6409 *
6410 	 * For ICH8 variants, it's difficult to determine the PHY access
6411 	 * method from sc_type, so use the PCI product ID for some devices.
6412 	 * For other ICH8 variants, try igp's method first.  If the PHY
6413 	 * can't be detected that way, fall back to bm's method.
6414 */
6415 switch (prodid) {
6416 case PCI_PRODUCT_INTEL_PCH_M_LM:
6417 case PCI_PRODUCT_INTEL_PCH_M_LC:
6418 /* 82577 */
6419 sc->sc_phytype = WMPHY_82577;
6420 mii->mii_readreg = wm_gmii_hv_readreg;
6421 mii->mii_writereg = wm_gmii_hv_writereg;
6422 break;
6423 case PCI_PRODUCT_INTEL_PCH_D_DM:
6424 case PCI_PRODUCT_INTEL_PCH_D_DC:
6425 /* 82578 */
6426 sc->sc_phytype = WMPHY_82578;
6427 mii->mii_readreg = wm_gmii_hv_readreg;
6428 mii->mii_writereg = wm_gmii_hv_writereg;
6429 break;
6430 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6431 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6432 /* 82579 */
6433 sc->sc_phytype = WMPHY_82579;
6434 mii->mii_readreg = wm_gmii_hv_readreg;
6435 mii->mii_writereg = wm_gmii_hv_writereg;
6436 break;
6437 case PCI_PRODUCT_INTEL_82801I_BM:
6438 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6439 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6440 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6441 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6442 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6443 /* 82567 */
6444 sc->sc_phytype = WMPHY_BM;
6445 mii->mii_readreg = wm_gmii_bm_readreg;
6446 mii->mii_writereg = wm_gmii_bm_writereg;
6447 break;
6448 default:
6449 if ((sc->sc_flags & WM_F_SGMII) != 0) {
6450 mii->mii_readreg = wm_sgmii_readreg;
6451 mii->mii_writereg = wm_sgmii_writereg;
6452 } else if (sc->sc_type >= WM_T_80003) {
6453 mii->mii_readreg = wm_gmii_i80003_readreg;
6454 mii->mii_writereg = wm_gmii_i80003_writereg;
6455 } else if (sc->sc_type >= WM_T_I210) {
6456 mii->mii_readreg = wm_gmii_i82544_readreg;
6457 mii->mii_writereg = wm_gmii_i82544_writereg;
6458 } else if (sc->sc_type >= WM_T_82580) {
6459 sc->sc_phytype = WMPHY_82580;
6460 mii->mii_readreg = wm_gmii_82580_readreg;
6461 mii->mii_writereg = wm_gmii_82580_writereg;
6462 } else if (sc->sc_type >= WM_T_82544) {
6463 mii->mii_readreg = wm_gmii_i82544_readreg;
6464 mii->mii_writereg = wm_gmii_i82544_writereg;
6465 } else {
6466 mii->mii_readreg = wm_gmii_i82543_readreg;
6467 mii->mii_writereg = wm_gmii_i82543_writereg;
6468 }
6469 break;
6470 }
6471 mii->mii_statchg = wm_gmii_statchg;
6472
6473 wm_gmii_reset(sc);
6474
6475 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6476 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6477 wm_gmii_mediastatus);
6478
6479 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6480 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6481 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6482 || (sc->sc_type == WM_T_I211)) {
6483 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6484 /* Attach only one port */
6485 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6486 MII_OFFSET_ANY, MIIF_DOPAUSE);
6487 } else {
6488 int i;
6489 uint32_t ctrl_ext;
6490
6491 /* Power on sgmii phy if it is disabled */
6492 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6493 CSR_WRITE(sc, WMREG_CTRL_EXT,
6494 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6495 CSR_WRITE_FLUSH(sc);
6496 delay(300*1000); /* XXX too long */
6497
6498 			/* Probe PHY addresses 1 through 7 */
6499 for (i = 1; i < 8; i++)
6500 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6501 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6502
6503 			/* Restore the previous SFP cage power state */
6504 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6505 }
6506 } else {
6507 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6508 MII_OFFSET_ANY, MIIF_DOPAUSE);
6509 }
6510
6511 /*
6512 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6513 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6514 */
6515 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6516 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6517 wm_set_mdio_slow_mode_hv(sc);
6518 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6519 MII_OFFSET_ANY, MIIF_DOPAUSE);
6520 }
6521
6522 /*
6523 * (For ICH8 variants)
6524 * If PHY detection failed, use BM's r/w function and retry.
6525 */
6526 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6527 /* if failed, retry with *_bm_* */
6528 mii->mii_readreg = wm_gmii_bm_readreg;
6529 mii->mii_writereg = wm_gmii_bm_writereg;
6530
6531 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6532 MII_OFFSET_ANY, MIIF_DOPAUSE);
6533 }
6534
6535 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6536 		/* No PHY was found */
6537 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6538 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6539 sc->sc_phytype = WMPHY_NONE;
6540 } else {
6541 /*
6542 * PHY Found!
6543 * Check PHY type.
6544 */
6545 uint32_t model;
6546 struct mii_softc *child;
6547
6548 child = LIST_FIRST(&mii->mii_phys);
6549 if (device_is_a(child->mii_dev, "igphy")) {
6550 struct igphy_softc *isc = (struct igphy_softc *)child;
6551
6552 model = isc->sc_mii.mii_mpd_model;
6553 if (model == MII_MODEL_yyINTEL_I82566)
6554 sc->sc_phytype = WMPHY_IGP_3;
6555 }
6556
6557 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6558 }
6559 }
6560
6561 /*
6562 * wm_gmii_mediastatus: [ifmedia interface function]
6563 *
6564 * Get the current interface media status on a 1000BASE-T device.
6565 */
6566 static void
6567 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6568 {
6569 struct wm_softc *sc = ifp->if_softc;
6570
6571 ether_mediastatus(ifp, ifmr);
6572 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6573 | sc->sc_flowflags;
6574 }
6575
6576 /*
6577 * wm_gmii_mediachange: [ifmedia interface function]
6578 *
6579 * Set hardware to newly-selected media on a 1000BASE-T device.
6580 */
6581 static int
6582 wm_gmii_mediachange(struct ifnet *ifp)
6583 {
6584 struct wm_softc *sc = ifp->if_softc;
6585 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6586 int rc;
6587
6588 if ((ifp->if_flags & IFF_UP) == 0)
6589 return 0;
6590
6591 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6592 sc->sc_ctrl |= CTRL_SLU;
6593 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6594 || (sc->sc_type > WM_T_82543)) {
6595 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6596 } else {
6597 sc->sc_ctrl &= ~CTRL_ASDE;
6598 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6599 if (ife->ifm_media & IFM_FDX)
6600 sc->sc_ctrl |= CTRL_FD;
6601 switch (IFM_SUBTYPE(ife->ifm_media)) {
6602 case IFM_10_T:
6603 sc->sc_ctrl |= CTRL_SPEED_10;
6604 break;
6605 case IFM_100_TX:
6606 sc->sc_ctrl |= CTRL_SPEED_100;
6607 break;
6608 case IFM_1000_T:
6609 sc->sc_ctrl |= CTRL_SPEED_1000;
6610 break;
6611 default:
6612 panic("wm_gmii_mediachange: bad media 0x%x",
6613 ife->ifm_media);
6614 }
6615 }
6616 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6617 if (sc->sc_type <= WM_T_82543)
6618 wm_gmii_reset(sc);
6619
6620 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6621 return 0;
6622 return rc;
6623 }
6624
6625 #define MDI_IO CTRL_SWDPIN(2)
6626 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6627 #define MDI_CLK CTRL_SWDPIN(3)
6628
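/*
 * i82543_mii_sendbits:
 *
 *	Clock out the "nbits" low-order bits of "data", MSB first,
 *	on the bit-banged MDIO pins of the i82543.
 */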
6629 static void
6630 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6631 {
6632 uint32_t i, v;
6633
6634 v = CSR_READ(sc, WMREG_CTRL);
6635 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6636 v |= MDI_DIR | CTRL_SWDPIO(3);
6637
6638 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6639 if (data & i)
6640 v |= MDI_IO;
6641 else
6642 v &= ~MDI_IO;
6643 CSR_WRITE(sc, WMREG_CTRL, v);
6644 delay(10);
6645 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6646 delay(10);
6647 CSR_WRITE(sc, WMREG_CTRL, v);
6648 delay(10);
6649 }
6650 }
6651
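/*
 * i82543_mii_recvbits:
 *
 *	Clock in a 16-bit value, MSB first, from the bit-banged MDIO
 *	pins of the i82543, with turnaround cycles before and after
 *	the data bits.
 */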
6652 static uint32_t
6653 i82543_mii_recvbits(struct wm_softc *sc)
6654 {
6655 uint32_t v, i, data = 0;
6656
6657 v = CSR_READ(sc, WMREG_CTRL);
6658 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6659 v |= CTRL_SWDPIO(3);
6660
6661 CSR_WRITE(sc, WMREG_CTRL, v);
6662 delay(10);
6663 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6664 delay(10);
6665 CSR_WRITE(sc, WMREG_CTRL, v);
6666 delay(10);
6667
6668 for (i = 0; i < 16; i++) {
6669 data <<= 1;
6670 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6671 delay(10);
6672 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6673 data |= 1;
6674 CSR_WRITE(sc, WMREG_CTRL, v);
6675 delay(10);
6676 }
6677
6678 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6679 delay(10);
6680 CSR_WRITE(sc, WMREG_CTRL, v);
6681 delay(10);
6682
6683 return data;
6684 }
6685
6686 #undef MDI_IO
6687 #undef MDI_DIR
6688 #undef MDI_CLK
6689
6690 /*
6691 * wm_gmii_i82543_readreg: [mii interface function]
6692 *
6693 * Read a PHY register on the GMII (i82543 version).
6694 */
6695 static int
6696 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6697 {
6698 struct wm_softc *sc = device_private(self);
6699 int rv;
6700
6701 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6702 i82543_mii_sendbits(sc, reg | (phy << 5) |
6703 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6704 rv = i82543_mii_recvbits(sc) & 0xffff;
6705
6706 DPRINTF(WM_DEBUG_GMII,
6707 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6708 device_xname(sc->sc_dev), phy, reg, rv));
6709
6710 return rv;
6711 }
6712
6713 /*
6714 * wm_gmii_i82543_writereg: [mii interface function]
6715 *
6716 * Write a PHY register on the GMII (i82543 version).
6717 */
6718 static void
6719 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6720 {
6721 struct wm_softc *sc = device_private(self);
6722
6723 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6724 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6725 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6726 (MII_COMMAND_START << 30), 32);
6727 }
6728
6729 /*
6730 * wm_gmii_i82544_readreg: [mii interface function]
6731 *
6732 * Read a PHY register on the GMII.
6733 */
6734 static int
6735 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6736 {
6737 struct wm_softc *sc = device_private(self);
6738 uint32_t mdic = 0;
6739 int i, rv;
6740
6741 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6742 MDIC_REGADD(reg));
6743
6744 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6745 mdic = CSR_READ(sc, WMREG_MDIC);
6746 if (mdic & MDIC_READY)
6747 break;
6748 delay(50);
6749 }
6750
6751 if ((mdic & MDIC_READY) == 0) {
6752 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6753 device_xname(sc->sc_dev), phy, reg);
6754 rv = 0;
6755 } else if (mdic & MDIC_E) {
6756 #if 0 /* This is normal if no PHY is present. */
6757 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6758 device_xname(sc->sc_dev), phy, reg);
6759 #endif
6760 rv = 0;
6761 } else {
6762 rv = MDIC_DATA(mdic);
6763 if (rv == 0xffff)
6764 rv = 0;
6765 }
6766
6767 return rv;
6768 }
6769
6770 /*
6771 * wm_gmii_i82544_writereg: [mii interface function]
6772 *
6773 * Write a PHY register on the GMII.
6774 */
6775 static void
6776 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6777 {
6778 struct wm_softc *sc = device_private(self);
6779 uint32_t mdic = 0;
6780 int i;
6781
6782 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6783 MDIC_REGADD(reg) | MDIC_DATA(val));
6784
6785 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6786 mdic = CSR_READ(sc, WMREG_MDIC);
6787 if (mdic & MDIC_READY)
6788 break;
6789 delay(50);
6790 }
6791
6792 if ((mdic & MDIC_READY) == 0)
6793 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6794 device_xname(sc->sc_dev), phy, reg);
6795 else if (mdic & MDIC_E)
6796 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6797 device_xname(sc->sc_dev), phy, reg);
6798 }
6799
6800 /*
6801 * wm_gmii_i80003_readreg: [mii interface function]
6802 *
6803  *	Read a PHY register on the kumeran bus.
6804  *	This could be handled by the PHY layer if we didn't have to lock the
6805  *	resource ...
6806 */
6807 static int
6808 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6809 {
6810 struct wm_softc *sc = device_private(self);
6811 int sem;
6812 int rv;
6813
6814 if (phy != 1) /* only one PHY on kumeran bus */
6815 return 0;
6816
6817 sem = swfwphysem[sc->sc_funcid];
6818 if (wm_get_swfw_semaphore(sc, sem)) {
6819 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6820 __func__);
6821 return 0;
6822 }
6823
6824 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6825 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6826 reg >> GG82563_PAGE_SHIFT);
6827 } else {
6828 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6829 reg >> GG82563_PAGE_SHIFT);
6830 }
6831 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6832 delay(200);
6833 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6834 delay(200);
6835
6836 wm_put_swfw_semaphore(sc, sem);
6837 return rv;
6838 }
6839
6840 /*
6841 * wm_gmii_i80003_writereg: [mii interface function]
6842 *
6843  *	Write a PHY register on the kumeran bus.
6844  *	This could be handled by the PHY layer if we didn't have to lock the
6845  *	resource ...
6846 */
6847 static void
6848 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6849 {
6850 struct wm_softc *sc = device_private(self);
6851 int sem;
6852
6853 if (phy != 1) /* only one PHY on kumeran bus */
6854 return;
6855
6856 sem = swfwphysem[sc->sc_funcid];
6857 if (wm_get_swfw_semaphore(sc, sem)) {
6858 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6859 __func__);
6860 return;
6861 }
6862
6863 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6864 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6865 reg >> GG82563_PAGE_SHIFT);
6866 } else {
6867 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6868 reg >> GG82563_PAGE_SHIFT);
6869 }
6870 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6871 delay(200);
6872 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6873 delay(200);
6874
6875 wm_put_swfw_semaphore(sc, sem);
6876 }
6877
6878 /*
6879 * wm_gmii_bm_readreg: [mii interface function]
6880 *
6881  *	Read a PHY register on the BM PHY (82567).
6882  *	This could be handled by the PHY layer if we didn't have to lock the
6883  *	resource ...
6884 */
6885 static int
6886 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6887 {
6888 struct wm_softc *sc = device_private(self);
6889 int sem;
6890 int rv;
6891
6892 sem = swfwphysem[sc->sc_funcid];
6893 if (wm_get_swfw_semaphore(sc, sem)) {
6894 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6895 __func__);
6896 return 0;
6897 }
6898
6899 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6900 if (phy == 1)
6901 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6902 reg);
6903 else
6904 wm_gmii_i82544_writereg(self, phy,
6905 GG82563_PHY_PAGE_SELECT,
6906 reg >> GG82563_PAGE_SHIFT);
6907 }
6908
6909 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6910 wm_put_swfw_semaphore(sc, sem);
6911 return rv;
6912 }
6913
6914 /*
6915 * wm_gmii_bm_writereg: [mii interface function]
6916 *
6917  *	Write a PHY register on the BM PHY (82567).
6918  *	This could be handled by the PHY layer if we didn't have to lock the
6919  *	resource ...
6920 */
6921 static void
6922 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6923 {
6924 struct wm_softc *sc = device_private(self);
6925 int sem;
6926
6927 sem = swfwphysem[sc->sc_funcid];
6928 if (wm_get_swfw_semaphore(sc, sem)) {
6929 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6930 __func__);
6931 return;
6932 }
6933
6934 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6935 if (phy == 1)
6936 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6937 reg);
6938 else
6939 wm_gmii_i82544_writereg(self, phy,
6940 GG82563_PHY_PAGE_SELECT,
6941 reg >> GG82563_PAGE_SHIFT);
6942 }
6943
6944 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6945 wm_put_swfw_semaphore(sc, sem);
6946 }
6947
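/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read ("rd" != 0) or write a BM PHY wakeup register on page
 *	800, by way of the WUC enable register on page 769.
 */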
6948 static void
6949 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6950 {
6951 struct wm_softc *sc = device_private(self);
6952 uint16_t regnum = BM_PHY_REG_NUM(offset);
6953 uint16_t wuce;
6954
6955 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6956 if (sc->sc_type == WM_T_PCH) {
6957 		/* XXX e1000 driver does nothing... why? */
6958 }
6959
6960 /* Set page 769 */
6961 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6962 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6963
6964 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6965
6966 wuce &= ~BM_WUC_HOST_WU_BIT;
6967 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6968 wuce | BM_WUC_ENABLE_BIT);
6969
6970 /* Select page 800 */
6971 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6972 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6973
6974 /* Write page 800 */
6975 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6976
6977 if (rd)
6978 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6979 else
6980 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6981
6982 /* Set page 769 */
6983 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6984 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6985
6986 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6987 }
6988
6989 /*
6990 * wm_gmii_hv_readreg: [mii interface function]
6991 *
6992  *	Read a PHY register on the HV PHY (82577/82578/82579).
6993  *	This could be handled by the PHY layer if we didn't have to lock the
6994  *	resource ...
6995 */
6996 static int
6997 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6998 {
6999 struct wm_softc *sc = device_private(self);
7000 uint16_t page = BM_PHY_REG_PAGE(reg);
7001 uint16_t regnum = BM_PHY_REG_NUM(reg);
7002 uint16_t val;
7003 int rv;
7004
7005 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
7006 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7007 __func__);
7008 return 0;
7009 }
7010
7011 /* XXX Workaround failure in MDIO access while cable is disconnected */
7012 if (sc->sc_phytype == WMPHY_82577) {
7013 /* XXX must write */
7014 }
7015
7016 /* Page 800 works differently than the rest so it has its own func */
7017 if (page == BM_WUC_PAGE) {
7018 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* Don't leak the semaphore */
7019 		return val;
7020 }
7021
7022 	/*
7023 	 * Pages lower than 768 work differently than the rest and would
7024 	 * need their own function (not implemented yet).
7025 	 */
7026 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7027 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* Don't leak the semaphore */
7028 		return 0;
7029 	}
7030
7031 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7032 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7033 page << BME1000_PAGE_SHIFT);
7034 }
7035
7036 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7037 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7038 return rv;
7039 }
7040
7041 /*
7042 * wm_gmii_hv_writereg: [mii interface function]
7043 *
7044  *	Write a PHY register on the HV PHY (82577/82578/82579).
7045  *	This could be handled by the PHY layer if we didn't have to lock the
7046  *	resource ...
7047 */
7048 static void
7049 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7050 {
7051 struct wm_softc *sc = device_private(self);
7052 uint16_t page = BM_PHY_REG_PAGE(reg);
7053 uint16_t regnum = BM_PHY_REG_NUM(reg);
7054
7055 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
7056 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7057 __func__);
7058 return;
7059 }
7060
7061 /* XXX Workaround failure in MDIO access while cable is disconnected */
7062
7063 /* Page 800 works differently than the rest so it has its own func */
7064 if (page == BM_WUC_PAGE) {
7065 uint16_t tmp;
7066
7067 tmp = val;
7068 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* Don't leak the semaphore */
7069 		return;
7070 }
7071
7072 	/*
7073 	 * Pages lower than 768 work differently than the rest and would
7074 	 * need their own function (not implemented yet).
7075 	 */
7076 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7077 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* Don't leak the semaphore */
7078 		return;
7079 	}
7080
7081 /*
7082 * XXX Workaround MDIO accesses being disabled after entering IEEE
7083 * Power Down (whenever bit 11 of the PHY control register is set)
7084 */
7085
7086 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7087 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7088 page << BME1000_PAGE_SHIFT);
7089 }
7090
7091 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7092 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7093 }
7094
7095 /*
7096 * wm_sgmii_readreg: [mii interface function]
7097 *
7098  *	Read a PHY register on the SGMII.
7099  *	This could be handled by the PHY layer if we didn't have to lock the
7100  *	resource ...
7101 */
7102 static int
7103 wm_sgmii_readreg(device_t self, int phy, int reg)
7104 {
7105 struct wm_softc *sc = device_private(self);
7106 uint32_t i2ccmd;
7107 int i, rv;
7108
7109 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7110 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7111 __func__);
7112 return 0;
7113 }
7114
7115 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7116 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7117 | I2CCMD_OPCODE_READ;
7118 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7119
7120 /* Poll the ready bit */
7121 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7122 delay(50);
7123 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7124 if (i2ccmd & I2CCMD_READY)
7125 break;
7126 }
7127 if ((i2ccmd & I2CCMD_READY) == 0)
7128 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7129 if ((i2ccmd & I2CCMD_ERROR) != 0)
7130 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7131
7132 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7133
7134 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7135 return rv;
7136 }
7137
7138 /*
7139 * wm_sgmii_writereg: [mii interface function]
7140 *
7141 * Write a PHY register on the SGMII.
7142 * This could be handled by the PHY layer if we didn't have to lock the
7143  *	resource ...
7144 */
7145 static void
7146 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7147 {
7148 struct wm_softc *sc = device_private(self);
7149 uint32_t i2ccmd;
7150 int i;
7151
7152 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7153 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7154 __func__);
7155 return;
7156 }
7157
7158 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7159 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7160 | I2CCMD_OPCODE_WRITE;
7161 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7162
7163 /* Poll the ready bit */
7164 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7165 delay(50);
7166 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7167 if (i2ccmd & I2CCMD_READY)
7168 break;
7169 }
7170 if ((i2ccmd & I2CCMD_READY) == 0)
7171 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7172 if ((i2ccmd & I2CCMD_ERROR) != 0)
7173 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7174
7175 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7176 }
7177
7178 /*
7179 * wm_gmii_82580_readreg: [mii interface function]
7180 *
7181 * Read a PHY register on the 82580 and I350.
7182 * This could be handled by the PHY layer if we didn't have to lock the
7183  *	resource ...
7184 */
7185 static int
7186 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7187 {
7188 struct wm_softc *sc = device_private(self);
7189 int sem;
7190 int rv;
7191
7192 sem = swfwphysem[sc->sc_funcid];
7193 if (wm_get_swfw_semaphore(sc, sem)) {
7194 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7195 __func__);
7196 return 0;
7197 }
7198
7199 rv = wm_gmii_i82544_readreg(self, phy, reg);
7200
7201 wm_put_swfw_semaphore(sc, sem);
7202 return rv;
7203 }
7204
7205 /*
7206 * wm_gmii_82580_writereg: [mii interface function]
7207 *
7208 * Write a PHY register on the 82580 and I350.
7209 * This could be handled by the PHY layer if we didn't have to lock the
7210  *	resource ...
7211 */
7212 static void
7213 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7214 {
7215 struct wm_softc *sc = device_private(self);
7216 int sem;
7217
7218 sem = swfwphysem[sc->sc_funcid];
7219 if (wm_get_swfw_semaphore(sc, sem)) {
7220 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7221 __func__);
7222 return;
7223 }
7224
7225 wm_gmii_i82544_writereg(self, phy, reg, val);
7226
7227 wm_put_swfw_semaphore(sc, sem);
7228 }
7229
7230 /*
7231 * wm_gmii_statchg: [mii interface function]
7232 *
7233 * Callback from MII layer when media changes.
7234 */
7235 static void
7236 wm_gmii_statchg(struct ifnet *ifp)
7237 {
7238 struct wm_softc *sc = ifp->if_softc;
7239 struct mii_data *mii = &sc->sc_mii;
7240
7241 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7242 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7243 sc->sc_fcrtl &= ~FCRTL_XONE;
7244
7245 /*
7246 * Get flow control negotiation result.
7247 */
7248 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7249 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7250 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7251 mii->mii_media_active &= ~IFM_ETH_FMASK;
7252 }
7253
7254 if (sc->sc_flowflags & IFM_FLOW) {
7255 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7256 sc->sc_ctrl |= CTRL_TFCE;
7257 sc->sc_fcrtl |= FCRTL_XONE;
7258 }
7259 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7260 sc->sc_ctrl |= CTRL_RFCE;
7261 }
7262
7263 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7264 DPRINTF(WM_DEBUG_LINK,
7265 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7266 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7267 } else {
7268 DPRINTF(WM_DEBUG_LINK,
7269 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7270 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7271 }
7272
7273 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7274 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7275 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7276 : WMREG_FCRTL, sc->sc_fcrtl);
7277 if (sc->sc_type == WM_T_80003) {
7278 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7279 case IFM_1000_T:
7280 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7281 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7282 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7283 break;
7284 default:
7285 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7286 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7287 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7288 break;
7289 }
7290 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7291 }
7292 }
7293
7294 /*
7295 * wm_kmrn_readreg:
7296 *
7297 * Read a kumeran register
7298 */
7299 static int
7300 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7301 {
7302 int rv;
7303
7304 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
7305 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7306 aprint_error_dev(sc->sc_dev,
7307 "%s: failed to get semaphore\n", __func__);
7308 return 0;
7309 }
7310 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
7311 if (wm_get_swfwhw_semaphore(sc)) {
7312 aprint_error_dev(sc->sc_dev,
7313 "%s: failed to get semaphore\n", __func__);
7314 return 0;
7315 }
7316 }
7317
7318 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7319 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7320 KUMCTRLSTA_REN);
7321 delay(2);
7322
7323 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7324
7325 	if (sc->sc_flags & WM_F_SWFW_SYNC)
7326 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7327 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
7328 wm_put_swfwhw_semaphore(sc);
7329
7330 return rv;
7331 }
7332
7333 /*
7334 * wm_kmrn_writereg:
7335 *
7336 * Write a kumeran register
7337 */
7338 static void
7339 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7340 {
7341
7342 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
7343 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7344 aprint_error_dev(sc->sc_dev,
7345 "%s: failed to get semaphore\n", __func__);
7346 return;
7347 }
7348 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
7349 if (wm_get_swfwhw_semaphore(sc)) {
7350 aprint_error_dev(sc->sc_dev,
7351 "%s: failed to get semaphore\n", __func__);
7352 return;
7353 }
7354 }
7355
7356 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7357 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7358 (val & KUMCTRLSTA_MASK));
7359
7360 	if (sc->sc_flags & WM_F_SWFW_SYNC)
7361 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7362 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
7363 wm_put_swfwhw_semaphore(sc);
7364 }
7365
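/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if it is
 *	Flash (82573/82574/82583 with EECD bits 15 and 16 both set).
 */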
7366 static int
7367 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7368 {
7369 uint32_t eecd = 0;
7370
7371 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7372 || sc->sc_type == WM_T_82583) {
7373 eecd = CSR_READ(sc, WMREG_EECD);
7374
7375 /* Isolate bits 15 & 16 */
7376 eecd = ((eecd >> 15) & 0x03);
7377
7378 /* If both bits are set, device is Flash type */
7379 if (eecd == 0x03)
7380 return 0;
7381 }
7382 return 1;
7383 }
7384
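/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software/firmware semaphore by setting
 *	SWSM.SWESMBI.  Returns 0 on success, 1 on timeout.
 */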
7385 static int
7386 wm_get_swsm_semaphore(struct wm_softc *sc)
7387 {
7388 int32_t timeout;
7389 uint32_t swsm;
7390
7391 /* Get the FW semaphore. */
7392 timeout = 1000 + 1; /* XXX */
7393 while (timeout) {
7394 swsm = CSR_READ(sc, WMREG_SWSM);
7395 swsm |= SWSM_SWESMBI;
7396 CSR_WRITE(sc, WMREG_SWSM, swsm);
7397 /* if we managed to set the bit we got the semaphore. */
7398 swsm = CSR_READ(sc, WMREG_SWSM);
7399 if (swsm & SWSM_SWESMBI)
7400 break;
7401
7402 delay(50);
7403 timeout--;
7404 }
7405
7406 if (timeout == 0) {
7407 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7408 /* Release semaphores */
7409 wm_put_swsm_semaphore(sc);
7410 return 1;
7411 }
7412 return 0;
7413 }
7414
7415 static void
7416 wm_put_swsm_semaphore(struct wm_softc *sc)
7417 {
7418 uint32_t swsm;
7419
7420 swsm = CSR_READ(sc, WMREG_SWSM);
7421 swsm &= ~(SWSM_SWESMBI);
7422 CSR_WRITE(sc, WMREG_SWSM, swsm);
7423 }
7424
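/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire ownership of the resources selected by "mask" in the
 *	SW_FW_SYNC register, taking the SWSM semaphore around the
 *	update when WM_F_EEPROM_SEMAPHORE is set.  Returns 0 on
 *	success, 1 on timeout.
 */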
7425 static int
7426 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7427 {
7428 uint32_t swfw_sync;
7429 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7430 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7431 	int timeout;
7432
7433 for (timeout = 0; timeout < 200; timeout++) {
7434 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7435 if (wm_get_swsm_semaphore(sc)) {
7436 aprint_error_dev(sc->sc_dev,
7437 "%s: failed to get semaphore\n",
7438 __func__);
7439 return 1;
7440 }
7441 }
7442 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7443 if ((swfw_sync & (swmask | fwmask)) == 0) {
7444 swfw_sync |= swmask;
7445 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7446 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7447 wm_put_swsm_semaphore(sc);
7448 return 0;
7449 }
7450 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7451 wm_put_swsm_semaphore(sc);
7452 delay(5000);
7453 }
7454 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7455 device_xname(sc->sc_dev), mask, swfw_sync);
7456 return 1;
7457 }
7458
7459 static void
7460 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7461 {
7462 uint32_t swfw_sync;
7463
7464 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7465 while (wm_get_swsm_semaphore(sc) != 0)
7466 continue;
7467 }
7468 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7469 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7470 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7471 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7472 wm_put_swsm_semaphore(sc);
7473 }
7474
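/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag (EXTCNFCTR.SWFLAG) used on ICH and
 *	PCH chips.  Returns 0 on success, 1 on timeout.
 */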
7475 static int
7476 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7477 {
7478 uint32_t ext_ctrl;
7479 	int timeout;
7480
7481 for (timeout = 0; timeout < 200; timeout++) {
7482 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7483 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7484 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7485
7486 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7487 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7488 return 0;
7489 delay(5000);
7490 }
7491 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7492 device_xname(sc->sc_dev), ext_ctrl);
7493 return 1;
7494 }
7495
7496 static void
7497 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7498 {
7499 uint32_t ext_ctrl;
7500 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7501 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7502 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7503 }
7504
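/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Detect which NVM bank is valid, either from EECD_SEC1VAL or
 *	by checking the signature byte of each bank.  Returns 0 on
 *	success with *bank set, -1 if no valid bank is found.
 */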
7505 static int
7506 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7507 {
7508 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7509 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7510
7511 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
7512 /* Value of bit 22 corresponds to the flash bank we're on. */
7513 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
7514 } else {
7515 uint8_t sig_byte;
7516 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7517 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE)
7518 *bank = 0;
7519 else {
7520 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7521 &sig_byte);
7522 if ((sig_byte & ICH_NVM_VALID_SIG_MASK)
7523 == ICH_NVM_SIG_VALUE)
7524 *bank = 1;
7525 else {
7526 aprint_error_dev(sc->sc_dev,
7527 "EEPROM not present\n");
7528 return -1;
7529 }
7530 }
7531 }
7532
7533 return 0;
7534 }
7535
7536 /******************************************************************************
7537 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7538 * register.
7539 *
7540 * sc - Struct containing variables accessed by shared code
7541  * offset - offset of word in the EEPROM to read
7542  * words - number of words to read
7543  * data - words read from the EEPROM
7544 *****************************************************************************/
7545 static int
7546 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7547 {
7548 int32_t error = 0;
7549 uint32_t flash_bank = 0;
7550 uint32_t act_offset = 0;
7551 uint32_t bank_offset = 0;
7552 uint16_t word = 0;
7553 uint16_t i = 0;
7554
7555 	/*
7556 	 * We need to know which flash bank is valid.  If we didn't allocate
7557 	 * eeprom_shadow_ram, we may not be managing flash_bank, so it
7558 	 * cannot be trusted and needs to be updated with each read.
7559 	 */
7560 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7561 if (error) {
7562 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7563 __func__);
7564 return error;
7565 }
7566
7567 /*
7568 * Adjust offset appropriately if we're on bank 1 - adjust for word
7569 * size
7570 */
7571 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7572
7573 error = wm_get_swfwhw_semaphore(sc);
7574 if (error) {
7575 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7576 __func__);
7577 return error;
7578 }
7579
7580 for (i = 0; i < words; i++) {
7581 /* The NVM part needs a byte offset, hence * 2 */
7582 act_offset = bank_offset + ((offset + i) * 2);
7583 error = wm_read_ich8_word(sc, act_offset, &word);
7584 if (error) {
7585 aprint_error_dev(sc->sc_dev,
7586 "%s: failed to read NVM\n", __func__);
7587 break;
7588 }
7589 data[i] = word;
7590 }
7591
7592 wm_put_swfwhw_semaphore(sc);
7593 return error;
7594 }

/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;
	int32_t i = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* Maybe check the Flash Descriptor Valid bit in Hw status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		return error;
	}

	/* Clear FCERR and DAEL in Hw status by writing 1s */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against, in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which can then be used to tell whether a cycle
	 * is in progress or has been completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that access by two threads is
	 * serialized and they don't start a cycle at the same time.
	 */

	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start a
		 * cycle.
		 */

		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/*
			 * The previous cycle ended within the timeout;
			 * now set the Flash Cycle Done.
			 */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}

/******************************************************************************
 * This function starts a flash cycle and waits for its completion.
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
{
	uint16_t hsflctl;
	uint16_t hsfsts;
	int32_t error = 1;
	uint32_t i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
	hsflctl |= HSFCTL_GO;
	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

	/* Wait till the FDONE bit is set to 1 */
	do {
		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
		if (hsfsts & HSFSTS_DONE)
			break;
		delay(1);
		i++;
	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
		error = 0;

	return error;
}

/******************************************************************************
 * Reads a byte or word from the NVM using the ICH8 flash access registers.
 *
 * sc - The pointer to the hw structure
 * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte 2=word
 * data - Pointer to the word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t *data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;
	int32_t count = 0;

	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/*
		 * Write the last 24 bits of index into the Flash Linear
		 * address field in Flash Address.
		 */
		/* TODO: maybe check the index against the size of flash */

		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1; if so, clear it and try
		 * the whole sequence a few more times.  Otherwise read
		 * in (shift in) the Flash Data0, least significant byte
		 * first.
		 */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (uint8_t)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0)
				break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}

/******************************************************************************
 * Reads a single byte from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The index of the byte to read.
 * data - Pointer to a byte to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
{
	int32_t status;
	uint16_t word = 0;

	status = wm_read_ich8_data(sc, index, 1, &word);
	if (status == 0)
		*data = (uint8_t)word;
	else
		*data = 0;

	return status;
}

/******************************************************************************
 * Reads a word from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The starting byte index of the word to read.
 * data - Pointer to a word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
{
	int32_t status;

	status = wm_read_ich8_data(sc, index, 2, data);
	return status;
}

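/*
 * Check whether manageability firmware mode is enabled, dispatching on
 * chip type; returns non-zero if a management mode is active.
 */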
static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	return rv;
}

static int
wm_check_mng_mode_ich8lan(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_check_mng_mode_82574(struct wm_softc *sc)
{
	uint16_t data;

	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);

	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
		return 1;

	return 0;
}

static int
wm_check_mng_mode_generic(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

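/*
 * Return 1 if the management firmware will pass packets through to the
 * host, i.e. TCO receive is enabled and MAC address filtering is on;
 * otherwise return 0.
 */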
static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
	    device_xname(sc->sc_dev), manc));
	if (((manc & MANC_RECV_TCO_EN) == 0)
	    || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((fwsm & FWSM_MODE_MASK)
			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		return 1;

	return 0;
}

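/*
 * Return 0 if a PHY reset is currently permitted, or -1 if the
 * manageability firmware is blocking it.
 */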
static int
wm_check_reset_block(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_FWSM);
		if ((reg & FWSM_RSPCIPHY) != 0)
			return 0;
		else
			return -1;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return -1;
		else
			return 0;
		break;
	default:
		/* no problem */
		break;
	}

	return 0;
}

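/*
 * Set the DRV_LOAD bit so the firmware knows the driver has taken over;
 * wm_release_hw_control() below clears it again (see wm_suspend()).
 */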
static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_82573:
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

static void
wm_release_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
		return;

	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
	} else {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
	}
}

/* XXX Currently TBI only */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK,
	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)));

	/*
	 * SWDPIN   LU   RXCW
	 *    0      0     0
	 *    0      0     1	(should not happen)
	 *    0      1     0	(should not happen)
	 *    0      1     1	(should not happen)
	 *    1      0     0	Disable autonego and force linkup
	 *    1      0     1	got /C/ but not linkup yet
	 *    1      1     0	(linkup)
	 *    1      1     1	If IFM_AUTO, back to autonego
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
		    __func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware may update TFCE and RFCE in CTRL
		 * automatically, so keep sc->sc_ctrl in sync with the
		 * value just read.
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
		    __func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
		    status));
	}

	return 0;
}

/* Work-around for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link is other than 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

 out:
	return;
}

/* Workaround: without this, WOL from S5 stops working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

#ifdef WM_WOL
/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
#endif /* WM_WOL */

/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled when the link runs at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

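/*
 * Write the K1 (a Kumeran power-save state) configuration and briefly
 * force the speed via CTRL/CTRL_EXT so that the new setting is latched.
 */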
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	delay(20);
}

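/*
 * Force the MAC-PHY interconnect from SMBus mode back to PCIe by
 * pulsing the LANPHYPC override bits in CTRL.
 */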
static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_check_reset_block(sc) == 0)) {
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(10);
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}

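/*
 * Set the PCIe completion timeout: 10ms directly in GCR on parts
 * without the v2 capability, 16ms via the PCIe DCSR2 config register
 * otherwise, and disable completion timeout resend in either case.
 */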
static void
wm_set_pcie_completion_timeout(struct wm_softc *sc)
{
	uint32_t gcr;
	pcireg_t ctrl2;

	gcr = CSR_READ(sc, WMREG_GCR);

	/* Only take action if timeout value is defaulted to 0 */
	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
		goto out;

	if ((gcr & GCR_CAP_VER2) == 0) {
		gcr |= GCR_CMPL_TMOUT_10MS;
		goto out;
	}

	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCIE_DCSR2);
	ctrl2 |= WM_PCIE_DCSR2_16MS;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);

 out:
	/* Disable completion timeout resend */
	gcr &= ~GCR_CMPL_TMOUT_RESEND;

	CSR_WRITE(sc, WMREG_GCR, gcr);
}

/* Special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_init_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
#if 0 /* XXX */
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
#endif
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flags are set after the EEPROM reset code
	 * has run.
	 */
}

#ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if (child->mii_mpd_rev <= 2)
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0 /* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
#endif /* WM_WOL */

static bool
wm_suspend(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_release_manageability(sc);
	wm_release_hw_control(sc);
#ifdef WM_WOL
	wm_enable_wakeup(sc);
#endif

	return true;
}

static bool
wm_resume(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_init_manageability(sc);

	return true;
}

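/*
 * Enable or disable Energy Efficient Ethernet (IEEE 802.3az) on the
 * I350 according to the WM_F_EEE flag, for both the 100Mbps and 1Gbps
 * advertisements.
 */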
static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}