/*	$NetBSD: if_wm.c,v 1.256 2013/06/19 10:27:08 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.256 2013/06/19 10:27:08 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
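
/*
 * Illustrative sketch (not compiled into the driver): because the ring
 * sizes above are powers of two, WM_NEXTTX()/WM_NEXTRX() wrap an index
 * with a cheap AND mask instead of a modulo operation.
 */
#if 0
	int idx = WM_NRXDESC - 1;	/* last slot, 255 */
	idx = WM_NEXTRX(idx);		/* (256 & 255) == 0: wraps to start */
#endif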

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
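
/*
 * Illustrative sketch (not part of the driver): roughly how the Rx
 * interrupt path would use the chain macros above to stitch a
 * multi-buffer (e.g. jumbo) packet together before handing it up.
 */
#if 0
	WM_RXCHAIN_RESET(sc);		/* start a fresh chain */
	WM_RXCHAIN_LINK(sc, m1);	/* first 2k buffer */
	WM_RXCHAIN_LINK(sc, m2);	/* continuation buffer */
	/* ... once EOP is seen, sc->sc_rxhead is the whole packet. */
#endif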

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
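
/*
 * PCI register writes are posted: a write may still be buffered in a
 * host bridge when the CPU moves on, and reading any device register
 * (WMREG_STATUS here) forces it out to the chip.  A hedged usage
 * sketch (not a sequence the driver necessarily executes verbatim):
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);	/* make sure the reset reaches the chip */
#endif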

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
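
/*
 * Worked example (illustrative only): with WM_NTXDESC(sc) == 4096, the
 * call below crosses the end of the ring, so the macro issues two
 * bus_dmamap_sync() calls -- one for descriptors 4094-4095 and one for
 * descriptors 0-1.
 */
#if 0
	WM_CDTXSYNC(sc, 4094, 4, BUS_DMASYNC_PREWRITE);
#endif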

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
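
/*
 * Worked example (illustrative only): an Ethernet header is 14 bytes,
 * so with sc_align_tweak == 2 the payload that follows it begins at
 * buffer offset 2 + 14 == 16, a 4-byte boundary:
 */
#if 0
	m->m_data = m->m_ext.ext_buf + 2;	/* IP header lands aligned */
#endif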

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
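
/*
 * Hedged usage sketch (not a call the driver necessarily makes): writing
 * one byte at offset 0 through the indirect interface above.  The
 * register name WMREG_SCTL and the value are assumptions made for
 * illustration only.
 */
#if 0
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0, 0x1c);
#endif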

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's not a problem, because those newer chips
			 * don't have the bugs this mapping works around.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
				    PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
				    PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
1457 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1458 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1459 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1460 sizeof(struct wm_control_data_82542) :
1461 sizeof(struct wm_control_data_82544);
1462 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1463 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1464 &sc->sc_cd_rseg, 0)) != 0) {
1465 aprint_error_dev(sc->sc_dev,
1466 "unable to allocate control data, error = %d\n",
1467 error);
1468 goto fail_0;
1469 }
1470
1471 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1472 sc->sc_cd_rseg, sc->sc_cd_size,
1473 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1474 aprint_error_dev(sc->sc_dev,
1475 "unable to map control data, error = %d\n", error);
1476 goto fail_1;
1477 }
1478
1479 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1480 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1481 aprint_error_dev(sc->sc_dev,
1482 "unable to create control data DMA map, error = %d\n",
1483 error);
1484 goto fail_2;
1485 }
1486
1487 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1488 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1489 aprint_error_dev(sc->sc_dev,
1490 "unable to load control data DMA map, error = %d\n",
1491 error);
1492 goto fail_3;
1493 }
1494
1495 /*
1496 * Create the transmit buffer DMA maps.
1497 */
1498 WM_TXQUEUELEN(sc) =
1499 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1500 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1501 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1502 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1503 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1504 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1505 aprint_error_dev(sc->sc_dev,
1506 "unable to create Tx DMA map %d, error = %d\n",
1507 i, error);
1508 goto fail_4;
1509 }
1510 }
1511
1512 /*
1513 * Create the receive buffer DMA maps.
1514 */
1515 for (i = 0; i < WM_NRXDESC; i++) {
1516 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1517 MCLBYTES, 0, 0,
1518 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1519 aprint_error_dev(sc->sc_dev,
1520 "unable to create Rx DMA map %d error = %d\n",
1521 i, error);
1522 goto fail_5;
1523 }
1524 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1525 }
1526
1527 /* clear interesting stat counters */
1528 CSR_READ(sc, WMREG_COLC);
1529 CSR_READ(sc, WMREG_RXERRC);
1530
1531 /* get PHY control from SMBus to PCIe */
1532 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1533 || (sc->sc_type == WM_T_PCH_LPT))
1534 wm_smbustopci(sc);
1535
1536 /*
1537 * Reset the chip to a known state.
1538 */
1539 wm_reset(sc);
1540
1541 switch (sc->sc_type) {
1542 case WM_T_82571:
1543 case WM_T_82572:
1544 case WM_T_82573:
1545 case WM_T_82574:
1546 case WM_T_82583:
1547 case WM_T_80003:
1548 case WM_T_ICH8:
1549 case WM_T_ICH9:
1550 case WM_T_ICH10:
1551 case WM_T_PCH:
1552 case WM_T_PCH2:
1553 case WM_T_PCH_LPT:
1554 if (wm_check_mng_mode(sc) != 0)
1555 wm_get_hw_control(sc);
1556 break;
1557 default:
1558 break;
1559 }
1560
1561 /*
1562 * Get some information about the EEPROM.
1563 */
1564 switch (sc->sc_type) {
1565 case WM_T_82542_2_0:
1566 case WM_T_82542_2_1:
1567 case WM_T_82543:
1568 case WM_T_82544:
1569 /* Microwire */
1570 sc->sc_ee_addrbits = 6;
1571 break;
1572 case WM_T_82540:
1573 case WM_T_82545:
1574 case WM_T_82545_3:
1575 case WM_T_82546:
1576 case WM_T_82546_3:
1577 /* Microwire */
1578 reg = CSR_READ(sc, WMREG_EECD);
1579 if (reg & EECD_EE_SIZE)
1580 sc->sc_ee_addrbits = 8;
1581 else
1582 sc->sc_ee_addrbits = 6;
1583 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1584 break;
1585 case WM_T_82541:
1586 case WM_T_82541_2:
1587 case WM_T_82547:
1588 case WM_T_82547_2:
1589 reg = CSR_READ(sc, WMREG_EECD);
1590 if (reg & EECD_EE_TYPE) {
1591 /* SPI */
1592 wm_set_spiaddrbits(sc);
1593 } else
1594 /* Microwire */
1595 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1596 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1597 break;
1598 case WM_T_82571:
1599 case WM_T_82572:
1600 /* SPI */
1601 wm_set_spiaddrbits(sc);
1602 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1603 break;
1604 case WM_T_82573:
1605 case WM_T_82574:
1606 case WM_T_82583:
1607 if (wm_is_onboard_nvm_eeprom(sc) == 0)
1608 sc->sc_flags |= WM_F_EEPROM_FLASH;
1609 else {
1610 /* SPI */
1611 wm_set_spiaddrbits(sc);
1612 }
1613 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1614 break;
1615 case WM_T_82575:
1616 case WM_T_82576:
1617 case WM_T_82580:
1618 case WM_T_82580ER:
1619 case WM_T_I350:
1620 case WM_T_80003:
1621 /* SPI */
1622 wm_set_spiaddrbits(sc);
1623 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1624 break;
1625 case WM_T_ICH8:
1626 case WM_T_ICH9:
1627 case WM_T_ICH10:
1628 case WM_T_PCH:
1629 case WM_T_PCH2:
1630 case WM_T_PCH_LPT:
1631 /* FLASH */
1632 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1633 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1634 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1635 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1636 aprint_error_dev(sc->sc_dev,
1637 "can't map FLASH registers\n");
1638 return;
1639 }
1640 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1641 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1642 ICH_FLASH_SECTOR_SIZE;
1643 sc->sc_ich8_flash_bank_size =
1644 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1645 sc->sc_ich8_flash_bank_size -=
1646 (reg & ICH_GFPREG_BASE_MASK);
1647 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1648 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
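/*
 * A hedged worked example (hypothetical register value, assuming the
 * usual 4 KB ICH_FLASH_SECTOR_SIZE): if GFPREG reads 0x00200001, the
 * base field is sector 1 and the limit field is sector 0x20, so the
 * flash base is 1 * 4096 bytes and the region spans 0x20 + 1 - 1 = 32
 * sectors (131072 bytes); dividing by 2 * sizeof(uint16_t) leaves
 * 32768, the size of one of the two banks in 16-bit words.
 */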
1649 break;
1650 case WM_T_I210:
1651 case WM_T_I211:
1652 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1653 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1654 break;
1655 default:
1656 break;
1657 }
1658
1659 /*
1660 * Defer printing the EEPROM type until after verifying the checksum.
1661 * This allows the EEPROM type to be printed correctly in the case
1662 * that no EEPROM is attached.
1663 */
1664 /*
1665 * Validate the EEPROM checksum. If the checksum fails, flag
1666 * this for later, so we can fail future reads from the EEPROM.
1667 */
1668 if (wm_validate_eeprom_checksum(sc)) {
1669 /*
1670 * Check again, because some PCIe parts fail the
1671 * first check due to the link being in a sleep state.
1672 */
1673 if (wm_validate_eeprom_checksum(sc))
1674 sc->sc_flags |= WM_F_EEPROM_INVALID;
1675 }
1676
1677 /* Set device properties (macflags) */
1678 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1679
1680 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1681 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1682 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1683 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1684 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1685 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1686 } else {
1687 if (sc->sc_flags & WM_F_EEPROM_SPI)
1688 eetype = "SPI";
1689 else
1690 eetype = "MicroWire";
1691 aprint_verbose_dev(sc->sc_dev,
1692 "%u word (%d address bits) %s EEPROM\n",
1693 1U << sc->sc_ee_addrbits,
1694 sc->sc_ee_addrbits, eetype);
1695 }
1696
1697 /*
1698 * Read the Ethernet address from the EEPROM, unless it was
1699 * already found in the device properties.
1700 */
1701 ea = prop_dictionary_get(dict, "mac-address");
1702 if (ea != NULL) {
1703 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1704 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1705 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1706 } else {
1707 if (wm_read_mac_addr(sc, enaddr) != 0) {
1708 aprint_error_dev(sc->sc_dev,
1709 "unable to read Ethernet address\n");
1710 return;
1711 }
1712 }
1713
1714 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1715 ether_sprintf(enaddr));
1716
1717 /*
1718 * Read the config info from the EEPROM, and set up various
1719 * bits in the control registers based on their contents.
1720 */
1721 pn = prop_dictionary_get(dict, "i82543-cfg1");
1722 if (pn != NULL) {
1723 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1724 cfg1 = (uint16_t) prop_number_integer_value(pn);
1725 } else {
1726 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1727 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1728 return;
1729 }
1730 }
1731
1732 pn = prop_dictionary_get(dict, "i82543-cfg2");
1733 if (pn != NULL) {
1734 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1735 cfg2 = (uint16_t) prop_number_integer_value(pn);
1736 } else {
1737 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1738 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1739 return;
1740 }
1741 }
1742
1743 /* check for WM_F_WOL */
1744 switch (sc->sc_type) {
1745 case WM_T_82542_2_0:
1746 case WM_T_82542_2_1:
1747 case WM_T_82543:
1748 /* dummy? */
1749 eeprom_data = 0;
1750 apme_mask = EEPROM_CFG3_APME;
1751 break;
1752 case WM_T_82544:
1753 apme_mask = EEPROM_CFG2_82544_APM_EN;
1754 eeprom_data = cfg2;
1755 break;
1756 case WM_T_82546:
1757 case WM_T_82546_3:
1758 case WM_T_82571:
1759 case WM_T_82572:
1760 case WM_T_82573:
1761 case WM_T_82574:
1762 case WM_T_82583:
1763 case WM_T_80003:
1764 default:
1765 apme_mask = EEPROM_CFG3_APME;
1766 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1767 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1768 break;
1769 case WM_T_82575:
1770 case WM_T_82576:
1771 case WM_T_82580:
1772 case WM_T_82580ER:
1773 case WM_T_I350:
1774 case WM_T_ICH8:
1775 case WM_T_ICH9:
1776 case WM_T_ICH10:
1777 case WM_T_PCH:
1778 case WM_T_PCH2:
1779 case WM_T_PCH_LPT:
1780 /* XXX The funcid should be checked on some devices */
1781 apme_mask = WUC_APME;
1782 eeprom_data = CSR_READ(sc, WMREG_WUC);
1783 break;
1784 }
1785
1786 /* Set the WM_F_WOL flag based on the APME bit read above */
1787 if ((eeprom_data & apme_mask) != 0)
1788 sc->sc_flags |= WM_F_WOL;
1789 #ifdef WM_DEBUG
1790 if ((sc->sc_flags & WM_F_WOL) != 0)
1791 printf("WOL\n");
1792 #endif
1793
1794 /*
1795 * XXX need special handling for some multiple port cards
1796 * to disable a particular port.
1797 */
1798
1799 if (sc->sc_type >= WM_T_82544) {
1800 pn = prop_dictionary_get(dict, "i82543-swdpin");
1801 if (pn != NULL) {
1802 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1803 swdpin = (uint16_t) prop_number_integer_value(pn);
1804 } else {
1805 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1806 aprint_error_dev(sc->sc_dev,
1807 "unable to read SWDPIN\n");
1808 return;
1809 }
1810 }
1811 }
1812
1813 if (cfg1 & EEPROM_CFG1_ILOS)
1814 sc->sc_ctrl |= CTRL_ILOS;
1815 if (sc->sc_type >= WM_T_82544) {
1816 sc->sc_ctrl |=
1817 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1818 CTRL_SWDPIO_SHIFT;
1819 sc->sc_ctrl |=
1820 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1821 CTRL_SWDPINS_SHIFT;
1822 } else {
1823 sc->sc_ctrl |=
1824 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1825 CTRL_SWDPIO_SHIFT;
1826 }
1827
1828 #if 0
1829 if (sc->sc_type >= WM_T_82544) {
1830 if (cfg1 & EEPROM_CFG1_IPS0)
1831 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1832 if (cfg1 & EEPROM_CFG1_IPS1)
1833 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1834 sc->sc_ctrl_ext |=
1835 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1836 CTRL_EXT_SWDPIO_SHIFT;
1837 sc->sc_ctrl_ext |=
1838 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1839 CTRL_EXT_SWDPINS_SHIFT;
1840 } else {
1841 sc->sc_ctrl_ext |=
1842 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1843 CTRL_EXT_SWDPIO_SHIFT;
1844 }
1845 #endif
1846
1847 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1848 #if 0
1849 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1850 #endif
1851
1852 /*
1853 * Set up some register offsets that are different between
1854 * the i82542 and the i82543 and later chips.
1855 */
1856 if (sc->sc_type < WM_T_82543) {
1857 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1858 sc->sc_tdt_reg = WMREG_OLD_TDT;
1859 } else {
1860 sc->sc_rdt_reg = WMREG_RDT;
1861 sc->sc_tdt_reg = WMREG_TDT;
1862 }
1863
1864 if (sc->sc_type == WM_T_PCH) {
1865 uint16_t val;
1866
1867 /* Save the NVM K1 bit setting */
1868 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1869
1870 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1871 sc->sc_nvm_k1_enabled = 1;
1872 else
1873 sc->sc_nvm_k1_enabled = 0;
1874 }
1875
1876 /*
1877 * Determine whether we're in TBI, GMII, or SGMII mode, and
1878 * initialize the media structures accordingly.
1879 */
1880 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1881 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1882 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
1883 || sc->sc_type == WM_T_82573
1884 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1885 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1886 wm_gmii_mediainit(sc, wmp->wmp_product);
1887 } else if (sc->sc_type < WM_T_82543 ||
1888 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1889 if (wmp->wmp_flags & WMP_F_1000T)
1890 aprint_error_dev(sc->sc_dev,
1891 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1892 wm_tbi_mediainit(sc);
1893 } else {
1894 switch (sc->sc_type) {
1895 case WM_T_82575:
1896 case WM_T_82576:
1897 case WM_T_82580:
1898 case WM_T_82580ER:
1899 case WM_T_I350:
1900 case WM_T_I210:
1901 case WM_T_I211:
1902 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1903 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1904 case CTRL_EXT_LINK_MODE_SGMII:
1905 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1906 sc->sc_flags |= WM_F_SGMII;
1907 CSR_WRITE(sc, WMREG_CTRL_EXT,
1908 reg | CTRL_EXT_I2C_ENA);
1909 wm_gmii_mediainit(sc, wmp->wmp_product);
1910 break;
1911 case CTRL_EXT_LINK_MODE_1000KX:
1912 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1913 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1914 CSR_WRITE(sc, WMREG_CTRL_EXT,
1915 reg | CTRL_EXT_I2C_ENA);
1916 panic("not supported yet\n");
1917 break;
1918 case CTRL_EXT_LINK_MODE_GMII:
1919 default:
1920 CSR_WRITE(sc, WMREG_CTRL_EXT,
1921 reg & ~CTRL_EXT_I2C_ENA);
1922 wm_gmii_mediainit(sc, wmp->wmp_product);
1923 break;
1924 }
1925 break;
1926 default:
1927 if (wmp->wmp_flags & WMP_F_1000X)
1928 aprint_error_dev(sc->sc_dev,
1929 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1930 wm_gmii_mediainit(sc, wmp->wmp_product);
1931 }
1932 }
1933
1934 ifp = &sc->sc_ethercom.ec_if;
1935 xname = device_xname(sc->sc_dev);
1936 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1937 ifp->if_softc = sc;
1938 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1939 ifp->if_ioctl = wm_ioctl;
1940 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1941 ifp->if_start = wm_nq_start;
1942 else
1943 ifp->if_start = wm_start;
1944 ifp->if_watchdog = wm_watchdog;
1945 ifp->if_init = wm_init;
1946 ifp->if_stop = wm_stop;
1947 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1948 IFQ_SET_READY(&ifp->if_snd);
1949
1950 /* Check for jumbo frame */
1951 switch (sc->sc_type) {
1952 case WM_T_82573:
1953 /* XXX limited to 9234 if ASPM is disabled */
1954 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1955 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1956 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1957 break;
1958 case WM_T_82571:
1959 case WM_T_82572:
1960 case WM_T_82574:
1961 case WM_T_82575:
1962 case WM_T_82576:
1963 case WM_T_82580:
1964 case WM_T_82580ER:
1965 case WM_T_I350:
1966 case WM_T_I210:
1967 case WM_T_I211:
1968 case WM_T_80003:
1969 case WM_T_ICH9:
1970 case WM_T_ICH10:
1971 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1972 case WM_T_PCH_LPT:
1973 /* XXX limited to 9234 */
1974 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1975 break;
1976 case WM_T_PCH:
1977 /* XXX limited to 4096 */
1978 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1979 break;
1980 case WM_T_82542_2_0:
1981 case WM_T_82542_2_1:
1982 case WM_T_82583:
1983 case WM_T_ICH8:
1984 /* No support for jumbo frame */
1985 break;
1986 default:
1987 /* ETHER_MAX_LEN_JUMBO */
1988 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1989 break;
1990 }
1991
1992 /*
1993 * If we're an i82543 or greater, we can support VLANs.
1994 */
1995 if (sc->sc_type >= WM_T_82543)
1996 sc->sc_ethercom.ec_capabilities |=
1997 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1998
1999 /*
2000 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2001 * on i82543 and later.
2002 */
2003 if (sc->sc_type >= WM_T_82543) {
2004 ifp->if_capabilities |=
2005 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2006 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2007 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2008 IFCAP_CSUM_TCPv6_Tx |
2009 IFCAP_CSUM_UDPv6_Tx;
2010 }
2011
2012 /*
2013 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2014 *
2015 * 82541GI (8086:1076) ... no
2016 * 82572EI (8086:10b9) ... yes
2017 */
2018 if (sc->sc_type >= WM_T_82571) {
2019 ifp->if_capabilities |=
2020 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2021 }
2022
2023 /*
2024 * If we're an i82544 or greater (except i82547), we can do
2025 * TCP segmentation offload.
2026 */
2027 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2028 ifp->if_capabilities |= IFCAP_TSOv4;
2029 }
2030
2031 if (sc->sc_type >= WM_T_82571) {
2032 ifp->if_capabilities |= IFCAP_TSOv6;
2033 }
2034
2035 /*
2036 * Attach the interface.
2037 */
2038 if_attach(ifp);
2039 ether_ifattach(ifp, enaddr);
2040 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2041 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2042
2043 #ifdef WM_EVENT_COUNTERS
2044 /* Attach event counters. */
2045 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2046 NULL, xname, "txsstall");
2047 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2048 NULL, xname, "txdstall");
2049 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2050 NULL, xname, "txfifo_stall");
2051 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2052 NULL, xname, "txdw");
2053 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2054 NULL, xname, "txqe");
2055 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2056 NULL, xname, "rxintr");
2057 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2058 NULL, xname, "linkintr");
2059
2060 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2061 NULL, xname, "rxipsum");
2062 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2063 NULL, xname, "rxtusum");
2064 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2065 NULL, xname, "txipsum");
2066 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2067 NULL, xname, "txtusum");
2068 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2069 NULL, xname, "txtusum6");
2070
2071 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2072 NULL, xname, "txtso");
2073 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2074 NULL, xname, "txtso6");
2075 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2076 NULL, xname, "txtsopain");
2077
2078 for (i = 0; i < WM_NTXSEGS; i++) {
2079 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2080 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2081 NULL, xname, wm_txseg_evcnt_names[i]);
2082 }
2083
2084 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2085 NULL, xname, "txdrop");
2086
2087 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2088 NULL, xname, "tu");
2089
2090 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2091 NULL, xname, "tx_xoff");
2092 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2093 NULL, xname, "tx_xon");
2094 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2095 NULL, xname, "rx_xoff");
2096 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2097 NULL, xname, "rx_xon");
2098 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2099 NULL, xname, "rx_macctl");
2100 #endif /* WM_EVENT_COUNTERS */
2101
2102 if (pmf_device_register(self, wm_suspend, wm_resume))
2103 pmf_class_network_register(self, ifp);
2104 else
2105 aprint_error_dev(self, "couldn't establish power handler\n");
2106
2107 return;
2108
2109 /*
2110 * Free any resources we've allocated during the failed attach
2111 * attempt. Do this in reverse order and fall through.
2112 */
2113 fail_5:
2114 for (i = 0; i < WM_NRXDESC; i++) {
2115 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2116 bus_dmamap_destroy(sc->sc_dmat,
2117 sc->sc_rxsoft[i].rxs_dmamap);
2118 }
2119 fail_4:
2120 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2121 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2122 bus_dmamap_destroy(sc->sc_dmat,
2123 sc->sc_txsoft[i].txs_dmamap);
2124 }
2125 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2126 fail_3:
2127 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2128 fail_2:
2129 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2130 sc->sc_cd_size);
2131 fail_1:
2132 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2133 fail_0:
2134 return;
2135 }
2136
2137 static int
2138 wm_detach(device_t self, int flags __unused)
2139 {
2140 struct wm_softc *sc = device_private(self);
2141 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2142 int i, s;
2143
2144 s = splnet();
2145 /* Stop the interface. Callouts are stopped in it. */
2146 wm_stop(ifp, 1);
2147 splx(s);
2148
2149 pmf_device_deregister(self);
2150
2151 /* Tell the firmware about the release */
2152 wm_release_manageability(sc);
2153 wm_release_hw_control(sc);
2154
2155 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2156
2157 /* Delete all remaining media. */
2158 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2159
2160 ether_ifdetach(ifp);
2161 if_detach(ifp);
2162
2163
2164 /* Unload RX dmamaps and free mbufs */
2165 wm_rxdrain(sc);
2166
2167 /* Free the DMA maps; this mirrors the cleanup at the end of wm_attach() */
2168 for (i = 0; i < WM_NRXDESC; i++) {
2169 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2170 bus_dmamap_destroy(sc->sc_dmat,
2171 sc->sc_rxsoft[i].rxs_dmamap);
2172 }
2173 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2174 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2175 bus_dmamap_destroy(sc->sc_dmat,
2176 sc->sc_txsoft[i].txs_dmamap);
2177 }
2178 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2179 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2180 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2181 sc->sc_cd_size);
2182 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2183
2184 /* Disestablish the interrupt handler */
2185 if (sc->sc_ih != NULL) {
2186 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2187 sc->sc_ih = NULL;
2188 }
2189
2190 /* Unmap the registers */
2191 if (sc->sc_ss) {
2192 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2193 sc->sc_ss = 0;
2194 }
2195
2196 if (sc->sc_ios) {
2197 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2198 sc->sc_ios = 0;
2199 }
2200
2201 return 0;
2202 }
2203
2204 /*
2205 * wm_tx_offload:
2206 *
2207 * Set up TCP/IP checksumming parameters for the
2208 * specified packet.
2209 */
2210 static int
2211 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2212 uint8_t *fieldsp)
2213 {
2214 struct mbuf *m0 = txs->txs_mbuf;
2215 struct livengood_tcpip_ctxdesc *t;
2216 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2217 uint32_t ipcse;
2218 struct ether_header *eh;
2219 int offset, iphl;
2220 uint8_t fields;
2221
2222 /*
2223 * XXX It would be nice if the mbuf pkthdr had offset
2224 * fields for the protocol headers.
2225 */
2226
2227 eh = mtod(m0, struct ether_header *);
2228 switch (htons(eh->ether_type)) {
2229 case ETHERTYPE_IP:
2230 case ETHERTYPE_IPV6:
2231 offset = ETHER_HDR_LEN;
2232 break;
2233
2234 case ETHERTYPE_VLAN:
2235 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2236 break;
2237
2238 default:
2239 /*
2240 * Don't support this protocol or encapsulation.
2241 */
2242 *fieldsp = 0;
2243 *cmdp = 0;
2244 return 0;
2245 }
2246
2247 if ((m0->m_pkthdr.csum_flags &
2248 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2249 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2250 } else {
2251 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2252 }
2253 ipcse = offset + iphl - 1;
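/*
 * A small worked example: for an untagged IPv4 frame with a 20-byte
 * IP header, offset is ETHER_HDR_LEN (14) and iphl is 20, so
 * ipcse = 14 + 20 - 1 = 33, the offset of the last byte covered by
 * the IP header checksum.
 */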
2254
2255 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2256 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2257 seg = 0;
2258 fields = 0;
2259
2260 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2261 int hlen = offset + iphl;
2262 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2263
2264 if (__predict_false(m0->m_len <
2265 (hlen + sizeof(struct tcphdr)))) {
2266 /*
2267 * TCP/IP headers are not in the first mbuf; we need
2268 * to do this the slow and painful way. Let's just
2269 * hope this doesn't happen very often.
2270 */
2271 struct tcphdr th;
2272
2273 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2274
2275 m_copydata(m0, hlen, sizeof(th), &th);
2276 if (v4) {
2277 struct ip ip;
2278
2279 m_copydata(m0, offset, sizeof(ip), &ip);
2280 ip.ip_len = 0;
2281 m_copyback(m0,
2282 offset + offsetof(struct ip, ip_len),
2283 sizeof(ip.ip_len), &ip.ip_len);
2284 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2285 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2286 } else {
2287 struct ip6_hdr ip6;
2288
2289 m_copydata(m0, offset, sizeof(ip6), &ip6);
2290 ip6.ip6_plen = 0;
2291 m_copyback(m0,
2292 offset + offsetof(struct ip6_hdr, ip6_plen),
2293 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2294 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2295 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2296 }
2297 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2298 sizeof(th.th_sum), &th.th_sum);
2299
2300 hlen += th.th_off << 2;
2301 } else {
2302 /*
2303 * TCP/IP headers are in the first mbuf; we can do
2304 * this the easy way.
2305 */
2306 struct tcphdr *th;
2307
2308 if (v4) {
2309 struct ip *ip =
2310 (void *)(mtod(m0, char *) + offset);
2311 th = (void *)(mtod(m0, char *) + hlen);
2312
2313 ip->ip_len = 0;
2314 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2315 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2316 } else {
2317 struct ip6_hdr *ip6 =
2318 (void *)(mtod(m0, char *) + offset);
2319 th = (void *)(mtod(m0, char *) + hlen);
2320
2321 ip6->ip6_plen = 0;
2322 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2323 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2324 }
2325 hlen += th->th_off << 2;
2326 }
2327
2328 if (v4) {
2329 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2330 cmdlen |= WTX_TCPIP_CMD_IP;
2331 } else {
2332 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2333 ipcse = 0;
2334 }
2335 cmd |= WTX_TCPIP_CMD_TSE;
2336 cmdlen |= WTX_TCPIP_CMD_TSE |
2337 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2338 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2339 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2340 }
2341
2342 /*
2343 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2344 * offload feature, if we load the context descriptor, we
2345 * MUST provide valid values for IPCSS and TUCSS fields.
2346 */
2347
2348 ipcs = WTX_TCPIP_IPCSS(offset) |
2349 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2350 WTX_TCPIP_IPCSE(ipcse);
2351 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2352 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2353 fields |= WTX_IXSM;
2354 }
2355
2356 offset += iphl;
2357
2358 if (m0->m_pkthdr.csum_flags &
2359 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2360 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2361 fields |= WTX_TXSM;
2362 tucs = WTX_TCPIP_TUCSS(offset) |
2363 WTX_TCPIP_TUCSO(offset +
2364 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2365 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2366 } else if ((m0->m_pkthdr.csum_flags &
2367 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2368 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2369 fields |= WTX_TXSM;
2370 tucs = WTX_TCPIP_TUCSS(offset) |
2371 WTX_TCPIP_TUCSO(offset +
2372 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2373 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2374 } else {
2375 /* Just initialize it to a valid TCP context. */
2376 tucs = WTX_TCPIP_TUCSS(offset) |
2377 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2378 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2379 }
2380
2381 /* Fill in the context descriptor. */
2382 t = (struct livengood_tcpip_ctxdesc *)
2383 &sc->sc_txdescs[sc->sc_txnext];
2384 t->tcpip_ipcs = htole32(ipcs);
2385 t->tcpip_tucs = htole32(tucs);
2386 t->tcpip_cmdlen = htole32(cmdlen);
2387 t->tcpip_seg = htole32(seg);
2388 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2389
2390 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2391 txs->txs_ndesc++;
2392
2393 *cmdp = cmd;
2394 *fieldsp = fields;
2395
2396 return 0;
2397 }
2398
2399 static void
2400 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2401 {
2402 struct mbuf *m;
2403 int i;
2404
2405 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2406 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2407 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2408 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2409 m->m_data, m->m_len, m->m_flags);
2410 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2411 i, i == 1 ? "" : "s");
2412 }
2413
2414 /*
2415 * wm_82547_txfifo_stall:
2416 *
2417 * Callout used to wait for the 82547 Tx FIFO to drain,
2418 * reset the FIFO pointers, and restart packet transmission.
2419 */
2420 static void
2421 wm_82547_txfifo_stall(void *arg)
2422 {
2423 struct wm_softc *sc = arg;
2424 int s;
2425
2426 s = splnet();
2427
2428 if (sc->sc_txfifo_stall) {
2429 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2430 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2431 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2432 /*
2433 * Packets have drained. Stop transmitter, reset
2434 * FIFO pointers, restart transmitter, and kick
2435 * the packet queue.
2436 */
2437 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2438 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2439 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2440 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2441 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2442 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2443 CSR_WRITE(sc, WMREG_TCTL, tctl);
2444 CSR_WRITE_FLUSH(sc);
2445
2446 sc->sc_txfifo_head = 0;
2447 sc->sc_txfifo_stall = 0;
2448 wm_start(&sc->sc_ethercom.ec_if);
2449 } else {
2450 /*
2451 * Still waiting for packets to drain; try again in
2452 * another tick.
2453 */
2454 callout_schedule(&sc->sc_txfifo_ch, 1);
2455 }
2456 }
2457
2458 splx(s);
2459 }
2460
2461 static void
2462 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2463 {
2464 uint32_t reg;
2465
2466 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2467
2468 if (on != 0)
2469 reg |= EXTCNFCTR_GATE_PHY_CFG;
2470 else
2471 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2472
2473 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2474 }
2475
2476 /*
2477 * wm_82547_txfifo_bugchk:
2478 *
2479 * Check for bug condition in the 82547 Tx FIFO. We need to
2480 * prevent enqueueing a packet that would wrap around the end
2481 * of the Tx FIFO ring buffer; otherwise the chip will croak.
2482 *
2483 * We do this by checking the amount of space before the end
2484 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2485 * the Tx FIFO, wait for all remaining packets to drain, reset
2486 * the internal FIFO pointers to the beginning, and restart
2487 * transmission on the interface.
2488 */
2489 #define WM_FIFO_HDR 0x10
2490 #define WM_82547_PAD_LEN 0x3e0
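/*
 * A minimal sketch of the space check below, using hypothetical
 * numbers (the FIFO size and head offset are illustrative only).
 * Each packet occupies its length plus a 16-byte (WM_FIFO_HDR)
 * header, rounded up to a 16-byte boundary:
 *
 *	len   = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR);  = 0x600
 *	space = sc_txfifo_size - sc_txfifo_head;           = 0x400, say
 *	len >= WM_82547_PAD_LEN + space?                   0x600 < 0x7e0
 *
 * so this hypothetical packet would be sent rather than stalled.
 */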
2491 static int
2492 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2493 {
2494 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2495 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2496
2497 /* Just return if already stalled. */
2498 if (sc->sc_txfifo_stall)
2499 return 1;
2500
2501 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2502 /* Stall only occurs in half-duplex mode. */
2503 goto send_packet;
2504 }
2505
2506 if (len >= WM_82547_PAD_LEN + space) {
2507 sc->sc_txfifo_stall = 1;
2508 callout_schedule(&sc->sc_txfifo_ch, 1);
2509 return 1;
2510 }
2511
2512 send_packet:
2513 sc->sc_txfifo_head += len;
2514 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2515 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2516
2517 return 0;
2518 }
2519
2520 /*
2521 * wm_start: [ifnet interface function]
2522 *
2523 * Start packet transmission on the interface.
2524 */
2525 static void
2526 wm_start(struct ifnet *ifp)
2527 {
2528 struct wm_softc *sc = ifp->if_softc;
2529 struct mbuf *m0;
2530 struct m_tag *mtag;
2531 struct wm_txsoft *txs;
2532 bus_dmamap_t dmamap;
2533 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2534 bus_addr_t curaddr;
2535 bus_size_t seglen, curlen;
2536 uint32_t cksumcmd;
2537 uint8_t cksumfields;
2538
2539 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2540 return;
2541
2542 /*
2543 * Remember the previous number of free descriptors.
2544 */
2545 ofree = sc->sc_txfree;
2546
2547 /*
2548 * Loop through the send queue, setting up transmit descriptors
2549 * until we drain the queue, or use up all available transmit
2550 * descriptors.
2551 */
2552 for (;;) {
2553 /* Grab a packet off the queue. */
2554 IFQ_POLL(&ifp->if_snd, m0);
2555 if (m0 == NULL)
2556 break;
2557
2558 DPRINTF(WM_DEBUG_TX,
2559 ("%s: TX: have packet to transmit: %p\n",
2560 device_xname(sc->sc_dev), m0));
2561
2562 /* Get a work queue entry. */
2563 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2564 wm_txintr(sc);
2565 if (sc->sc_txsfree == 0) {
2566 DPRINTF(WM_DEBUG_TX,
2567 ("%s: TX: no free job descriptors\n",
2568 device_xname(sc->sc_dev)));
2569 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2570 break;
2571 }
2572 }
2573
2574 txs = &sc->sc_txsoft[sc->sc_txsnext];
2575 dmamap = txs->txs_dmamap;
2576
2577 use_tso = (m0->m_pkthdr.csum_flags &
2578 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2579
2580 /*
2581 * So says the Linux driver:
2582 * The controller does a simple calculation to make sure
2583 * there is enough room in the FIFO before initiating the
2584 * DMA for each buffer. The calc is:
2585 * 4 = ceil(buffer len / MSS)
2586 * To make sure we don't overrun the FIFO, adjust the max
2587 * buffer len if the MSS drops.
2588 */
2589 dmamap->dm_maxsegsz =
2590 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2591 ? m0->m_pkthdr.segsz << 2
2592 : WTX_MAX_LEN;
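/*
 * Worked example (hypothetical MSS): with m0->m_pkthdr.segsz = 1448,
 * segsz << 2 is 5792, presumably below WTX_MAX_LEN, so each DMA
 * segment is capped at 5792 bytes; with a large enough MSS the cap
 * stays at WTX_MAX_LEN.
 */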
2593
2594 /*
2595 * Load the DMA map. If this fails, the packet either
2596 * didn't fit in the allotted number of segments, or we
2597 * were short on resources. For the too-many-segments
2598 * case, we simply report an error and drop the packet,
2599 * since we can't sanely copy a jumbo packet to a single
2600 * buffer.
2601 */
2602 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2603 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2604 if (error) {
2605 if (error == EFBIG) {
2606 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2607 log(LOG_ERR, "%s: Tx packet consumes too many "
2608 "DMA segments, dropping...\n",
2609 device_xname(sc->sc_dev));
2610 IFQ_DEQUEUE(&ifp->if_snd, m0);
2611 wm_dump_mbuf_chain(sc, m0);
2612 m_freem(m0);
2613 continue;
2614 }
2615 /*
2616 * Short on resources, just stop for now.
2617 */
2618 DPRINTF(WM_DEBUG_TX,
2619 ("%s: TX: dmamap load failed: %d\n",
2620 device_xname(sc->sc_dev), error));
2621 break;
2622 }
2623
2624 segs_needed = dmamap->dm_nsegs;
2625 if (use_tso) {
2626 /* For sentinel descriptor; see below. */
2627 segs_needed++;
2628 }
2629
2630 /*
2631 * Ensure we have enough descriptors free to describe
2632 * the packet. Note, we always reserve one descriptor
2633 * at the end of the ring due to the semantics of the
2634 * TDT register, plus one more in the event we need
2635 * to load offload context.
2636 */
2637 if (segs_needed > sc->sc_txfree - 2) {
2638 /*
2639 * Not enough free descriptors to transmit this
2640 * packet. We haven't committed anything yet,
2641 * so just unload the DMA map, put the packet
2642 * back on the queue, and punt. Notify the upper
2643 * layer that there are no more slots left.
2644 */
2645 DPRINTF(WM_DEBUG_TX,
2646 ("%s: TX: need %d (%d) descriptors, have %d\n",
2647 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2648 segs_needed, sc->sc_txfree - 1));
2649 ifp->if_flags |= IFF_OACTIVE;
2650 bus_dmamap_unload(sc->sc_dmat, dmamap);
2651 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2652 break;
2653 }
2654
2655 /*
2656 * Check for 82547 Tx FIFO bug. We need to do this
2657 * once we know we can transmit the packet, since we
2658 * do some internal FIFO space accounting here.
2659 */
2660 if (sc->sc_type == WM_T_82547 &&
2661 wm_82547_txfifo_bugchk(sc, m0)) {
2662 DPRINTF(WM_DEBUG_TX,
2663 ("%s: TX: 82547 Tx FIFO bug detected\n",
2664 device_xname(sc->sc_dev)));
2665 ifp->if_flags |= IFF_OACTIVE;
2666 bus_dmamap_unload(sc->sc_dmat, dmamap);
2667 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2668 break;
2669 }
2670
2671 IFQ_DEQUEUE(&ifp->if_snd, m0);
2672
2673 /*
2674 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2675 */
2676
2677 DPRINTF(WM_DEBUG_TX,
2678 ("%s: TX: packet has %d (%d) DMA segments\n",
2679 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2680
2681 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2682
2683 /*
2684 * Store a pointer to the packet so that we can free it
2685 * later.
2686 *
2687 * Initially, we consider the number of descriptors the
2688 * packet uses to be the number of DMA segments. This may be
2689 * incremented by 1 if we do checksum offload (a descriptor
2690 * is used to set the checksum context).
2691 */
2692 txs->txs_mbuf = m0;
2693 txs->txs_firstdesc = sc->sc_txnext;
2694 txs->txs_ndesc = segs_needed;
2695
2696 /* Set up offload parameters for this packet. */
2697 if (m0->m_pkthdr.csum_flags &
2698 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2699 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2700 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2701 if (wm_tx_offload(sc, txs, &cksumcmd,
2702 &cksumfields) != 0) {
2703 /* Error message already displayed. */
2704 bus_dmamap_unload(sc->sc_dmat, dmamap);
2705 continue;
2706 }
2707 } else {
2708 cksumcmd = 0;
2709 cksumfields = 0;
2710 }
2711
2712 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2713
2714 /* Sync the DMA map. */
2715 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2716 BUS_DMASYNC_PREWRITE);
2717
2718 /*
2719 * Initialize the transmit descriptor.
2720 */
2721 for (nexttx = sc->sc_txnext, seg = 0;
2722 seg < dmamap->dm_nsegs; seg++) {
2723 for (seglen = dmamap->dm_segs[seg].ds_len,
2724 curaddr = dmamap->dm_segs[seg].ds_addr;
2725 seglen != 0;
2726 curaddr += curlen, seglen -= curlen,
2727 nexttx = WM_NEXTTX(sc, nexttx)) {
2728 curlen = seglen;
2729
2730 /*
2731 * So says the Linux driver:
2732 * Work around for premature descriptor
2733 * write-backs in TSO mode. Append a
2734 * 4-byte sentinel descriptor.
2735 */
2736 if (use_tso &&
2737 seg == dmamap->dm_nsegs - 1 &&
2738 curlen > 8)
2739 curlen -= 4;
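/*
 * The 4 bytes shaved off here are not lost: seglen
 * stays nonzero, so the loop makes one more pass and
 * emits a final 4-byte descriptor -- the sentinel.
 */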
2740
2741 wm_set_dma_addr(
2742 &sc->sc_txdescs[nexttx].wtx_addr,
2743 curaddr);
2744 sc->sc_txdescs[nexttx].wtx_cmdlen =
2745 htole32(cksumcmd | curlen);
2746 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2747 0;
2748 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2749 cksumfields;
2750 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2751 lasttx = nexttx;
2752
2753 DPRINTF(WM_DEBUG_TX,
2754 ("%s: TX: desc %d: low %#" PRIx64 ", "
2755 "len %#04zx\n",
2756 device_xname(sc->sc_dev), nexttx,
2757 (uint64_t)curaddr, curlen));
2758 }
2759 }
2760
2761 KASSERT(lasttx != -1);
2762
2763 /*
2764 * Set up the command byte on the last descriptor of
2765 * the packet. If we're in the interrupt delay window,
2766 * delay the interrupt.
2767 */
2768 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2769 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2770
2771 /*
2772 * If VLANs are enabled and the packet has a VLAN tag, set
2773 * up the descriptor to encapsulate the packet for us.
2774 *
2775 * This is only valid on the last descriptor of the packet.
2776 */
2777 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2778 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2779 htole32(WTX_CMD_VLE);
2780 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2781 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2782 }
2783
2784 txs->txs_lastdesc = lasttx;
2785
2786 DPRINTF(WM_DEBUG_TX,
2787 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2788 device_xname(sc->sc_dev),
2789 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2790
2791 /* Sync the descriptors we're using. */
2792 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2793 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2794
2795 /* Give the packet to the chip. */
2796 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2797
2798 DPRINTF(WM_DEBUG_TX,
2799 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2800
2801 DPRINTF(WM_DEBUG_TX,
2802 ("%s: TX: finished transmitting packet, job %d\n",
2803 device_xname(sc->sc_dev), sc->sc_txsnext));
2804
2805 /* Advance the tx pointer. */
2806 sc->sc_txfree -= txs->txs_ndesc;
2807 sc->sc_txnext = nexttx;
2808
2809 sc->sc_txsfree--;
2810 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2811
2812 /* Pass the packet to any BPF listeners. */
2813 bpf_mtap(ifp, m0);
2814 }
2815
2816 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2817 /* No more slots; notify upper layer. */
2818 ifp->if_flags |= IFF_OACTIVE;
2819 }
2820
2821 if (sc->sc_txfree != ofree) {
2822 /* Set a watchdog timer in case the chip flakes out. */
2823 ifp->if_timer = 5;
2824 }
2825 }
2826
2827 /*
2828 * wm_nq_tx_offload:
2829 *
2830 * Set up TCP/IP checksumming parameters for the
2831 * specified packet, for NEWQUEUE devices.
2832 */
2833 static int
2834 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2835 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2836 {
2837 struct mbuf *m0 = txs->txs_mbuf;
2838 struct m_tag *mtag;
2839 uint32_t vl_len, mssidx, cmdc;
2840 struct ether_header *eh;
2841 int offset, iphl;
2842
2843 /*
2844 * XXX It would be nice if the mbuf pkthdr had offset
2845 * fields for the protocol headers.
2846 */
2847 *cmdlenp = 0;
2848 *fieldsp = 0;
2849
2850 eh = mtod(m0, struct ether_header *);
2851 switch (htons(eh->ether_type)) {
2852 case ETHERTYPE_IP:
2853 case ETHERTYPE_IPV6:
2854 offset = ETHER_HDR_LEN;
2855 break;
2856
2857 case ETHERTYPE_VLAN:
2858 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2859 break;
2860
2861 default:
2862 /*
2863 * Don't support this protocol or encapsulation.
2864 */
2865 *do_csum = false;
2866 return 0;
2867 }
2868 *do_csum = true;
2869 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2870 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2871
2872 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2873 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2874
2875 if ((m0->m_pkthdr.csum_flags &
2876 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2877 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2878 } else {
2879 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2880 }
2881 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2882 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
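/*
 * A hedged worked example: for an untagged IPv4 frame with a 20-byte
 * IP header, vl_len now packs MACLEN = 14 and IPLEN = 20 into their
 * bit fields, i.e.:
 *
 *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
 *	    (20 << NQTXC_VLLEN_IPLEN_SHIFT);
 */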
2883
2884 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2885 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2886 << NQTXC_VLLEN_VLAN_SHIFT);
2887 *cmdlenp |= NQTX_CMD_VLE;
2888 }
2889
2890 mssidx = 0;
2891
2892 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2893 int hlen = offset + iphl;
2894 int tcp_hlen;
2895 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2896
2897 if (__predict_false(m0->m_len <
2898 (hlen + sizeof(struct tcphdr)))) {
2899 /*
2900 * TCP/IP headers are not in the first mbuf; we need
2901 * to do this the slow and painful way. Let's just
2902 * hope this doesn't happen very often.
2903 */
2904 struct tcphdr th;
2905
2906 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2907
2908 m_copydata(m0, hlen, sizeof(th), &th);
2909 if (v4) {
2910 struct ip ip;
2911
2912 m_copydata(m0, offset, sizeof(ip), &ip);
2913 ip.ip_len = 0;
2914 m_copyback(m0,
2915 offset + offsetof(struct ip, ip_len),
2916 sizeof(ip.ip_len), &ip.ip_len);
2917 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2918 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2919 } else {
2920 struct ip6_hdr ip6;
2921
2922 m_copydata(m0, offset, sizeof(ip6), &ip6);
2923 ip6.ip6_plen = 0;
2924 m_copyback(m0,
2925 offset + offsetof(struct ip6_hdr, ip6_plen),
2926 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2927 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2928 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2929 }
2930 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2931 sizeof(th.th_sum), &th.th_sum);
2932
2933 tcp_hlen = th.th_off << 2;
2934 } else {
2935 /*
2936 * TCP/IP headers are in the first mbuf; we can do
2937 * this the easy way.
2938 */
2939 struct tcphdr *th;
2940
2941 if (v4) {
2942 struct ip *ip =
2943 (void *)(mtod(m0, char *) + offset);
2944 th = (void *)(mtod(m0, char *) + hlen);
2945
2946 ip->ip_len = 0;
2947 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2948 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2949 } else {
2950 struct ip6_hdr *ip6 =
2951 (void *)(mtod(m0, char *) + offset);
2952 th = (void *)(mtod(m0, char *) + hlen);
2953
2954 ip6->ip6_plen = 0;
2955 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2956 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2957 }
2958 tcp_hlen = th->th_off << 2;
2959 }
2960 hlen += tcp_hlen;
2961 *cmdlenp |= NQTX_CMD_TSE;
2962
2963 if (v4) {
2964 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2965 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2966 } else {
2967 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2968 *fieldsp |= NQTXD_FIELDS_TUXSM;
2969 }
2970 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2971 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2972 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2973 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2974 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2975 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
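/*
 * Worked example (hypothetical values): with an MSS of 1448 and a
 * 20-byte TCP header, mssidx ends up as
 *
 *	(1448 << NQTXC_MSSIDX_MSS_SHIFT) |
 *	    (20 << NQTXC_MSSIDX_L4LEN_SHIFT);
 */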
2976 } else {
2977 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2978 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2979 }
2980
2981 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2982 *fieldsp |= NQTXD_FIELDS_IXSM;
2983 cmdc |= NQTXC_CMD_IP4;
2984 }
2985
2986 if (m0->m_pkthdr.csum_flags &
2987 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2988 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2989 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2990 cmdc |= NQTXC_CMD_TCP;
2991 } else {
2992 cmdc |= NQTXC_CMD_UDP;
2993 }
2994 cmdc |= NQTXC_CMD_IP4;
2995 *fieldsp |= NQTXD_FIELDS_TUXSM;
2996 }
2997 if (m0->m_pkthdr.csum_flags &
2998 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2999 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
3000 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3001 cmdc |= NQTXC_CMD_TCP;
3002 } else {
3003 cmdc |= NQTXC_CMD_UDP;
3004 }
3005 cmdc |= NQTXC_CMD_IP6;
3006 *fieldsp |= NQTXD_FIELDS_TUXSM;
3007 }
3008
3009 /* Fill in the context descriptor. */
3010 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3011 htole32(vl_len);
3012 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3013 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3014 htole32(cmdc);
3015 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3016 htole32(mssidx);
3017 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3018 DPRINTF(WM_DEBUG_TX,
3019 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3020 sc->sc_txnext, 0, vl_len));
3021 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3022 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3023 txs->txs_ndesc++;
3024 return 0;
3025 }
3026
3027 /*
3028 * wm_nq_start: [ifnet interface function]
3029 *
3030 * Start packet transmission on the interface for NEWQUEUE devices.
3031 */
3032 static void
3033 wm_nq_start(struct ifnet *ifp)
3034 {
3035 struct wm_softc *sc = ifp->if_softc;
3036 struct mbuf *m0;
3037 struct m_tag *mtag;
3038 struct wm_txsoft *txs;
3039 bus_dmamap_t dmamap;
3040 int error, nexttx, lasttx = -1, seg, segs_needed;
3041 bool do_csum, sent;
3042
3043 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3044 return;
3045
3046 sent = false;
3047
3048 /*
3049 * Loop through the send queue, setting up transmit descriptors
3050 * until we drain the queue, or use up all available transmit
3051 * descriptors.
3052 */
3053 for (;;) {
3054 /* Grab a packet off the queue. */
3055 IFQ_POLL(&ifp->if_snd, m0);
3056 if (m0 == NULL)
3057 break;
3058
3059 DPRINTF(WM_DEBUG_TX,
3060 ("%s: TX: have packet to transmit: %p\n",
3061 device_xname(sc->sc_dev), m0));
3062
3063 /* Get a work queue entry. */
3064 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3065 wm_txintr(sc);
3066 if (sc->sc_txsfree == 0) {
3067 DPRINTF(WM_DEBUG_TX,
3068 ("%s: TX: no free job descriptors\n",
3069 device_xname(sc->sc_dev)));
3070 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3071 break;
3072 }
3073 }
3074
3075 txs = &sc->sc_txsoft[sc->sc_txsnext];
3076 dmamap = txs->txs_dmamap;
3077
3078 /*
3079 * Load the DMA map. If this fails, the packet either
3080 * didn't fit in the allotted number of segments, or we
3081 * were short on resources. For the too-many-segments
3082 * case, we simply report an error and drop the packet,
3083 * since we can't sanely copy a jumbo packet to a single
3084 * buffer.
3085 */
3086 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3087 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3088 if (error) {
3089 if (error == EFBIG) {
3090 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3091 log(LOG_ERR, "%s: Tx packet consumes too many "
3092 "DMA segments, dropping...\n",
3093 device_xname(sc->sc_dev));
3094 IFQ_DEQUEUE(&ifp->if_snd, m0);
3095 wm_dump_mbuf_chain(sc, m0);
3096 m_freem(m0);
3097 continue;
3098 }
3099 /*
3100 * Short on resources, just stop for now.
3101 */
3102 DPRINTF(WM_DEBUG_TX,
3103 ("%s: TX: dmamap load failed: %d\n",
3104 device_xname(sc->sc_dev), error));
3105 break;
3106 }
3107
3108 segs_needed = dmamap->dm_nsegs;
3109
3110 /*
3111 * Ensure we have enough descriptors free to describe
3112 * the packet. Note, we always reserve one descriptor
3113 * at the end of the ring due to the semantics of the
3114 * TDT register, plus one more in the event we need
3115 * to load offload context.
3116 */
3117 if (segs_needed > sc->sc_txfree - 2) {
3118 /*
3119 * Not enough free descriptors to transmit this
3120 * packet. We haven't committed anything yet,
3121 * so just unload the DMA map, put the packet
3122 * back on the queue, and punt. Notify the upper
3123 * layer that there are no more slots left.
3124 */
3125 DPRINTF(WM_DEBUG_TX,
3126 ("%s: TX: need %d (%d) descriptors, have %d\n",
3127 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3128 segs_needed, sc->sc_txfree - 1));
3129 ifp->if_flags |= IFF_OACTIVE;
3130 bus_dmamap_unload(sc->sc_dmat, dmamap);
3131 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3132 break;
3133 }
3134
3135 IFQ_DEQUEUE(&ifp->if_snd, m0);
3136
3137 /*
3138 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3139 */
3140
3141 DPRINTF(WM_DEBUG_TX,
3142 ("%s: TX: packet has %d (%d) DMA segments\n",
3143 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3144
3145 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3146
3147 /*
3148 * Store a pointer to the packet so that we can free it
3149 * later.
3150 *
3151 * Initially, we consider the number of descriptors the
3152 * packet uses to be the number of DMA segments. This may be
3153 * incremented by 1 if we do checksum offload (a descriptor
3154 * is used to set the checksum context).
3155 */
3156 txs->txs_mbuf = m0;
3157 txs->txs_firstdesc = sc->sc_txnext;
3158 txs->txs_ndesc = segs_needed;
3159
3160 /* Set up offload parameters for this packet. */
3161 uint32_t cmdlen, fields, dcmdlen;
3162 if (m0->m_pkthdr.csum_flags &
3163 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3164 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3165 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3166 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3167 &do_csum) != 0) {
3168 /* Error message already displayed. */
3169 bus_dmamap_unload(sc->sc_dmat, dmamap);
3170 continue;
3171 }
3172 } else {
3173 do_csum = false;
3174 cmdlen = 0;
3175 fields = 0;
3176 }
3177
3178 /* Sync the DMA map. */
3179 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3180 BUS_DMASYNC_PREWRITE);
3181
3182 /*
3183 * Initialize the first transmit descriptor.
3184 */
3185 nexttx = sc->sc_txnext;
3186 if (!do_csum) {
3187 /* Set up a legacy descriptor */
3188 wm_set_dma_addr(
3189 &sc->sc_txdescs[nexttx].wtx_addr,
3190 dmamap->dm_segs[0].ds_addr);
3191 sc->sc_txdescs[nexttx].wtx_cmdlen =
3192 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3193 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3194 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3195 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3196 NULL) {
3197 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3198 htole32(WTX_CMD_VLE);
3199 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3200 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3201 } else {
3202 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3203 }
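/*
 * Leave DEXT/DTYP clear so that the per-segment
 * descriptors written below stay in legacy format,
 * matching this first descriptor.
 */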
3204 dcmdlen = 0;
3205 } else {
3206 /* Set up an advanced data descriptor */
3207 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3208 htole64(dmamap->dm_segs[0].ds_addr);
3209 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3210 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3211 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3212 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3213 htole32(fields);
3214 DPRINTF(WM_DEBUG_TX,
3215 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3216 device_xname(sc->sc_dev), nexttx,
3217 (uint64_t)dmamap->dm_segs[0].ds_addr));
3218 DPRINTF(WM_DEBUG_TX,
3219 ("\t 0x%08x%08x\n", fields,
3220 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3221 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3222 }
3223
3224 lasttx = nexttx;
3225 nexttx = WM_NEXTTX(sc, nexttx);
3226 /*
3227 * Fill in the next descriptors. The legacy and advanced
3228 * formats are the same from here on.
3229 */
3230 for (seg = 1; seg < dmamap->dm_nsegs;
3231 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3232 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3233 htole64(dmamap->dm_segs[seg].ds_addr);
3234 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3235 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3236 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3237 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3238 lasttx = nexttx;
3239
3240 DPRINTF(WM_DEBUG_TX,
3241 ("%s: TX: desc %d: %#" PRIx64 ", "
3242 "len %#04zx\n",
3243 device_xname(sc->sc_dev), nexttx,
3244 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3245 dmamap->dm_segs[seg].ds_len));
3246 }
3247
3248 KASSERT(lasttx != -1);
3249
3250 /*
3251 * Set up the command byte on the last descriptor of
3252 * the packet. If we're in the interrupt delay window,
3253 * delay the interrupt.
3254 */
3255 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3256 (NQTX_CMD_EOP | NQTX_CMD_RS));
3257 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3258 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3259
3260 txs->txs_lastdesc = lasttx;
3261
3262 DPRINTF(WM_DEBUG_TX,
3263 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3264 device_xname(sc->sc_dev),
3265 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3266
3267 /* Sync the descriptors we're using. */
3268 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3269 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3270
3271 /* Give the packet to the chip. */
3272 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3273 sent = true;
3274
3275 DPRINTF(WM_DEBUG_TX,
3276 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3277
3278 DPRINTF(WM_DEBUG_TX,
3279 ("%s: TX: finished transmitting packet, job %d\n",
3280 device_xname(sc->sc_dev), sc->sc_txsnext));
3281
3282 /* Advance the tx pointer. */
3283 sc->sc_txfree -= txs->txs_ndesc;
3284 sc->sc_txnext = nexttx;
3285
3286 sc->sc_txsfree--;
3287 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3288
3289 /* Pass the packet to any BPF listeners. */
3290 bpf_mtap(ifp, m0);
3291 }
3292
3293 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3294 /* No more slots; notify upper layer. */
3295 ifp->if_flags |= IFF_OACTIVE;
3296 }
3297
3298 if (sent) {
3299 /* Set a watchdog timer in case the chip flakes out. */
3300 ifp->if_timer = 5;
3301 }
3302 }
3303
3304 /*
3305 * wm_watchdog: [ifnet interface function]
3306 *
3307 * Watchdog timer handler.
3308 */
3309 static void
3310 wm_watchdog(struct ifnet *ifp)
3311 {
3312 struct wm_softc *sc = ifp->if_softc;
3313
3314 /*
3315 * Since we're using delayed interrupts, sweep up
3316 * before we report an error.
3317 */
3318 wm_txintr(sc);
3319
3320 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3321 #ifdef WM_DEBUG
3322 int i, j;
3323 struct wm_txsoft *txs;
3324 #endif
3325 log(LOG_ERR,
3326 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3327 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3328 sc->sc_txnext);
3329 ifp->if_oerrors++;
3330 #ifdef WM_DEBUG
3331 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3332 i = WM_NEXTTXS(sc, i)) {
3333 txs = &sc->sc_txsoft[i];
3334 printf("txs %d tx %d -> %d\n",
3335 i, txs->txs_firstdesc, txs->txs_lastdesc);
3336 for (j = txs->txs_firstdesc; ;
3337 j = WM_NEXTTX(sc, j)) {
3338 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3339 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3340 printf("\t %#08x%08x\n",
3341 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3342 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3343 if (j == txs->txs_lastdesc)
3344 break;
3345 }
3346 }
3347 #endif
3348 /* Reset the interface. */
3349 (void) wm_init(ifp);
3350 }
3351
3352 /* Try to get more packets going. */
3353 ifp->if_start(ifp);
3354 }
3355
3356 static int
3357 wm_ifflags_cb(struct ethercom *ec)
3358 {
3359 struct ifnet *ifp = &ec->ec_if;
3360 struct wm_softc *sc = ifp->if_softc;
3361 int change = ifp->if_flags ^ sc->sc_if_flags;
3362
3363 if (change != 0)
3364 sc->sc_if_flags = ifp->if_flags;
3365
3366 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3367 return ENETRESET;
3368
3369 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3370 wm_set_filter(sc);
3371
3372 wm_set_vlan(sc);
3373
3374 return 0;
3375 }
3376
3377 /*
3378 * wm_ioctl: [ifnet interface function]
3379 *
3380 * Handle control requests from the operator.
3381 */
3382 static int
3383 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3384 {
3385 struct wm_softc *sc = ifp->if_softc;
3386 struct ifreq *ifr = (struct ifreq *) data;
3387 struct ifaddr *ifa = (struct ifaddr *)data;
3388 struct sockaddr_dl *sdl;
3389 int s, error;
3390
3391 s = splnet();
3392
3393 switch (cmd) {
3394 case SIOCSIFMEDIA:
3395 case SIOCGIFMEDIA:
3396 /* Flow control requires full-duplex mode. */
3397 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3398 (ifr->ifr_media & IFM_FDX) == 0)
3399 ifr->ifr_media &= ~IFM_ETH_FMASK;
3400 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3401 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3402 /* We can do both TXPAUSE and RXPAUSE. */
3403 ifr->ifr_media |=
3404 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3405 }
3406 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3407 }
3408 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3409 break;
3410 case SIOCINITIFADDR:
3411 if (ifa->ifa_addr->sa_family == AF_LINK) {
3412 sdl = satosdl(ifp->if_dl->ifa_addr);
3413 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3414 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3415 /* unicast address is first multicast entry */
3416 wm_set_filter(sc);
3417 error = 0;
3418 break;
3419 }
3420 /*FALLTHROUGH*/
3421 default:
3422 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3423 break;
3424
3425 error = 0;
3426
3427 if (cmd == SIOCSIFCAP)
3428 error = (*ifp->if_init)(ifp);
3429 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3430 ;
3431 else if (ifp->if_flags & IFF_RUNNING) {
3432 /*
3433 * Multicast list has changed; set the hardware filter
3434 * accordingly.
3435 */
3436 wm_set_filter(sc);
3437 }
3438 break;
3439 }
3440
3441 /* Try to get more packets going. */
3442 ifp->if_start(ifp);
3443
3444 splx(s);
3445 return error;
3446 }
3447
3448 /*
3449 * wm_intr:
3450 *
3451 * Interrupt service routine.
3452 */
3453 static int
3454 wm_intr(void *arg)
3455 {
3456 struct wm_softc *sc = arg;
3457 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3458 uint32_t icr;
3459 int handled = 0;
3460
3461 while (1 /* CONSTCOND */) {
3462 icr = CSR_READ(sc, WMREG_ICR);
3463 if ((icr & sc->sc_icr) == 0)
3464 break;
3465 rnd_add_uint32(&sc->rnd_source, icr);
3466
3467 handled = 1;
3468
3469 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3470 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3471 DPRINTF(WM_DEBUG_RX,
3472 ("%s: RX: got Rx intr 0x%08x\n",
3473 device_xname(sc->sc_dev),
3474 icr & (ICR_RXDMT0|ICR_RXT0)));
3475 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3476 }
3477 #endif
3478 wm_rxintr(sc);
3479
3480 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3481 if (icr & ICR_TXDW) {
3482 DPRINTF(WM_DEBUG_TX,
3483 ("%s: TX: got TXDW interrupt\n",
3484 device_xname(sc->sc_dev)));
3485 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3486 }
3487 #endif
3488 wm_txintr(sc);
3489
3490 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3491 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3492 wm_linkintr(sc, icr);
3493 }
3494
3495 if (icr & ICR_RXO) {
3496 #if defined(WM_DEBUG)
3497 log(LOG_WARNING, "%s: Receive overrun\n",
3498 device_xname(sc->sc_dev));
3499 #endif /* defined(WM_DEBUG) */
3500 }
3501 }
3502
3503 if (handled) {
3504 /* Try to get more packets going. */
3505 ifp->if_start(ifp);
3506 }
3507
3508 return handled;
3509 }
3510
3511 /*
3512 * wm_txintr:
3513 *
3514 * Helper; handle transmit interrupts.
3515 */
3516 static void
3517 wm_txintr(struct wm_softc *sc)
3518 {
3519 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3520 struct wm_txsoft *txs;
3521 uint8_t status;
3522 int i;
3523
3524 ifp->if_flags &= ~IFF_OACTIVE;
3525
3526 /*
3527 * Go through the Tx list and free mbufs for those
3528 * frames which have been transmitted.
3529 */
3530 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3531 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3532 txs = &sc->sc_txsoft[i];
3533
3534 DPRINTF(WM_DEBUG_TX,
3535 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3536
3537 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3538 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3539
3540 status =
3541 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3542 if ((status & WTX_ST_DD) == 0) {
3543 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3544 BUS_DMASYNC_PREREAD);
3545 break;
3546 }
3547
3548 DPRINTF(WM_DEBUG_TX,
3549 ("%s: TX: job %d done: descs %d..%d\n",
3550 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3551 txs->txs_lastdesc));
3552
3553 /*
3554 * XXX We should probably be using the statistics
3555 * XXX registers, but I don't know if they exist
3556 * XXX on chips before the i82544.
3557 */
3558
3559 #ifdef WM_EVENT_COUNTERS
3560 if (status & WTX_ST_TU)
3561 WM_EVCNT_INCR(&sc->sc_ev_tu);
3562 #endif /* WM_EVENT_COUNTERS */
3563
3564 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3565 ifp->if_oerrors++;
3566 if (status & WTX_ST_LC)
3567 log(LOG_WARNING, "%s: late collision\n",
3568 device_xname(sc->sc_dev));
3569 else if (status & WTX_ST_EC) {
3570 ifp->if_collisions += 16;
3571 log(LOG_WARNING, "%s: excessive collisions\n",
3572 device_xname(sc->sc_dev));
3573 }
3574 } else
3575 ifp->if_opackets++;
3576
3577 sc->sc_txfree += txs->txs_ndesc;
3578 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3579 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3580 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3581 m_freem(txs->txs_mbuf);
3582 txs->txs_mbuf = NULL;
3583 }
3584
3585 /* Update the dirty transmit buffer pointer. */
3586 sc->sc_txsdirty = i;
3587 DPRINTF(WM_DEBUG_TX,
3588 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3589
3590 /*
3591 * If there are no more pending transmissions, cancel the watchdog
3592 * timer.
3593 */
3594 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3595 ifp->if_timer = 0;
3596 }
3597
3598 /*
3599 * wm_rxintr:
3600 *
3601 * Helper; handle receive interrupts.
3602 */
3603 static void
3604 wm_rxintr(struct wm_softc *sc)
3605 {
3606 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3607 struct wm_rxsoft *rxs;
3608 struct mbuf *m;
3609 int i, len;
3610 uint8_t status, errors;
3611 uint16_t vlantag;
3612
3613 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3614 rxs = &sc->sc_rxsoft[i];
3615
3616 DPRINTF(WM_DEBUG_RX,
3617 ("%s: RX: checking descriptor %d\n",
3618 device_xname(sc->sc_dev), i));
3619
3620 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3621
3622 status = sc->sc_rxdescs[i].wrx_status;
3623 errors = sc->sc_rxdescs[i].wrx_errors;
3624 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3625 vlantag = sc->sc_rxdescs[i].wrx_special;
3626
3627 if ((status & WRX_ST_DD) == 0) {
3628 /*
3629 * We have processed all of the receive descriptors.
3630 */
3631 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3632 break;
3633 }
3634
3635 if (__predict_false(sc->sc_rxdiscard)) {
3636 DPRINTF(WM_DEBUG_RX,
3637 ("%s: RX: discarding contents of descriptor %d\n",
3638 device_xname(sc->sc_dev), i));
3639 WM_INIT_RXDESC(sc, i);
3640 if (status & WRX_ST_EOP) {
3641 /* Reset our state. */
3642 DPRINTF(WM_DEBUG_RX,
3643 ("%s: RX: resetting rxdiscard -> 0\n",
3644 device_xname(sc->sc_dev)));
3645 sc->sc_rxdiscard = 0;
3646 }
3647 continue;
3648 }
3649
3650 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3651 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3652
3653 m = rxs->rxs_mbuf;
3654
3655 /*
3656 * Add a new receive buffer to the ring, unless of
3657 * course the length is zero. Treat the latter as a
3658 * failed mapping.
3659 */
3660 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3661 /*
3662 * Failed, throw away what we've done so
3663 * far, and discard the rest of the packet.
3664 */
3665 ifp->if_ierrors++;
3666 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3667 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3668 WM_INIT_RXDESC(sc, i);
3669 if ((status & WRX_ST_EOP) == 0)
3670 sc->sc_rxdiscard = 1;
3671 if (sc->sc_rxhead != NULL)
3672 m_freem(sc->sc_rxhead);
3673 WM_RXCHAIN_RESET(sc);
3674 DPRINTF(WM_DEBUG_RX,
3675 ("%s: RX: Rx buffer allocation failed, "
3676 "dropping packet%s\n", device_xname(sc->sc_dev),
3677 sc->sc_rxdiscard ? " (discard)" : ""));
3678 continue;
3679 }
3680
3681 m->m_len = len;
3682 sc->sc_rxlen += len;
3683 DPRINTF(WM_DEBUG_RX,
3684 ("%s: RX: buffer at %p len %d\n",
3685 device_xname(sc->sc_dev), m->m_data, len));
3686
3687 /*
3688 * If this is not the end of the packet, keep
3689 * looking.
3690 */
3691 if ((status & WRX_ST_EOP) == 0) {
3692 WM_RXCHAIN_LINK(sc, m);
3693 DPRINTF(WM_DEBUG_RX,
3694 ("%s: RX: not yet EOP, rxlen -> %d\n",
3695 device_xname(sc->sc_dev), sc->sc_rxlen));
3696 continue;
3697 }
3698
3699 		/*
3700 		 * Okay, we have the entire packet now. The chip includes
3701 		 * the FCS except on the I350 and I21[01] (not all chips
3702 		 * can be configured to strip it), so we need to trim it.
3703 		 * We may also need to adjust the length of the previous
3704 		 * mbuf in the chain if the current mbuf is too short.
3705 		 * Due to a hardware erratum, the RCTL_SECRC bit in the
3706 		 * RCTL register is always set on the I350, so the FCS is
3707 		 * already stripped there and we don't trim it.
3708 		 */
3709 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I210)
3710 && (sc->sc_type != WM_T_I211)) {
3711 if (m->m_len < ETHER_CRC_LEN) {
3712 sc->sc_rxtail->m_len
3713 -= (ETHER_CRC_LEN - m->m_len);
3714 m->m_len = 0;
3715 } else
3716 m->m_len -= ETHER_CRC_LEN;
3717 len = sc->sc_rxlen - ETHER_CRC_LEN;
3718 } else
3719 len = sc->sc_rxlen;
3720
3721 WM_RXCHAIN_LINK(sc, m);
3722
3723 *sc->sc_rxtailp = NULL;
3724 m = sc->sc_rxhead;
3725
3726 WM_RXCHAIN_RESET(sc);
3727
3728 DPRINTF(WM_DEBUG_RX,
3729 ("%s: RX: have entire packet, len -> %d\n",
3730 device_xname(sc->sc_dev), len));
3731
3732 /*
3733 * If an error occurred, update stats and drop the packet.
3734 */
3735 if (errors &
3736 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3737 if (errors & WRX_ER_SE)
3738 log(LOG_WARNING, "%s: symbol error\n",
3739 device_xname(sc->sc_dev));
3740 else if (errors & WRX_ER_SEQ)
3741 log(LOG_WARNING, "%s: receive sequence error\n",
3742 device_xname(sc->sc_dev));
3743 else if (errors & WRX_ER_CE)
3744 log(LOG_WARNING, "%s: CRC error\n",
3745 device_xname(sc->sc_dev));
3746 m_freem(m);
3747 continue;
3748 }
3749
3750 /*
3751 * No errors. Receive the packet.
3752 */
3753 m->m_pkthdr.rcvif = ifp;
3754 m->m_pkthdr.len = len;
3755
3756 /*
3757 * If VLANs are enabled, VLAN packets have been unwrapped
3758 * for us. Associate the tag with the packet.
3759 */
3760 if ((status & WRX_ST_VP) != 0) {
3761 VLAN_INPUT_TAG(ifp, m,
3762 le16toh(vlantag),
3763 continue);
3764 }
3765
3766 /*
3767 * Set up checksum info for this packet.
3768 */
3769 if ((status & WRX_ST_IXSM) == 0) {
3770 if (status & WRX_ST_IPCS) {
3771 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3772 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3773 if (errors & WRX_ER_IPE)
3774 m->m_pkthdr.csum_flags |=
3775 M_CSUM_IPv4_BAD;
3776 }
3777 if (status & WRX_ST_TCPCS) {
3778 /*
3779 * Note: we don't know if this was TCP or UDP,
3780 * so we just set both bits, and expect the
3781 * upper layers to deal.
3782 */
3783 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3784 m->m_pkthdr.csum_flags |=
3785 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3786 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3787 if (errors & WRX_ER_TCPE)
3788 m->m_pkthdr.csum_flags |=
3789 M_CSUM_TCP_UDP_BAD;
3790 }
3791 }
3792
3793 ifp->if_ipackets++;
3794
3795 /* Pass this up to any BPF listeners. */
3796 bpf_mtap(ifp, m);
3797
3798 /* Pass it on. */
3799 (*ifp->if_input)(ifp, m);
3800 }
3801
3802 /* Update the receive pointer. */
3803 sc->sc_rxptr = i;
3804
3805 DPRINTF(WM_DEBUG_RX,
3806 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3807 }
3808
3809 /*
3810 * wm_linkintr_gmii:
3811 *
3812 * Helper; handle link interrupts for GMII.
3813 */
3814 static void
3815 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3816 {
3817
3818 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3819 __func__));
3820
3821 if (icr & ICR_LSC) {
3822 DPRINTF(WM_DEBUG_LINK,
3823 ("%s: LINK: LSC -> mii_pollstat\n",
3824 device_xname(sc->sc_dev)));
3825 mii_pollstat(&sc->sc_mii);
3826 if (sc->sc_type == WM_T_82543) {
3827 int miistatus, active;
3828
3829 /*
3830 * With 82543, we need to force speed and
3831 * duplex on the MAC equal to what the PHY
3832 * speed and duplex configuration is.
3833 */
3834 miistatus = sc->sc_mii.mii_media_status;
3835
3836 if (miistatus & IFM_ACTIVE) {
3837 active = sc->sc_mii.mii_media_active;
3838 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3839 switch (IFM_SUBTYPE(active)) {
3840 case IFM_10_T:
3841 sc->sc_ctrl |= CTRL_SPEED_10;
3842 break;
3843 case IFM_100_TX:
3844 sc->sc_ctrl |= CTRL_SPEED_100;
3845 break;
3846 case IFM_1000_T:
3847 sc->sc_ctrl |= CTRL_SPEED_1000;
3848 break;
3849 default:
3850 /*
3851 					 * Fiber?
3852 					 * Should not enter here.
3853 */
3854 printf("unknown media (%x)\n",
3855 active);
3856 break;
3857 }
3858 if (active & IFM_FDX)
3859 sc->sc_ctrl |= CTRL_FD;
3860 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3861 }
3862 } else if ((sc->sc_type == WM_T_ICH8)
3863 && (sc->sc_phytype == WMPHY_IGP_3)) {
3864 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3865 } else if (sc->sc_type == WM_T_PCH) {
3866 wm_k1_gig_workaround_hv(sc,
3867 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3868 }
3869
3870 if ((sc->sc_phytype == WMPHY_82578)
3871 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3872 == IFM_1000_T)) {
3873
3874 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3875 delay(200*1000); /* XXX too big */
3876
3877 /* Link stall fix for link up */
3878 wm_gmii_hv_writereg(sc->sc_dev, 1,
3879 HV_MUX_DATA_CTRL,
3880 HV_MUX_DATA_CTRL_GEN_TO_MAC
3881 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3882 wm_gmii_hv_writereg(sc->sc_dev, 1,
3883 HV_MUX_DATA_CTRL,
3884 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3885 }
3886 }
3887 } else if (icr & ICR_RXSEQ) {
3888 DPRINTF(WM_DEBUG_LINK,
3889 ("%s: LINK Receive sequence error\n",
3890 device_xname(sc->sc_dev)));
3891 }
3892 }
3893
3894 /*
3895 * wm_linkintr_tbi:
3896 *
3897 * Helper; handle link interrupts for TBI mode.
3898 */
3899 static void
3900 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3901 {
3902 uint32_t status;
3903
3904 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3905 __func__));
3906
3907 status = CSR_READ(sc, WMREG_STATUS);
3908 if (icr & ICR_LSC) {
3909 if (status & STATUS_LU) {
3910 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3911 device_xname(sc->sc_dev),
3912 (status & STATUS_FD) ? "FDX" : "HDX"));
3913 /*
3914 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
3915 			 * automatically, so we must re-read CTRL into sc->sc_ctrl
3916 */
3917
3918 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3919 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3920 sc->sc_fcrtl &= ~FCRTL_XONE;
3921 if (status & STATUS_FD)
3922 sc->sc_tctl |=
3923 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3924 else
3925 sc->sc_tctl |=
3926 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3927 if (sc->sc_ctrl & CTRL_TFCE)
3928 sc->sc_fcrtl |= FCRTL_XONE;
3929 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3930 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3931 WMREG_OLD_FCRTL : WMREG_FCRTL,
3932 sc->sc_fcrtl);
3933 sc->sc_tbi_linkup = 1;
3934 } else {
3935 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3936 device_xname(sc->sc_dev)));
3937 sc->sc_tbi_linkup = 0;
3938 }
3939 wm_tbi_set_linkled(sc);
3940 } else if (icr & ICR_RXCFG) {
3941 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3942 device_xname(sc->sc_dev)));
3943 sc->sc_tbi_nrxcfg++;
3944 wm_check_for_link(sc);
3945 } else if (icr & ICR_RXSEQ) {
3946 DPRINTF(WM_DEBUG_LINK,
3947 ("%s: LINK: Receive sequence error\n",
3948 device_xname(sc->sc_dev)));
3949 }
3950 }
3951
3952 /*
3953 * wm_linkintr:
3954 *
3955 * Helper; handle link interrupts.
3956 */
3957 static void
3958 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3959 {
3960
3961 if (sc->sc_flags & WM_F_HAS_MII)
3962 wm_linkintr_gmii(sc, icr);
3963 else
3964 wm_linkintr_tbi(sc, icr);
3965 }
3966
3967 /*
3968 * wm_tick:
3969 *
3970 * One second timer, used to check link status, sweep up
3971 * completed transmit jobs, etc.
3972 */
3973 static void
3974 wm_tick(void *arg)
3975 {
3976 struct wm_softc *sc = arg;
3977 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3978 int s;
3979
3980 s = splnet();
3981
3982 if (sc->sc_type >= WM_T_82542_2_1) {
3983 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3984 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3985 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3986 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3987 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3988 }
3989
3990 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3991 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3992 + CSR_READ(sc, WMREG_CRCERRS)
3993 + CSR_READ(sc, WMREG_ALGNERRC)
3994 + CSR_READ(sc, WMREG_SYMERRC)
3995 + CSR_READ(sc, WMREG_RXERRC)
3996 + CSR_READ(sc, WMREG_SEC)
3997 + CSR_READ(sc, WMREG_CEXTERR)
3998 + CSR_READ(sc, WMREG_RLEC);
3999 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4000
4001 if (sc->sc_flags & WM_F_HAS_MII)
4002 mii_tick(&sc->sc_mii);
4003 else
4004 wm_tbi_check_link(sc);
4005
4006 splx(s);
4007
4008 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4009 }
4010
4011 /*
4012 * wm_reset:
4013 *
4014 * Reset the i82542 chip.
4015 */
4016 static void
4017 wm_reset(struct wm_softc *sc)
4018 {
4019 int phy_reset = 0;
4020 uint32_t reg, mask;
4021 int i;
4022
4023 /*
4024 * Allocate on-chip memory according to the MTU size.
4025 * The Packet Buffer Allocation register must be written
4026 * before the chip is reset.
4027 */
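	/*
	 * E.g. on the 82547 with a standard MTU, receive gets PBA_30K
	 * and the Tx FIFO the remaining 10KB of the 40KB on-chip
	 * packet buffer (PBA_40K - PBA_30K).
	 */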
4028 switch (sc->sc_type) {
4029 case WM_T_82547:
4030 case WM_T_82547_2:
4031 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4032 PBA_22K : PBA_30K;
4033 sc->sc_txfifo_head = 0;
4034 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4035 sc->sc_txfifo_size =
4036 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4037 sc->sc_txfifo_stall = 0;
4038 break;
4039 case WM_T_82571:
4040 case WM_T_82572:
4041 	case WM_T_82575: /* XXX need special handling for jumbo frames */
4042 case WM_T_I350:
4043 case WM_T_80003:
4044 sc->sc_pba = PBA_32K;
4045 break;
4046 case WM_T_82580:
4047 case WM_T_82580ER:
4048 sc->sc_pba = PBA_35K;
4049 break;
4050 case WM_T_I210:
4051 case WM_T_I211:
4052 sc->sc_pba = PBA_34K;
4053 break;
4054 case WM_T_82576:
4055 sc->sc_pba = PBA_64K;
4056 break;
4057 case WM_T_82573:
4058 sc->sc_pba = PBA_12K;
4059 break;
4060 case WM_T_82574:
4061 case WM_T_82583:
4062 sc->sc_pba = PBA_20K;
4063 break;
4064 case WM_T_ICH8:
4065 sc->sc_pba = PBA_8K;
4066 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4067 break;
4068 case WM_T_ICH9:
4069 case WM_T_ICH10:
4070 sc->sc_pba = PBA_10K;
4071 break;
4072 case WM_T_PCH:
4073 case WM_T_PCH2:
4074 case WM_T_PCH_LPT:
4075 sc->sc_pba = PBA_26K;
4076 break;
4077 default:
4078 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4079 PBA_40K : PBA_48K;
4080 break;
4081 }
4082 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4083
4084 /* Prevent the PCI-E bus from sticking */
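	/*
	 * Setting CTRL_GIO_M_DIS blocks new master requests; below we
	 * poll STATUS_GIO_M_ENA for up to 800 * 100us = 80ms while any
	 * in-flight requests drain.
	 */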
4085 if (sc->sc_flags & WM_F_PCIE) {
4086 int timeout = 800;
4087
4088 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4089 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4090
4091 while (timeout--) {
4092 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4093 == 0)
4094 break;
4095 delay(100);
4096 }
4097 }
4098
4099 	/* Set the PCIe completion timeout for the interface */
4100 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4101 || (sc->sc_type == WM_T_I350))
4102 wm_set_pcie_completion_timeout(sc);
4103
4104 /* Clear interrupt */
4105 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4106
4107 /* Stop the transmit and receive processes. */
4108 CSR_WRITE(sc, WMREG_RCTL, 0);
4109 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4110 sc->sc_rctl &= ~RCTL_EN;
4111
4112 /* XXX set_tbi_sbp_82543() */
4113
4114 delay(10*1000);
4115
4116 /* Must acquire the MDIO ownership before MAC reset */
4117 switch (sc->sc_type) {
4118 case WM_T_82573:
4119 case WM_T_82574:
4120 case WM_T_82583:
4121 i = 0;
4122 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4123 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4124 do {
4125 CSR_WRITE(sc, WMREG_EXTCNFCTR,
4126 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4127 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4128 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4129 break;
4130 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4131 delay(2*1000);
4132 i++;
4133 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4134 break;
4135 default:
4136 break;
4137 }
4138
4139 /*
4140 * 82541 Errata 29? & 82547 Errata 28?
4141 	 * See also the description of the PHY_RST bit in the CTRL
4142 	 * register in 8254x_GBe_SDM.pdf.
4143 */
4144 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4145 CSR_WRITE(sc, WMREG_CTRL,
4146 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4147 delay(5000);
4148 }
4149
4150 switch (sc->sc_type) {
4151 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4152 case WM_T_82541:
4153 case WM_T_82541_2:
4154 case WM_T_82547:
4155 case WM_T_82547_2:
4156 /*
4157 * On some chipsets, a reset through a memory-mapped write
4158 * cycle can cause the chip to reset before completing the
4159 * write cycle. This causes major headache that can be
4160 * avoided by issuing the reset via indirect register writes
4161 * through I/O space.
4162 *
4163 * So, if we successfully mapped the I/O BAR at attach time,
4164 * use that. Otherwise, try our luck with a memory-mapped
4165 * reset.
4166 */
4167 if (sc->sc_flags & WM_F_IOH_VALID)
4168 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4169 else
4170 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4171 break;
4172 case WM_T_82545_3:
4173 case WM_T_82546_3:
4174 /* Use the shadow control register on these chips. */
4175 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4176 break;
4177 case WM_T_80003:
4178 mask = swfwphysem[sc->sc_funcid];
4179 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4180 wm_get_swfw_semaphore(sc, mask);
4181 CSR_WRITE(sc, WMREG_CTRL, reg);
4182 wm_put_swfw_semaphore(sc, mask);
4183 break;
4184 case WM_T_ICH8:
4185 case WM_T_ICH9:
4186 case WM_T_ICH10:
4187 case WM_T_PCH:
4188 case WM_T_PCH2:
4189 case WM_T_PCH_LPT:
4190 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4191 if (wm_check_reset_block(sc) == 0) {
4192 /*
4193 * Gate automatic PHY configuration by hardware on
4194 * non-managed 82579
4195 */
4196 if ((sc->sc_type == WM_T_PCH2)
4197 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4198 != 0))
4199 wm_gate_hw_phy_config_ich8lan(sc, 1);
4200
4201
4202 reg |= CTRL_PHY_RESET;
4203 phy_reset = 1;
4204 }
4205 wm_get_swfwhw_semaphore(sc);
4206 CSR_WRITE(sc, WMREG_CTRL, reg);
4207 delay(20*1000);
4208 wm_put_swfwhw_semaphore(sc);
4209 break;
4210 case WM_T_82542_2_0:
4211 case WM_T_82542_2_1:
4212 case WM_T_82543:
4213 case WM_T_82540:
4214 case WM_T_82545:
4215 case WM_T_82546:
4216 case WM_T_82571:
4217 case WM_T_82572:
4218 case WM_T_82573:
4219 case WM_T_82574:
4220 case WM_T_82575:
4221 case WM_T_82576:
4222 case WM_T_82580:
4223 case WM_T_82580ER:
4224 case WM_T_82583:
4225 case WM_T_I350:
4226 case WM_T_I210:
4227 case WM_T_I211:
4228 default:
4229 /* Everything else can safely use the documented method. */
4230 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4231 break;
4232 }
4233
4234 if (phy_reset != 0)
4235 wm_get_cfg_done(sc);
4236
4237 /* reload EEPROM */
4238 switch (sc->sc_type) {
4239 case WM_T_82542_2_0:
4240 case WM_T_82542_2_1:
4241 case WM_T_82543:
4242 case WM_T_82544:
4243 delay(10);
4244 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4245 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4246 delay(2000);
4247 break;
4248 case WM_T_82540:
4249 case WM_T_82545:
4250 case WM_T_82545_3:
4251 case WM_T_82546:
4252 case WM_T_82546_3:
4253 delay(5*1000);
4254 /* XXX Disable HW ARPs on ASF enabled adapters */
4255 break;
4256 case WM_T_82541:
4257 case WM_T_82541_2:
4258 case WM_T_82547:
4259 case WM_T_82547_2:
4260 delay(20000);
4261 /* XXX Disable HW ARPs on ASF enabled adapters */
4262 break;
4263 case WM_T_82571:
4264 case WM_T_82572:
4265 case WM_T_82573:
4266 case WM_T_82574:
4267 case WM_T_82583:
4268 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4269 delay(10);
4270 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4271 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4272 }
4273 /* check EECD_EE_AUTORD */
4274 wm_get_auto_rd_done(sc);
4275 /*
4276 		 * PHY configuration from the NVM starts only after
4277 		 * EECD_AUTO_RD is set.
4278 */
4279 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4280 || (sc->sc_type == WM_T_82583))
4281 delay(25*1000);
4282 break;
4283 case WM_T_82575:
4284 case WM_T_82576:
4285 case WM_T_82580:
4286 case WM_T_82580ER:
4287 case WM_T_I350:
4288 case WM_T_I210:
4289 case WM_T_I211:
4290 case WM_T_80003:
4291 /* check EECD_EE_AUTORD */
4292 wm_get_auto_rd_done(sc);
4293 break;
4294 case WM_T_ICH8:
4295 case WM_T_ICH9:
4296 case WM_T_ICH10:
4297 case WM_T_PCH:
4298 case WM_T_PCH2:
4299 case WM_T_PCH_LPT:
4300 break;
4301 default:
4302 panic("%s: unknown type\n", __func__);
4303 }
4304
4305 /* Check whether EEPROM is present or not */
4306 switch (sc->sc_type) {
4307 case WM_T_82575:
4308 case WM_T_82576:
4309 #if 0 /* XXX */
4310 case WM_T_82580:
4311 case WM_T_82580ER:
4312 #endif
4313 case WM_T_I350:
4314 case WM_T_ICH8:
4315 case WM_T_ICH9:
4316 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4317 /* Not found */
4318 sc->sc_flags |= WM_F_EEPROM_INVALID;
4319 if ((sc->sc_type == WM_T_82575)
4320 || (sc->sc_type == WM_T_82576)
4321 || (sc->sc_type == WM_T_82580)
4322 || (sc->sc_type == WM_T_82580ER)
4323 || (sc->sc_type == WM_T_I350))
4324 wm_reset_init_script_82575(sc);
4325 }
4326 break;
4327 default:
4328 break;
4329 }
4330
4331 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4332 || (sc->sc_type == WM_T_I350)) {
4333 /* clear global device reset status bit */
4334 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4335 }
4336
4337 /* Clear any pending interrupt events. */
4338 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4339 reg = CSR_READ(sc, WMREG_ICR);
4340
4341 /* reload sc_ctrl */
4342 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4343
4344 if (sc->sc_type == WM_T_I350)
4345 wm_set_eee_i350(sc);
4346
4347 /* dummy read from WUC */
4348 if (sc->sc_type == WM_T_PCH)
4349 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4350 /*
4351 * For PCH, this write will make sure that any noise will be detected
4352 * as a CRC error and be dropped rather than show up as a bad packet
4353 	 * to the DMA engine.
4354 */
4355 if (sc->sc_type == WM_T_PCH)
4356 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4357
4358 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4359 CSR_WRITE(sc, WMREG_WUC, 0);
4360
4361 /* XXX need special handling for 82580 */
4362 }
4363
4364 static void
4365 wm_set_vlan(struct wm_softc *sc)
4366 {
4367 /* Deal with VLAN enables. */
4368 if (VLAN_ATTACHED(&sc->sc_ethercom))
4369 sc->sc_ctrl |= CTRL_VME;
4370 else
4371 sc->sc_ctrl &= ~CTRL_VME;
4372
4373 /* Write the control registers. */
4374 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4375 }
4376
4377 /*
4378 * wm_init: [ifnet interface function]
4379 *
4380 * Initialize the interface. Must be called at splnet().
4381 */
4382 static int
4383 wm_init(struct ifnet *ifp)
4384 {
4385 struct wm_softc *sc = ifp->if_softc;
4386 struct wm_rxsoft *rxs;
4387 int i, j, trynum, error = 0;
4388 uint32_t reg;
4389
4390 /*
4391 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4392 	 * There is a small but measurable benefit to avoiding the adjustment
4393 * of the descriptor so that the headers are aligned, for normal mtu,
4394 * on such platforms. One possibility is that the DMA itself is
4395 * slightly more efficient if the front of the entire packet (instead
4396 * of the front of the headers) is aligned.
4397 *
4398 * Note we must always set align_tweak to 0 if we are using
4399 * jumbo frames.
4400 */
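	/*
	 * (With a 2-byte tweak the 14-byte Ethernet header ends on a
	 * 4-byte boundary, so the IP header that follows is 32-bit
	 * aligned.)
	 */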
4401 #ifdef __NO_STRICT_ALIGNMENT
4402 sc->sc_align_tweak = 0;
4403 #else
4404 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4405 sc->sc_align_tweak = 0;
4406 else
4407 sc->sc_align_tweak = 2;
4408 #endif /* __NO_STRICT_ALIGNMENT */
4409
4410 /* Cancel any pending I/O. */
4411 wm_stop(ifp, 0);
4412
4413 /* update statistics before reset */
4414 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4415 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4416
4417 /* Reset the chip to a known state. */
4418 wm_reset(sc);
4419
4420 switch (sc->sc_type) {
4421 case WM_T_82571:
4422 case WM_T_82572:
4423 case WM_T_82573:
4424 case WM_T_82574:
4425 case WM_T_82583:
4426 case WM_T_80003:
4427 case WM_T_ICH8:
4428 case WM_T_ICH9:
4429 case WM_T_ICH10:
4430 case WM_T_PCH:
4431 case WM_T_PCH2:
4432 case WM_T_PCH_LPT:
4433 if (wm_check_mng_mode(sc) != 0)
4434 wm_get_hw_control(sc);
4435 break;
4436 default:
4437 break;
4438 }
4439
4440 /* Reset the PHY. */
4441 if (sc->sc_flags & WM_F_HAS_MII)
4442 wm_gmii_reset(sc);
4443
4444 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4445 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4446 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
4447 || (sc->sc_type == WM_T_PCH_LPT))
4448 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4449
4450 /* Initialize the transmit descriptor ring. */
4451 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4452 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4453 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4454 sc->sc_txfree = WM_NTXDESC(sc);
4455 sc->sc_txnext = 0;
4456
4457 if (sc->sc_type < WM_T_82543) {
4458 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4459 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4460 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4461 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4462 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4463 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4464 } else {
4465 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4466 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4467 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4468 CSR_WRITE(sc, WMREG_TDH, 0);
4469 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4470 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4471
4472 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4473 /*
4474 * Don't write TDT before TCTL.EN is set.
4475 			 * See the documentation.
4476 */
4477 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4478 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4479 | TXDCTL_WTHRESH(0));
4480 else {
4481 CSR_WRITE(sc, WMREG_TDT, 0);
4482 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4483 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4484 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4485 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4486 }
4487 }
4488 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4489 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4490
4491 /* Initialize the transmit job descriptors. */
4492 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4493 sc->sc_txsoft[i].txs_mbuf = NULL;
4494 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4495 sc->sc_txsnext = 0;
4496 sc->sc_txsdirty = 0;
4497
4498 /*
4499 * Initialize the receive descriptor and receive job
4500 * descriptor rings.
4501 */
4502 if (sc->sc_type < WM_T_82543) {
4503 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4504 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4505 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4506 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4507 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4508 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4509
4510 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4511 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4512 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4513 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4514 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4515 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4516 } else {
4517 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4518 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4519 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4520 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4521 CSR_WRITE(sc, WMREG_EITR(0), 450);
4522 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4523 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4524 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4525 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4526 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4527 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4528 | RXDCTL_WTHRESH(1));
4529 } else {
4530 CSR_WRITE(sc, WMREG_RDH, 0);
4531 CSR_WRITE(sc, WMREG_RDT, 0);
4532 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4533 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4534 }
4535 }
4536 for (i = 0; i < WM_NRXDESC; i++) {
4537 rxs = &sc->sc_rxsoft[i];
4538 if (rxs->rxs_mbuf == NULL) {
4539 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4540 log(LOG_ERR, "%s: unable to allocate or map "
4541 "rx buffer %d, error = %d\n",
4542 device_xname(sc->sc_dev), i, error);
4543 /*
4544 * XXX Should attempt to run with fewer receive
4545 * XXX buffers instead of just failing.
4546 */
4547 wm_rxdrain(sc);
4548 goto out;
4549 }
4550 } else {
4551 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4552 WM_INIT_RXDESC(sc, i);
4553 /*
4554 			 * For 82575 and newer devices, the RX descriptors
4555 			 * must be initialized after RCTL.EN is set in
4556 			 * wm_set_filter().
4557 */
4558 }
4559 }
4560 sc->sc_rxptr = 0;
4561 sc->sc_rxdiscard = 0;
4562 WM_RXCHAIN_RESET(sc);
4563
4564 /*
4565 * Clear out the VLAN table -- we don't use it (yet).
4566 */
4567 CSR_WRITE(sc, WMREG_VET, 0);
4568 if (sc->sc_type == WM_T_I350)
4569 trynum = 10; /* Due to hw errata */
4570 else
4571 trynum = 1;
4572 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4573 for (j = 0; j < trynum; j++)
4574 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4575
4576 /*
4577 * Set up flow-control parameters.
4578 *
4579 * XXX Values could probably stand some tuning.
4580 */
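	/*
	 * FCAL/FCAH hold the IEEE 802.3x pause-frame destination
	 * address (01:80:c2:00:00:01) and FCT the MAC control
	 * ethertype (0x8808); both are fixed by the standard.
	 */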
4581 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4582 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4583 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4584 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4585 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4586 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4587 }
4588
4589 sc->sc_fcrtl = FCRTL_DFLT;
4590 if (sc->sc_type < WM_T_82543) {
4591 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4592 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4593 } else {
4594 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4595 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4596 }
4597
4598 if (sc->sc_type == WM_T_80003)
4599 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4600 else
4601 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4602
4603 /* Writes the control register. */
4604 wm_set_vlan(sc);
4605
4606 if (sc->sc_flags & WM_F_HAS_MII) {
4607 int val;
4608
4609 switch (sc->sc_type) {
4610 case WM_T_80003:
4611 case WM_T_ICH8:
4612 case WM_T_ICH9:
4613 case WM_T_ICH10:
4614 case WM_T_PCH:
4615 case WM_T_PCH2:
4616 case WM_T_PCH_LPT:
4617 /*
4618 			 * Set the MAC to wait the maximum time between each
4619 			 * iteration and to increase the number of iterations
4620 			 * when polling the PHY; this fixes erroneous timeouts
4621 			 * at 10 Mbps.
4622 */
4623 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4624 0xFFFF);
4625 val = wm_kmrn_readreg(sc,
4626 KUMCTRLSTA_OFFSET_INB_PARAM);
4627 val |= 0x3F;
4628 wm_kmrn_writereg(sc,
4629 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4630 break;
4631 default:
4632 break;
4633 }
4634
4635 if (sc->sc_type == WM_T_80003) {
4636 val = CSR_READ(sc, WMREG_CTRL_EXT);
4637 val &= ~CTRL_EXT_LINK_MODE_MASK;
4638 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4639
4640 /* Bypass RX and TX FIFO's */
4641 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4642 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4643 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4644 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4645 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4646 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4647 }
4648 }
4649 #if 0
4650 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4651 #endif
4652
4653 /*
4654 * Set up checksum offload parameters.
4655 */
4656 reg = CSR_READ(sc, WMREG_RXCSUM);
4657 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4658 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4659 reg |= RXCSUM_IPOFL;
4660 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4661 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4662 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4663 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4664 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4665
4666 /* Reset TBI's RXCFG count */
4667 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4668
4669 /*
4670 * Set up the interrupt registers.
4671 */
4672 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4673 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4674 ICR_RXO | ICR_RXT0;
4675 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4676 sc->sc_icr |= ICR_RXCFG;
4677 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4678
4679 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4680 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4681 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4682 reg = CSR_READ(sc, WMREG_KABGTXD);
4683 reg |= KABGTXD_BGSQLBIAS;
4684 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4685 }
4686
4687 /* Set up the inter-packet gap. */
4688 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4689
4690 if (sc->sc_type >= WM_T_82543) {
4691 /*
4692 * Set up the interrupt throttling register (units of 256ns)
4693 * Note that a footnote in Intel's documentation says this
4694 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4695 		 * or 10Mbit mode. Empirically, this also appears to be
4696 		 * true for the 1024ns units of the other interrupt-related
4697 		 * timer registers -- so, really, we ought to divide this
4698 		 * value by 4 when the link speed is low.
4699 *
4700 * XXX implement this division at link speed change!
4701 */
4702
4703 /*
4704 * For N interrupts/sec, set this value to:
4705 * 1000000000 / (N * 256). Note that we set the
4706 * absolute and packet timer values to this value
4707 * divided by 4 to get "simple timer" behavior.
4708 */
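		/*
		 * E.g. with sc_itr = 1500 as set below:
		 * N = 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.
		 */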
4709
4710 sc->sc_itr = 1500; /* 2604 ints/sec */
4711 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4712 }
4713
4714 /* Set the VLAN ethernetype. */
4715 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4716
4717 /*
4718 * Set up the transmit control register; we start out with
4719 	 * a collision distance suitable for FDX, but update it when
4720 * we resolve the media type.
4721 */
4722 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4723 | TCTL_CT(TX_COLLISION_THRESHOLD)
4724 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4725 if (sc->sc_type >= WM_T_82571)
4726 sc->sc_tctl |= TCTL_MULR;
4727 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4728
4729 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4730 /*
4731 * Write TDT after TCTL.EN is set.
4732 		 * See the documentation.
4733 */
4734 CSR_WRITE(sc, WMREG_TDT, 0);
4735 }
4736
4737 if (sc->sc_type == WM_T_80003) {
4738 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4739 reg &= ~TCTL_EXT_GCEX_MASK;
4740 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4741 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4742 }
4743
4744 /* Set the media. */
4745 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4746 goto out;
4747
4748 /* Configure for OS presence */
4749 wm_init_manageability(sc);
4750
4751 /*
4752 * Set up the receive control register; we actually program
4753 * the register when we set the receive filter. Use multicast
4754 * address offset type 0.
4755 *
4756 * Only the i82544 has the ability to strip the incoming
4757 * CRC, so we don't enable that feature.
4758 */
4759 sc->sc_mchash_type = 0;
4760 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4761 | RCTL_MO(sc->sc_mchash_type);
4762
4763 /*
4764 	 * The I350 has a bug: it always strips the CRC whether asked
4765 	 * to or not, so request a stripped CRC and cope in wm_rxintr().
4766 */
4767 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210))
4768 sc->sc_rctl |= RCTL_SECRC;
4769
4770 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4771 && (ifp->if_mtu > ETHERMTU)) {
4772 sc->sc_rctl |= RCTL_LPE;
4773 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4774 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4775 }
4776
4777 if (MCLBYTES == 2048) {
4778 sc->sc_rctl |= RCTL_2k;
4779 } else {
4780 if (sc->sc_type >= WM_T_82543) {
4781 switch (MCLBYTES) {
4782 case 4096:
4783 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4784 break;
4785 case 8192:
4786 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4787 break;
4788 case 16384:
4789 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4790 break;
4791 default:
4792 panic("wm_init: MCLBYTES %d unsupported",
4793 MCLBYTES);
4794 break;
4795 }
4796 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
4797 }
4798
4799 /* Set the receive filter. */
4800 wm_set_filter(sc);
4801
4802 	/* On 82575 and later, set RDT only if RX is enabled */
4803 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4804 for (i = 0; i < WM_NRXDESC; i++)
4805 WM_INIT_RXDESC(sc, i);
4806
4807 /* Start the one second link check clock. */
4808 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4809
4810 /* ...all done! */
4811 ifp->if_flags |= IFF_RUNNING;
4812 ifp->if_flags &= ~IFF_OACTIVE;
4813
4814 out:
4815 sc->sc_if_flags = ifp->if_flags;
4816 if (error)
4817 log(LOG_ERR, "%s: interface not running\n",
4818 device_xname(sc->sc_dev));
4819 return error;
4820 }
4821
4822 /*
4823 * wm_rxdrain:
4824 *
4825 * Drain the receive queue.
4826 */
4827 static void
4828 wm_rxdrain(struct wm_softc *sc)
4829 {
4830 struct wm_rxsoft *rxs;
4831 int i;
4832
4833 for (i = 0; i < WM_NRXDESC; i++) {
4834 rxs = &sc->sc_rxsoft[i];
4835 if (rxs->rxs_mbuf != NULL) {
4836 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4837 m_freem(rxs->rxs_mbuf);
4838 rxs->rxs_mbuf = NULL;
4839 }
4840 }
4841 }
4842
4843 /*
4844 * wm_stop: [ifnet interface function]
4845 *
4846 * Stop transmission on the interface.
4847 */
4848 static void
4849 wm_stop(struct ifnet *ifp, int disable)
4850 {
4851 struct wm_softc *sc = ifp->if_softc;
4852 struct wm_txsoft *txs;
4853 int i;
4854
4855 /* Stop the one second clock. */
4856 callout_stop(&sc->sc_tick_ch);
4857
4858 /* Stop the 82547 Tx FIFO stall check timer. */
4859 if (sc->sc_type == WM_T_82547)
4860 callout_stop(&sc->sc_txfifo_ch);
4861
4862 if (sc->sc_flags & WM_F_HAS_MII) {
4863 /* Down the MII. */
4864 mii_down(&sc->sc_mii);
4865 } else {
4866 #if 0
4867 /* Should we clear PHY's status properly? */
4868 wm_reset(sc);
4869 #endif
4870 }
4871
4872 /* Stop the transmit and receive processes. */
4873 CSR_WRITE(sc, WMREG_TCTL, 0);
4874 CSR_WRITE(sc, WMREG_RCTL, 0);
4875 sc->sc_rctl &= ~RCTL_EN;
4876
4877 /*
4878 * Clear the interrupt mask to ensure the device cannot assert its
4879 * interrupt line.
4880 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4881 * any currently pending or shared interrupt.
4882 */
4883 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4884 sc->sc_icr = 0;
4885
4886 /* Release any queued transmit buffers. */
4887 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4888 txs = &sc->sc_txsoft[i];
4889 if (txs->txs_mbuf != NULL) {
4890 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4891 m_freem(txs->txs_mbuf);
4892 txs->txs_mbuf = NULL;
4893 }
4894 }
4895
4896 /* Mark the interface as down and cancel the watchdog timer. */
4897 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4898 ifp->if_timer = 0;
4899
4900 if (disable)
4901 wm_rxdrain(sc);
4902
4903 #if 0 /* notyet */
4904 if (sc->sc_type >= WM_T_82544)
4905 CSR_WRITE(sc, WMREG_WUC, 0);
4906 #endif
4907 }
4908
4909 void
4910 wm_get_auto_rd_done(struct wm_softc *sc)
4911 {
4912 int i;
4913
4914 /* wait for eeprom to reload */
4915 switch (sc->sc_type) {
4916 case WM_T_82571:
4917 case WM_T_82572:
4918 case WM_T_82573:
4919 case WM_T_82574:
4920 case WM_T_82583:
4921 case WM_T_82575:
4922 case WM_T_82576:
4923 case WM_T_82580:
4924 case WM_T_82580ER:
4925 case WM_T_I350:
4926 case WM_T_I210:
4927 case WM_T_I211:
4928 case WM_T_80003:
4929 case WM_T_ICH8:
4930 case WM_T_ICH9:
4931 for (i = 0; i < 10; i++) {
4932 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4933 break;
4934 delay(1000);
4935 }
4936 if (i == 10) {
4937 log(LOG_ERR, "%s: auto read from eeprom failed to "
4938 "complete\n", device_xname(sc->sc_dev));
4939 }
4940 break;
4941 default:
4942 break;
4943 }
4944 }
4945
4946 void
4947 wm_lan_init_done(struct wm_softc *sc)
4948 {
4949 uint32_t reg = 0;
4950 int i;
4951
4952 /* wait for eeprom to reload */
4953 switch (sc->sc_type) {
4954 case WM_T_ICH10:
4955 case WM_T_PCH:
4956 case WM_T_PCH2:
4957 case WM_T_PCH_LPT:
4958 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4959 reg = CSR_READ(sc, WMREG_STATUS);
4960 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4961 break;
4962 delay(100);
4963 }
4964 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4965 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4966 "complete\n", device_xname(sc->sc_dev), __func__);
4967 }
4968 break;
4969 default:
4970 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4971 __func__);
4972 break;
4973 }
4974
4975 reg &= ~STATUS_LAN_INIT_DONE;
4976 CSR_WRITE(sc, WMREG_STATUS, reg);
4977 }
4978
4979 void
4980 wm_get_cfg_done(struct wm_softc *sc)
4981 {
4982 int mask;
4983 uint32_t reg;
4984 int i;
4985
4986 /* wait for eeprom to reload */
4987 switch (sc->sc_type) {
4988 case WM_T_82542_2_0:
4989 case WM_T_82542_2_1:
4990 /* null */
4991 break;
4992 case WM_T_82543:
4993 case WM_T_82544:
4994 case WM_T_82540:
4995 case WM_T_82545:
4996 case WM_T_82545_3:
4997 case WM_T_82546:
4998 case WM_T_82546_3:
4999 case WM_T_82541:
5000 case WM_T_82541_2:
5001 case WM_T_82547:
5002 case WM_T_82547_2:
5003 case WM_T_82573:
5004 case WM_T_82574:
5005 case WM_T_82583:
5006 /* generic */
5007 delay(10*1000);
5008 break;
5009 case WM_T_80003:
5010 case WM_T_82571:
5011 case WM_T_82572:
5012 case WM_T_82575:
5013 case WM_T_82576:
5014 case WM_T_82580:
5015 case WM_T_82580ER:
5016 case WM_T_I350:
5017 case WM_T_I210:
5018 case WM_T_I211:
5019 if (sc->sc_type == WM_T_82571) {
5020 /* Only 82571 shares port 0 */
5021 mask = EEMNGCTL_CFGDONE_0;
5022 } else
5023 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5024 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5025 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5026 break;
5027 delay(1000);
5028 }
5029 if (i >= WM_PHY_CFG_TIMEOUT) {
5030 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5031 device_xname(sc->sc_dev), __func__));
5032 }
5033 break;
5034 case WM_T_ICH8:
5035 case WM_T_ICH9:
5036 case WM_T_ICH10:
5037 case WM_T_PCH:
5038 case WM_T_PCH2:
5039 case WM_T_PCH_LPT:
5040 delay(10*1000);
5041 if (sc->sc_type >= WM_T_ICH10)
5042 wm_lan_init_done(sc);
5043 else
5044 wm_get_auto_rd_done(sc);
5045
5046 reg = CSR_READ(sc, WMREG_STATUS);
5047 if ((reg & STATUS_PHYRA) != 0)
5048 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
5049 break;
5050 default:
5051 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5052 __func__);
5053 break;
5054 }
5055 }
5056
5057 /*
5058 * wm_acquire_eeprom:
5059 *
5060 * Perform the EEPROM handshake required on some chips.
5061 */
5062 static int
5063 wm_acquire_eeprom(struct wm_softc *sc)
5064 {
5065 uint32_t reg;
5066 int x;
5067 int ret = 0;
5068
5069 	/* Flash-based NVM needs no handshake; always succeeds. */
5070 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5071 return 0;
5072
5073 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5074 ret = wm_get_swfwhw_semaphore(sc);
5075 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5076 /* this will also do wm_get_swsm_semaphore() if needed */
5077 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5078 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5079 ret = wm_get_swsm_semaphore(sc);
5080 }
5081
5082 if (ret) {
5083 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5084 __func__);
5085 return 1;
5086 }
5087
5088 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5089 reg = CSR_READ(sc, WMREG_EECD);
5090
5091 /* Request EEPROM access. */
5092 reg |= EECD_EE_REQ;
5093 CSR_WRITE(sc, WMREG_EECD, reg);
5094
5095 /* ..and wait for it to be granted. */
5096 for (x = 0; x < 1000; x++) {
5097 reg = CSR_READ(sc, WMREG_EECD);
5098 if (reg & EECD_EE_GNT)
5099 break;
5100 delay(5);
5101 }
5102 if ((reg & EECD_EE_GNT) == 0) {
5103 aprint_error_dev(sc->sc_dev,
5104 "could not acquire EEPROM GNT\n");
5105 reg &= ~EECD_EE_REQ;
5106 CSR_WRITE(sc, WMREG_EECD, reg);
5107 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5108 wm_put_swfwhw_semaphore(sc);
5109 if (sc->sc_flags & WM_F_SWFW_SYNC)
5110 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5111 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5112 wm_put_swsm_semaphore(sc);
5113 return 1;
5114 }
5115 }
5116
5117 return 0;
5118 }
5119
5120 /*
5121 * wm_release_eeprom:
5122 *
5123 * Release the EEPROM mutex.
5124 */
5125 static void
5126 wm_release_eeprom(struct wm_softc *sc)
5127 {
5128 uint32_t reg;
5129
5130 	/* Flash-based NVM needs no handshake; nothing to release. */
5131 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5132 return;
5133
5134 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5135 reg = CSR_READ(sc, WMREG_EECD);
5136 reg &= ~EECD_EE_REQ;
5137 CSR_WRITE(sc, WMREG_EECD, reg);
5138 }
5139
5140 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5141 wm_put_swfwhw_semaphore(sc);
5142 if (sc->sc_flags & WM_F_SWFW_SYNC)
5143 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5144 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5145 wm_put_swsm_semaphore(sc);
5146 }
5147
5148 /*
5149 * wm_eeprom_sendbits:
5150 *
5151 * Send a series of bits to the EEPROM.
5152 */
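/*
 * Bits are shifted out MSB first: each bit is presented on EECD_DI
 * and latched into the EEPROM by pulsing EECD_SK high and then low.
 */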
5153 static void
5154 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5155 {
5156 uint32_t reg;
5157 int x;
5158
5159 reg = CSR_READ(sc, WMREG_EECD);
5160
5161 for (x = nbits; x > 0; x--) {
5162 if (bits & (1U << (x - 1)))
5163 reg |= EECD_DI;
5164 else
5165 reg &= ~EECD_DI;
5166 CSR_WRITE(sc, WMREG_EECD, reg);
5167 delay(2);
5168 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5169 delay(2);
5170 CSR_WRITE(sc, WMREG_EECD, reg);
5171 delay(2);
5172 }
5173 }
5174
5175 /*
5176 * wm_eeprom_recvbits:
5177 *
5178 * Receive a series of bits from the EEPROM.
5179 */
5180 static void
5181 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5182 {
5183 uint32_t reg, val;
5184 int x;
5185
5186 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5187
5188 val = 0;
5189 for (x = nbits; x > 0; x--) {
5190 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5191 delay(2);
5192 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5193 val |= (1U << (x - 1));
5194 CSR_WRITE(sc, WMREG_EECD, reg);
5195 delay(2);
5196 }
5197 *valp = val;
5198 }
5199
5200 /*
5201 * wm_read_eeprom_uwire:
5202 *
5203 * Read a word from the EEPROM using the MicroWire protocol.
5204 */
5205 static int
5206 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5207 {
5208 uint32_t reg, val;
5209 int i;
5210
5211 for (i = 0; i < wordcnt; i++) {
5212 /* Clear SK and DI. */
5213 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5214 CSR_WRITE(sc, WMREG_EECD, reg);
5215
5216 /*
5217 		 * XXX: workaround for a bug in qemu 0.12.x and prior,
5218 		 * and in Xen.
5219 		 *
5220 		 * We use this workaround only for the 82540 because
5221 		 * qemu's e1000 acts as an 82540.
5222 */
5223 if (sc->sc_type == WM_T_82540) {
5224 reg |= EECD_SK;
5225 CSR_WRITE(sc, WMREG_EECD, reg);
5226 reg &= ~EECD_SK;
5227 CSR_WRITE(sc, WMREG_EECD, reg);
5228 delay(2);
5229 }
5230 /* XXX: end of workaround */
5231
5232 /* Set CHIP SELECT. */
5233 reg |= EECD_CS;
5234 CSR_WRITE(sc, WMREG_EECD, reg);
5235 delay(2);
5236
5237 /* Shift in the READ command. */
5238 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5239
5240 /* Shift in address. */
5241 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5242
5243 /* Shift out the data. */
5244 wm_eeprom_recvbits(sc, &val, 16);
5245 data[i] = val & 0xffff;
5246
5247 /* Clear CHIP SELECT. */
5248 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5249 CSR_WRITE(sc, WMREG_EECD, reg);
5250 delay(2);
5251 }
5252
5253 return 0;
5254 }
5255
5256 /*
5257 * wm_spi_eeprom_ready:
5258 *
5259 * Wait for a SPI EEPROM to be ready for commands.
5260 */
5261 static int
5262 wm_spi_eeprom_ready(struct wm_softc *sc)
5263 {
5264 uint32_t val;
5265 int usec;
5266
5267 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5268 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5269 wm_eeprom_recvbits(sc, &val, 8);
5270 if ((val & SPI_SR_RDY) == 0)
5271 break;
5272 }
5273 if (usec >= SPI_MAX_RETRIES) {
5274 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5275 return 1;
5276 }
5277 return 0;
5278 }
5279
5280 /*
5281 * wm_read_eeprom_spi:
5282 *
5283  * Read a word from the EEPROM using the SPI protocol.
5284 */
5285 static int
5286 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5287 {
5288 uint32_t reg, val;
5289 int i;
5290 uint8_t opc;
5291
5292 /* Clear SK and CS. */
5293 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5294 CSR_WRITE(sc, WMREG_EECD, reg);
5295 delay(2);
5296
5297 if (wm_spi_eeprom_ready(sc))
5298 return 1;
5299
5300 /* Toggle CS to flush commands. */
5301 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5302 delay(2);
5303 CSR_WRITE(sc, WMREG_EECD, reg);
5304 delay(2);
5305
5306 opc = SPI_OPC_READ;
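	/*
	 * On parts with 8 address bits, the ninth address bit (A8) is
	 * carried in the opcode and selects the upper 128 words.
	 */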
5307 if (sc->sc_ee_addrbits == 8 && word >= 128)
5308 opc |= SPI_OPC_A8;
5309
5310 wm_eeprom_sendbits(sc, opc, 8);
5311 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5312
5313 for (i = 0; i < wordcnt; i++) {
5314 wm_eeprom_recvbits(sc, &val, 16);
5315 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5316 }
5317
5318 /* Raise CS and clear SK. */
5319 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5320 CSR_WRITE(sc, WMREG_EECD, reg);
5321 delay(2);
5322
5323 return 0;
5324 }
5325
5326 #define NVM_CHECKSUM 0xBABA
5327 #define EEPROM_SIZE 0x0040
5328 #define NVM_COMPAT 0x0003
5329 #define NVM_COMPAT_VALID_CHECKSUM 0x0001
5330 #define NVM_FUTURE_INIT_WORD1 0x0019
5331 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
5332
5333 /*
5334  * wm_validate_eeprom_checksum:
5335 *
5336 * The checksum is defined as the sum of the first 64 (16 bit) words.
5337 */
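/*
 * That is, the image is considered valid when the 16-bit sum of
 * words 0x00 through 0x3f (EEPROM_SIZE) equals NVM_CHECKSUM (0xbaba);
 * the checksum word is chosen at programming time to make the sum
 * come out to that value.
 */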
5338 static int
5339 wm_validate_eeprom_checksum(struct wm_softc *sc)
5340 {
5341 uint16_t checksum, valid_checksum;
5342 uint16_t eeprom_data;
5343 uint16_t csum_wordaddr;
5344 int i;
5345
5346 checksum = 0;
5347
5348 /* Don't check for I211 */
5349 if (sc->sc_type == WM_T_I211)
5350 return 0;
5351
5352 if (sc->sc_type == WM_T_PCH_LPT) {
5353 csum_wordaddr = NVM_COMPAT;
5354 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5355 } else {
5356 csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5357 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5358 }
5359
5360 #ifdef WM_DEBUG
5361 /* Dump EEPROM image for debug */
5362 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5363 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5364 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5365 wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5366 if ((eeprom_data & valid_checksum) == 0) {
5367 DPRINTF(WM_DEBUG_NVM,
5368 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
5369 device_xname(sc->sc_dev), eeprom_data,
5370 valid_checksum));
5371 }
5372 }
5373
5374 if ((wm_debug & WM_DEBUG_NVM) != 0) {
5375 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5376 for (i = 0; i < EEPROM_SIZE; i++) {
5377 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5378 printf("XX ");
5379 else
5380 printf("%04x ", eeprom_data);
5381 if (i % 8 == 7)
5382 printf("\n");
5383 }
5384 }
5385
5386 #endif /* WM_DEBUG */
5387
5388 for (i = 0; i < EEPROM_SIZE; i++) {
5389 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5390 return 1;
5391 checksum += eeprom_data;
5392 }
5393
5394 if (checksum != (uint16_t) NVM_CHECKSUM) {
5395 #ifdef WM_DEBUG
5396 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5397 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5398 #endif
5399 }
5400
5401 return 0;
5402 }
5403
5404 /*
5405 * wm_read_eeprom:
5406 *
5407 * Read data from the serial EEPROM.
5408 */
5409 static int
5410 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5411 {
5412 int rv;
5413
5414 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5415 return 1;
5416
5417 if (wm_acquire_eeprom(sc))
5418 return 1;
5419
5420 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5421 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5422 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5423 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5424 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5425 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5426 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5427 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5428 else
5429 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5430
5431 wm_release_eeprom(sc);
5432 return rv;
5433 }
5434
5435 static int
5436 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5437 uint16_t *data)
5438 {
5439 int i, eerd = 0;
5440 int error = 0;
5441
5442 for (i = 0; i < wordcnt; i++) {
5443 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5444
5445 CSR_WRITE(sc, WMREG_EERD, eerd);
5446 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5447 if (error != 0)
5448 break;
5449
5450 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5451 }
5452
5453 return error;
5454 }
5455
5456 static int
5457 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5458 {
5459 uint32_t attempts = 100000;
5460 uint32_t i, reg = 0;
5461 int32_t done = -1;
5462
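	/* Worst case: 100000 polls at 5us each, i.e. roughly 500ms. */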
5463 for (i = 0; i < attempts; i++) {
5464 reg = CSR_READ(sc, rw);
5465
5466 if (reg & EERD_DONE) {
5467 done = 0;
5468 break;
5469 }
5470 delay(5);
5471 }
5472
5473 return done;
5474 }
5475
5476 static int
5477 wm_check_alt_mac_addr(struct wm_softc *sc)
5478 {
5479 uint16_t myea[ETHER_ADDR_LEN / 2];
5480 uint16_t offset = EEPROM_OFF_MACADDR;
5481
5482 /* Try to read alternative MAC address pointer */
5483 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5484 return -1;
5485
5486 /* Check pointer */
5487 if (offset == 0xffff)
5488 return -1;
5489
5490 /*
5491 	 * Check whether the alternative MAC address is valid.
5492 	 * Some cards have a non-0xffff pointer but don't actually
5493 	 * use an alternative MAC address.
5494 	 *
5495 	 * Check whether the broadcast bit is set or not.
5496 */
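	/*
	 * Bit 0 of the first octet is the IEEE group (multicast/broadcast)
	 * bit and must be clear in any valid individual address; myea[0]
	 * holds the first two octets, so (myea[0] & 0xff) is octet 0.
	 */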
5497 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5498 if (((myea[0] & 0xff) & 0x01) == 0)
5499 return 0; /* found! */
5500
5501 /* not found */
5502 return -1;
5503 }
5504
5505 static int
5506 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5507 {
5508 uint16_t myea[ETHER_ADDR_LEN / 2];
5509 uint16_t offset = EEPROM_OFF_MACADDR;
5510 int do_invert = 0;
5511
5512 switch (sc->sc_type) {
5513 case WM_T_82580:
5514 case WM_T_82580ER:
5515 case WM_T_I350:
5516 switch (sc->sc_funcid) {
5517 case 0:
5518 /* default value (== EEPROM_OFF_MACADDR) */
5519 break;
5520 case 1:
5521 offset = EEPROM_OFF_LAN1;
5522 break;
5523 case 2:
5524 offset = EEPROM_OFF_LAN2;
5525 break;
5526 case 3:
5527 offset = EEPROM_OFF_LAN3;
5528 break;
5529 default:
5530 goto bad;
5531 /* NOTREACHED */
5532 break;
5533 }
5534 break;
5535 case WM_T_82571:
5536 case WM_T_82575:
5537 case WM_T_82576:
5538 case WM_T_80003:
5539 case WM_T_I210:
5540 case WM_T_I211:
5541 if (wm_check_alt_mac_addr(sc) != 0) {
5542 /* reset the offset to LAN0 */
5543 offset = EEPROM_OFF_MACADDR;
5544 if ((sc->sc_funcid & 0x01) == 1)
5545 do_invert = 1;
5546 goto do_read;
5547 }
5548 switch (sc->sc_funcid) {
5549 case 0:
5550 /*
5551 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5552 * itself.
5553 */
5554 break;
5555 case 1:
5556 offset += EEPROM_OFF_MACADDR_LAN1;
5557 break;
5558 case 2:
5559 offset += EEPROM_OFF_MACADDR_LAN2;
5560 break;
5561 case 3:
5562 offset += EEPROM_OFF_MACADDR_LAN3;
5563 break;
5564 default:
5565 goto bad;
5566 /* NOTREACHED */
5567 break;
5568 }
5569 break;
5570 default:
5571 if ((sc->sc_funcid & 0x01) == 1)
5572 do_invert = 1;
5573 break;
5574 }
5575
5576 do_read:
5577 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5578 myea) != 0) {
5579 goto bad;
5580 }
5581
5582 enaddr[0] = myea[0] & 0xff;
5583 enaddr[1] = myea[0] >> 8;
5584 enaddr[2] = myea[1] & 0xff;
5585 enaddr[3] = myea[1] >> 8;
5586 enaddr[4] = myea[2] & 0xff;
5587 enaddr[5] = myea[2] >> 8;
5588
5589 /*
5590 * Toggle the LSB of the MAC address on the second port
5591 * of some dual port cards.
5592 */
5593 if (do_invert != 0)
5594 enaddr[5] ^= 1;
5595
5596 return 0;
5597
5598 bad:
5599 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5600
5601 return -1;
5602 }
5603
5604 /*
5605 * wm_add_rxbuf:
5606 *
5607  * Add a receive buffer to the indicated descriptor.
5608 */
5609 static int
5610 wm_add_rxbuf(struct wm_softc *sc, int idx)
5611 {
5612 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5613 struct mbuf *m;
5614 int error;
5615
5616 MGETHDR(m, M_DONTWAIT, MT_DATA);
5617 if (m == NULL)
5618 return ENOBUFS;
5619
5620 MCLGET(m, M_DONTWAIT);
5621 if ((m->m_flags & M_EXT) == 0) {
5622 m_freem(m);
5623 return ENOBUFS;
5624 }
5625
5626 if (rxs->rxs_mbuf != NULL)
5627 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5628
5629 rxs->rxs_mbuf = m;
5630
5631 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5632 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5633 BUS_DMA_READ|BUS_DMA_NOWAIT);
5634 if (error) {
5635 /* XXX XXX XXX */
5636 aprint_error_dev(sc->sc_dev,
5637 "unable to load rx DMA map %d, error = %d\n",
5638 idx, error);
5639 panic("wm_add_rxbuf");
5640 }
5641
5642 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5643 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5644
5645 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5646 if ((sc->sc_rctl & RCTL_EN) != 0)
5647 WM_INIT_RXDESC(sc, idx);
5648 } else
5649 WM_INIT_RXDESC(sc, idx);
5650
5651 return 0;
5652 }
5653
5654 /*
5655 * wm_set_ral:
5656 *
 *	Set an entry in the receive address list.
5658 */
5659 static void
5660 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5661 {
5662 uint32_t ral_lo, ral_hi;
5663
5664 if (enaddr != NULL) {
5665 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5666 (enaddr[3] << 24);
5667 ral_hi = enaddr[4] | (enaddr[5] << 8);
5668 ral_hi |= RAL_AV;
5669 } else {
5670 ral_lo = 0;
5671 ral_hi = 0;
5672 }
5673
5674 if (sc->sc_type >= WM_T_82544) {
5675 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5676 ral_lo);
5677 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5678 ral_hi);
5679 } else {
5680 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5681 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5682 }
5683 }
5684
5685 /*
5686 * wm_mchash:
5687 *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter (1024-bit on the ICH and PCH variants).
5690 */
5691 static uint32_t
5692 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5693 {
5694 static const int lo_shift[4] = { 4, 3, 2, 0 };
5695 static const int hi_shift[4] = { 4, 5, 6, 8 };
5696 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5697 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5698 uint32_t hash;
5699
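	/*
	 * The hash is built from bits of the last two octets of the
	 * address; sc_mchash_type selects which bit window is used.
	 * ICH/PCH variants index a 1024-bit table (10-bit hash), all
	 * other chips a 4096-bit table (12-bit hash).
	 */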
5700 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5701 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5702 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5703 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5704 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5705 return (hash & 0x3ff);
5706 }
5707 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5708 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5709
5710 return (hash & 0xfff);
5711 }
5712
5713 /*
5714 * wm_set_filter:
5715 *
5716 * Set up the receive filter.
5717 */
5718 static void
5719 wm_set_filter(struct wm_softc *sc)
5720 {
5721 struct ethercom *ec = &sc->sc_ethercom;
5722 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5723 struct ether_multi *enm;
5724 struct ether_multistep step;
5725 bus_addr_t mta_reg;
5726 uint32_t hash, reg, bit;
5727 int i, size;
5728
5729 if (sc->sc_type >= WM_T_82544)
5730 mta_reg = WMREG_CORDOVA_MTA;
5731 else
5732 mta_reg = WMREG_MTA;
5733
5734 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5735
5736 if (ifp->if_flags & IFF_BROADCAST)
5737 sc->sc_rctl |= RCTL_BAM;
5738 if (ifp->if_flags & IFF_PROMISC) {
5739 sc->sc_rctl |= RCTL_UPE;
5740 goto allmulti;
5741 }
5742
5743 /*
5744 * Set the station address in the first RAL slot, and
5745 * clear the remaining slots.
5746 */
5747 if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
5749 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5750 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
5751 || (sc->sc_type == WM_T_PCH_LPT))
5752 size = WM_RAL_TABSIZE_ICH8;
5753 else if (sc->sc_type == WM_T_82575)
5754 size = WM_RAL_TABSIZE_82575;
5755 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5756 size = WM_RAL_TABSIZE_82576;
5757 else if (sc->sc_type == WM_T_I350)
5758 size = WM_RAL_TABSIZE_I350;
5759 else
5760 size = WM_RAL_TABSIZE;
5761 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5762 for (i = 1; i < size; i++)
5763 wm_set_ral(sc, NULL, i);
5764
5765 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5766 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5767 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5768 size = WM_ICH8_MC_TABSIZE;
5769 else
5770 size = WM_MC_TABSIZE;
5771 /* Clear out the multicast table. */
5772 for (i = 0; i < size; i++)
5773 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5774
5775 ETHER_FIRST_MULTI(step, ec, enm);
5776 while (enm != NULL) {
5777 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5778 /*
5779 * We must listen to a range of multicast addresses.
5780 * For now, just accept all multicasts, rather than
5781 * trying to set only those filter bits needed to match
5782 * the range. (At this time, the only use of address
5783 * ranges is for IP multicast routing, for which the
5784 * range is big enough to require all bits set.)
5785 */
5786 goto allmulti;
5787 }
5788
5789 hash = wm_mchash(sc, enm->enm_addrlo);
5790
5791 reg = (hash >> 5);
5792 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5793 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5794 || (sc->sc_type == WM_T_PCH2)
5795 || (sc->sc_type == WM_T_PCH_LPT))
5796 reg &= 0x1f;
5797 else
5798 reg &= 0x7f;
5799 bit = hash & 0x1f;
5800
5801 hash = CSR_READ(sc, mta_reg + (reg << 2));
5802 hash |= 1U << bit;
5803
		/*
		 * XXX Hardware bug?  On the 82544, writing an MTA
		 * register whose offset satisfies (reg & 0xe) == 1
		 * apparently requires the preceding register to be
		 * re-written afterwards, so save and restore it.
		 */
5805 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5806 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5807 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5808 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5809 } else
5810 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5811
5812 ETHER_NEXT_MULTI(step, enm);
5813 }
5814
5815 ifp->if_flags &= ~IFF_ALLMULTI;
5816 goto setit;
5817
5818 allmulti:
5819 ifp->if_flags |= IFF_ALLMULTI;
5820 sc->sc_rctl |= RCTL_MPE;
5821
5822 setit:
5823 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5824 }
5825
5826 /*
5827 * wm_tbi_mediainit:
5828 *
5829 * Initialize media for use on 1000BASE-X devices.
5830 */
5831 static void
5832 wm_tbi_mediainit(struct wm_softc *sc)
5833 {
5834 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5835 const char *sep = "";
5836
5837 if (sc->sc_type < WM_T_82543)
5838 sc->sc_tipg = TIPG_WM_DFLT;
5839 else
5840 sc->sc_tipg = TIPG_LG_DFLT;
5841
5842 sc->sc_tbi_anegticks = 5;
5843
5844 /* Initialize our media structures */
5845 sc->sc_mii.mii_ifp = ifp;
5846
5847 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5848 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5849 wm_tbi_mediastatus);
5850
5851 /*
5852 * SWD Pins:
5853 *
5854 * 0 = Link LED (output)
5855 * 1 = Loss Of Signal (input)
5856 */
5857 sc->sc_ctrl |= CTRL_SWDPIO(0);
5858 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5859
5860 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5861
5862 #define ADD(ss, mm, dd) \
5863 do { \
5864 aprint_normal("%s%s", sep, ss); \
5865 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5866 sep = ", "; \
5867 } while (/*CONSTCOND*/0)
5868
5869 aprint_normal_dev(sc->sc_dev, "");
5870 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5871 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5872 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5873 aprint_normal("\n");
5874
5875 #undef ADD
5876
5877 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5878 }
5879
5880 /*
5881 * wm_tbi_mediastatus: [ifmedia interface function]
5882 *
5883 * Get the current interface media status on a 1000BASE-X device.
5884 */
5885 static void
5886 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5887 {
5888 struct wm_softc *sc = ifp->if_softc;
5889 uint32_t ctrl, status;
5890
5891 ifmr->ifm_status = IFM_AVALID;
5892 ifmr->ifm_active = IFM_ETHER;
5893
5894 status = CSR_READ(sc, WMREG_STATUS);
5895 if ((status & STATUS_LU) == 0) {
5896 ifmr->ifm_active |= IFM_NONE;
5897 return;
5898 }
5899
5900 ifmr->ifm_status |= IFM_ACTIVE;
5901 ifmr->ifm_active |= IFM_1000_SX;
5902 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5903 ifmr->ifm_active |= IFM_FDX;
5904 ctrl = CSR_READ(sc, WMREG_CTRL);
5905 if (ctrl & CTRL_RFCE)
5906 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5907 if (ctrl & CTRL_TFCE)
5908 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5909 }
5910
5911 /*
5912 * wm_tbi_mediachange: [ifmedia interface function]
5913 *
5914 * Set hardware to newly-selected media on a 1000BASE-X device.
5915 */
5916 static int
5917 wm_tbi_mediachange(struct ifnet *ifp)
5918 {
5919 struct wm_softc *sc = ifp->if_softc;
5920 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5921 uint32_t status;
5922 int i;
5923
5924 sc->sc_txcw = 0;
5925 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5926 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5927 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5928 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5929 sc->sc_txcw |= TXCW_ANE;
5930 } else {
5931 /*
5932 * If autonegotiation is turned off, force link up and turn on
5933 * full duplex
5934 */
5935 sc->sc_txcw &= ~TXCW_ANE;
5936 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5937 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5938 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5939 delay(1000);
5940 }
5941
	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
		    device_xname(sc->sc_dev), sc->sc_txcw));
5944 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5945 delay(10000);
5946
5947 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK, ("%s: i = 0x%x\n", device_xname(sc->sc_dev), i));
5949
	/*
	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
	 * optics detect a signal.  On older chips the pin carries Loss
	 * Of Signal instead, so 0 means a signal is present.
	 */
5954 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5955 /* Have signal; wait for the link to come up. */
5956
5957 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5958 /*
5959 * Reset the link, and let autonegotiation do its thing
5960 */
5961 sc->sc_ctrl |= CTRL_LRST;
5962 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5963 delay(1000);
5964 sc->sc_ctrl &= ~CTRL_LRST;
5965 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5966 delay(1000);
5967 }
5968
5969 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5970 delay(10000);
5971 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5972 break;
5973 }
5974
		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));
5977
5978 status = CSR_READ(sc, WMREG_STATUS);
5979 DPRINTF(WM_DEBUG_LINK,
5980 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev), status, STATUS_LU));
5982 if (status & STATUS_LU) {
5983 /* Link is up. */
5984 DPRINTF(WM_DEBUG_LINK,
5985 ("%s: LINK: set media -> link up %s\n",
5986 device_xname(sc->sc_dev),
5987 (status & STATUS_FD) ? "FDX" : "HDX"));
5988
5989 /*
5990 * NOTE: CTRL will update TFCE and RFCE automatically,
5991 * so we should update sc->sc_ctrl
5992 */
5993 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5994 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5995 sc->sc_fcrtl &= ~FCRTL_XONE;
5996 if (status & STATUS_FD)
5997 sc->sc_tctl |=
5998 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5999 else
6000 sc->sc_tctl |=
6001 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6002 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6003 sc->sc_fcrtl |= FCRTL_XONE;
6004 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6005 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6006 WMREG_OLD_FCRTL : WMREG_FCRTL,
6007 sc->sc_fcrtl);
6008 sc->sc_tbi_linkup = 1;
6009 } else {
6010 if (i == WM_LINKUP_TIMEOUT)
6011 wm_check_for_link(sc);
6012 /* Link is down. */
6013 DPRINTF(WM_DEBUG_LINK,
6014 ("%s: LINK: set media -> link down\n",
6015 device_xname(sc->sc_dev)));
6016 sc->sc_tbi_linkup = 0;
6017 }
6018 } else {
6019 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6020 device_xname(sc->sc_dev)));
6021 sc->sc_tbi_linkup = 0;
6022 }
6023
6024 wm_tbi_set_linkled(sc);
6025
6026 return 0;
6027 }
6028
6029 /*
6030 * wm_tbi_set_linkled:
6031 *
6032 * Update the link LED on 1000BASE-X devices.
6033 */
6034 static void
6035 wm_tbi_set_linkled(struct wm_softc *sc)
6036 {
6037
6038 if (sc->sc_tbi_linkup)
6039 sc->sc_ctrl |= CTRL_SWDPIN(0);
6040 else
6041 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6042
	/* The link LED is active-low on 82540 and newer devices */
6044 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6045
6046 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6047 }
6048
6049 /*
6050 * wm_tbi_check_link:
6051 *
6052 * Check the link on 1000BASE-X devices.
6053 */
6054 static void
6055 wm_tbi_check_link(struct wm_softc *sc)
6056 {
6057 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6058 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6059 uint32_t rxcw, ctrl, status;
6060
6061 status = CSR_READ(sc, WMREG_STATUS);
6062
6063 rxcw = CSR_READ(sc, WMREG_RXCW);
6064 ctrl = CSR_READ(sc, WMREG_CTRL);
6065
6066 /* set link status */
6067 if ((status & STATUS_LU) == 0) {
6068 DPRINTF(WM_DEBUG_LINK,
6069 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6070 sc->sc_tbi_linkup = 0;
6071 } else if (sc->sc_tbi_linkup == 0) {
6072 DPRINTF(WM_DEBUG_LINK,
6073 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6074 (status & STATUS_FD) ? "FDX" : "HDX"));
6075 sc->sc_tbi_linkup = 1;
6076 }
6077
6078 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6079 && ((status & STATUS_LU) == 0)) {
6080 sc->sc_tbi_linkup = 0;
6081 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6082 /* RXCFG storm! */
6083 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6084 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6085 wm_init(ifp);
6086 ifp->if_start(ifp);
6087 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6088 /* If the timer expired, retry autonegotiation */
6089 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6090 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6091 sc->sc_tbi_ticks = 0;
6092 /*
6093 * Reset the link, and let autonegotiation do
6094 * its thing
6095 */
6096 sc->sc_ctrl |= CTRL_LRST;
6097 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6098 delay(1000);
6099 sc->sc_ctrl &= ~CTRL_LRST;
6100 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6101 delay(1000);
6102 CSR_WRITE(sc, WMREG_TXCW,
6103 sc->sc_txcw & ~TXCW_ANE);
6104 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6105 }
6106 }
6107 }
6108
6109 wm_tbi_set_linkled(sc);
6110 }
6111
6112 /*
6113 * wm_gmii_reset:
6114 *
6115 * Reset the PHY.
6116 */
6117 static void
6118 wm_gmii_reset(struct wm_softc *sc)
6119 {
6120 uint32_t reg;
6121 int rv;
6122
6123 /* get phy semaphore */
6124 switch (sc->sc_type) {
6125 case WM_T_82571:
6126 case WM_T_82572:
6127 case WM_T_82573:
6128 case WM_T_82574:
6129 case WM_T_82583:
6130 /* XXX should get sw semaphore, too */
6131 rv = wm_get_swsm_semaphore(sc);
6132 break;
6133 case WM_T_82575:
6134 case WM_T_82576:
6135 case WM_T_82580:
6136 case WM_T_82580ER:
6137 case WM_T_I350:
6138 case WM_T_I210:
6139 case WM_T_I211:
6140 case WM_T_80003:
6141 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6142 break;
6143 case WM_T_ICH8:
6144 case WM_T_ICH9:
6145 case WM_T_ICH10:
6146 case WM_T_PCH:
6147 case WM_T_PCH2:
6148 case WM_T_PCH_LPT:
6149 rv = wm_get_swfwhw_semaphore(sc);
6150 break;
6151 default:
		/* Nothing to do */
6153 rv = 0;
6154 break;
6155 }
6156 if (rv != 0) {
6157 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6158 __func__);
6159 return;
6160 }
6161
6162 switch (sc->sc_type) {
6163 case WM_T_82542_2_0:
6164 case WM_T_82542_2_1:
6165 /* null */
6166 break;
6167 case WM_T_82543:
6168 /*
6169 * With 82543, we need to force speed and duplex on the MAC
6170 * equal to what the PHY speed and duplex configuration is.
6171 * In addition, we need to perform a hardware reset on the PHY
6172 * to take it out of reset.
6173 */
6174 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6175 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6176
6177 /* The PHY reset pin is active-low. */
6178 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6179 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6180 CTRL_EXT_SWDPIN(4));
6181 reg |= CTRL_EXT_SWDPIO(4);
6182
6183 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6184 delay(10*1000);
6185
6186 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6187 delay(150);
6188 #if 0
6189 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6190 #endif
6191 delay(20*1000); /* XXX extra delay to get PHY ID? */
6192 break;
6193 case WM_T_82544: /* reset 10000us */
6194 case WM_T_82540:
6195 case WM_T_82545:
6196 case WM_T_82545_3:
6197 case WM_T_82546:
6198 case WM_T_82546_3:
6199 case WM_T_82541:
6200 case WM_T_82541_2:
6201 case WM_T_82547:
6202 case WM_T_82547_2:
6203 case WM_T_82571: /* reset 100us */
6204 case WM_T_82572:
6205 case WM_T_82573:
6206 case WM_T_82574:
6207 case WM_T_82575:
6208 case WM_T_82576:
6209 case WM_T_82580:
6210 case WM_T_82580ER:
6211 case WM_T_I350:
6212 case WM_T_I210:
6213 case WM_T_I211:
6214 case WM_T_82583:
6215 case WM_T_80003:
6216 /* generic reset */
6217 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6218 delay(20000);
6219 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6220 delay(20000);
6221
6222 if ((sc->sc_type == WM_T_82541)
6223 || (sc->sc_type == WM_T_82541_2)
6224 || (sc->sc_type == WM_T_82547)
6225 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP PHYs are done in igp_reset() */
6227 /* XXX add code to set LED after phy reset */
6228 }
6229 break;
6230 case WM_T_ICH8:
6231 case WM_T_ICH9:
6232 case WM_T_ICH10:
6233 case WM_T_PCH:
6234 case WM_T_PCH2:
6235 case WM_T_PCH_LPT:
6236 /* generic reset */
6237 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6238 delay(100);
6239 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6240 delay(150);
6241 break;
6242 default:
6243 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6244 __func__);
6245 break;
6246 }
6247
6248 /* release PHY semaphore */
6249 switch (sc->sc_type) {
6250 case WM_T_82571:
6251 case WM_T_82572:
6252 case WM_T_82573:
6253 case WM_T_82574:
6254 case WM_T_82583:
6255 /* XXX should put sw semaphore, too */
6256 wm_put_swsm_semaphore(sc);
6257 break;
6258 case WM_T_82575:
6259 case WM_T_82576:
6260 case WM_T_82580:
6261 case WM_T_82580ER:
6262 case WM_T_I350:
6263 case WM_T_I210:
6264 case WM_T_I211:
6265 case WM_T_80003:
6266 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6267 break;
6268 case WM_T_ICH8:
6269 case WM_T_ICH9:
6270 case WM_T_ICH10:
6271 case WM_T_PCH:
6272 case WM_T_PCH2:
6273 case WM_T_PCH_LPT:
6274 wm_put_swfwhw_semaphore(sc);
6275 break;
	default:
		/* Nothing to do */
		break;
6280 }
6281
6282 /* get_cfg_done */
6283 wm_get_cfg_done(sc);
6284
6285 /* extra setup */
6286 switch (sc->sc_type) {
6287 case WM_T_82542_2_0:
6288 case WM_T_82542_2_1:
6289 case WM_T_82543:
6290 case WM_T_82544:
6291 case WM_T_82540:
6292 case WM_T_82545:
6293 case WM_T_82545_3:
6294 case WM_T_82546:
6295 case WM_T_82546_3:
6296 case WM_T_82541_2:
6297 case WM_T_82547_2:
6298 case WM_T_82571:
6299 case WM_T_82572:
6300 case WM_T_82573:
6301 case WM_T_82574:
6302 case WM_T_82575:
6303 case WM_T_82576:
6304 case WM_T_82580:
6305 case WM_T_82580ER:
6306 case WM_T_I350:
6307 case WM_T_I210:
6308 case WM_T_I211:
6309 case WM_T_82583:
6310 case WM_T_80003:
6311 /* null */
6312 break;
6313 case WM_T_82541:
6314 case WM_T_82547:
6315 /* XXX Configure actively LED after PHY reset */
6316 break;
6317 case WM_T_ICH8:
6318 case WM_T_ICH9:
6319 case WM_T_ICH10:
6320 case WM_T_PCH:
6321 case WM_T_PCH2:
6322 case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
6324 delay(10*1000);
6325
6326 if (sc->sc_type == WM_T_PCH)
6327 wm_hv_phy_workaround_ich8lan(sc);
6328
6329 if (sc->sc_type == WM_T_PCH2)
6330 wm_lv_phy_workaround_ich8lan(sc);
6331
6332 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6333 /*
6334 * dummy read to clear the phy wakeup bit after lcd
6335 * reset
6336 */
6337 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6338 }
6339
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */
6344
6345 /* Configure the LCD with the OEM bits in NVM */
6346 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6347 || (sc->sc_type == WM_T_PCH_LPT)) {
6348 /*
6349 * Disable LPLU.
6350 * XXX It seems that 82567 has LPLU, too.
6351 */
6352 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6354 reg |= HV_OEM_BITS_ANEGNOW;
6355 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6356 }
6357 break;
6358 default:
6359 panic("%s: unknown type\n", __func__);
6360 break;
6361 }
6362 }
6363
6364 /*
6365 * wm_gmii_mediainit:
6366 *
6367 * Initialize media for use on 1000BASE-T devices.
6368 */
6369 static void
6370 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6371 {
6372 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6373 struct mii_data *mii = &sc->sc_mii;
6374
6375 /* We have MII. */
6376 sc->sc_flags |= WM_F_HAS_MII;
6377
6378 if (sc->sc_type == WM_T_80003)
6379 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6380 else
6381 sc->sc_tipg = TIPG_1000T_DFLT;
6382
6383 /*
6384 * Let the chip set speed/duplex on its own based on
6385 * signals from the PHY.
6386 * XXXbouyer - I'm not sure this is right for the 80003,
6387 * the em driver only sets CTRL_SLU here - but it seems to work.
6388 */
6389 sc->sc_ctrl |= CTRL_SLU;
6390 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6391
6392 /* Initialize our media structures and probe the GMII. */
6393 mii->mii_ifp = ifp;
6394
6395 /*
6396 * Determine the PHY access method.
6397 *
6398 * For SGMII, use SGMII specific method.
6399 *
6400 * For some devices, we can determine the PHY access method
6401 * from sc_type.
6402 *
	 * For ICH8 variants, it's difficult to determine the PHY access
	 * method from sc_type alone, so use the PCI product ID for some
	 * devices.  For the other ICH8 variants, try the IGP method
	 * first; if no PHY is detected, fall back to the BM method.
6407 */
6408 switch (prodid) {
6409 case PCI_PRODUCT_INTEL_PCH_M_LM:
6410 case PCI_PRODUCT_INTEL_PCH_M_LC:
6411 /* 82577 */
6412 sc->sc_phytype = WMPHY_82577;
6413 mii->mii_readreg = wm_gmii_hv_readreg;
6414 mii->mii_writereg = wm_gmii_hv_writereg;
6415 break;
6416 case PCI_PRODUCT_INTEL_PCH_D_DM:
6417 case PCI_PRODUCT_INTEL_PCH_D_DC:
6418 /* 82578 */
6419 sc->sc_phytype = WMPHY_82578;
6420 mii->mii_readreg = wm_gmii_hv_readreg;
6421 mii->mii_writereg = wm_gmii_hv_writereg;
6422 break;
6423 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6424 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6425 /* 82579 */
6426 sc->sc_phytype = WMPHY_82579;
6427 mii->mii_readreg = wm_gmii_hv_readreg;
6428 mii->mii_writereg = wm_gmii_hv_writereg;
6429 break;
6430 case PCI_PRODUCT_INTEL_I217_LM:
6431 case PCI_PRODUCT_INTEL_I217_V:
6432 case PCI_PRODUCT_INTEL_I218_LM:
6433 case PCI_PRODUCT_INTEL_I218_V:
6434 /* I21[78] */
6435 mii->mii_readreg = wm_gmii_hv_readreg;
6436 mii->mii_writereg = wm_gmii_hv_writereg;
6437 break;
6438 case PCI_PRODUCT_INTEL_82801I_BM:
6439 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6440 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6441 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6442 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6443 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6444 /* 82567 */
6445 sc->sc_phytype = WMPHY_BM;
6446 mii->mii_readreg = wm_gmii_bm_readreg;
6447 mii->mii_writereg = wm_gmii_bm_writereg;
6448 break;
6449 default:
6450 if ((sc->sc_flags & WM_F_SGMII) != 0) {
6451 mii->mii_readreg = wm_sgmii_readreg;
6452 mii->mii_writereg = wm_sgmii_writereg;
6453 } else if (sc->sc_type >= WM_T_80003) {
6454 mii->mii_readreg = wm_gmii_i80003_readreg;
6455 mii->mii_writereg = wm_gmii_i80003_writereg;
6456 } else if (sc->sc_type >= WM_T_I210) {
6457 mii->mii_readreg = wm_gmii_i82544_readreg;
6458 mii->mii_writereg = wm_gmii_i82544_writereg;
6459 } else if (sc->sc_type >= WM_T_82580) {
6460 sc->sc_phytype = WMPHY_82580;
6461 mii->mii_readreg = wm_gmii_82580_readreg;
6462 mii->mii_writereg = wm_gmii_82580_writereg;
6463 } else if (sc->sc_type >= WM_T_82544) {
6464 mii->mii_readreg = wm_gmii_i82544_readreg;
6465 mii->mii_writereg = wm_gmii_i82544_writereg;
6466 } else {
6467 mii->mii_readreg = wm_gmii_i82543_readreg;
6468 mii->mii_writereg = wm_gmii_i82543_writereg;
6469 }
6470 break;
6471 }
6472 mii->mii_statchg = wm_gmii_statchg;
6473
6474 wm_gmii_reset(sc);
6475
6476 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6477 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6478 wm_gmii_mediastatus);
6479
6480 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6481 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6482 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6483 || (sc->sc_type == WM_T_I211)) {
6484 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6485 /* Attach only one port */
6486 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6487 MII_OFFSET_ANY, MIIF_DOPAUSE);
6488 } else {
6489 int i;
6490 uint32_t ctrl_ext;
6491
6492 /* Power on sgmii phy if it is disabled */
6493 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6494 CSR_WRITE(sc, WMREG_CTRL_EXT,
6495 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6496 CSR_WRITE_FLUSH(sc);
6497 delay(300*1000); /* XXX too long */
6498
			/* Try PHY addresses 1 through 7 */
6500 for (i = 1; i < 8; i++)
6501 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6502 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6503
			/* Restore the previous SFP cage power state */
6505 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6506 }
6507 } else {
6508 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6509 MII_OFFSET_ANY, MIIF_DOPAUSE);
6510 }
6511
6512 /*
6513 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6514 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6515 */
6516 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6517 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6518 wm_set_mdio_slow_mode_hv(sc);
6519 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6520 MII_OFFSET_ANY, MIIF_DOPAUSE);
6521 }
6522
6523 /*
6524 * (For ICH8 variants)
6525 * If PHY detection failed, use BM's r/w function and retry.
6526 */
6527 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6528 /* if failed, retry with *_bm_* */
6529 mii->mii_readreg = wm_gmii_bm_readreg;
6530 mii->mii_writereg = wm_gmii_bm_writereg;
6531
6532 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6533 MII_OFFSET_ANY, MIIF_DOPAUSE);
6534 }
6535
6536 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
6538 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6539 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6540 sc->sc_phytype = WMPHY_NONE;
6541 } else {
6542 /*
6543 * PHY Found!
6544 * Check PHY type.
6545 */
6546 uint32_t model;
6547 struct mii_softc *child;
6548
6549 child = LIST_FIRST(&mii->mii_phys);
6550 if (device_is_a(child->mii_dev, "igphy")) {
6551 struct igphy_softc *isc = (struct igphy_softc *)child;
6552
6553 model = isc->sc_mii.mii_mpd_model;
6554 if (model == MII_MODEL_yyINTEL_I82566)
6555 sc->sc_phytype = WMPHY_IGP_3;
6556 }
6557
6558 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6559 }
6560 }
6561
6562 /*
6563 * wm_gmii_mediastatus: [ifmedia interface function]
6564 *
6565 * Get the current interface media status on a 1000BASE-T device.
6566 */
6567 static void
6568 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6569 {
6570 struct wm_softc *sc = ifp->if_softc;
6571
6572 ether_mediastatus(ifp, ifmr);
6573 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6574 | sc->sc_flowflags;
6575 }
6576
6577 /*
6578 * wm_gmii_mediachange: [ifmedia interface function]
6579 *
6580 * Set hardware to newly-selected media on a 1000BASE-T device.
6581 */
6582 static int
6583 wm_gmii_mediachange(struct ifnet *ifp)
6584 {
6585 struct wm_softc *sc = ifp->if_softc;
6586 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6587 int rc;
6588
6589 if ((ifp->if_flags & IFF_UP) == 0)
6590 return 0;
6591
6592 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6593 sc->sc_ctrl |= CTRL_SLU;
6594 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6595 || (sc->sc_type > WM_T_82543)) {
6596 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6597 } else {
6598 sc->sc_ctrl &= ~CTRL_ASDE;
6599 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6600 if (ife->ifm_media & IFM_FDX)
6601 sc->sc_ctrl |= CTRL_FD;
6602 switch (IFM_SUBTYPE(ife->ifm_media)) {
6603 case IFM_10_T:
6604 sc->sc_ctrl |= CTRL_SPEED_10;
6605 break;
6606 case IFM_100_TX:
6607 sc->sc_ctrl |= CTRL_SPEED_100;
6608 break;
6609 case IFM_1000_T:
6610 sc->sc_ctrl |= CTRL_SPEED_1000;
6611 break;
6612 default:
6613 panic("wm_gmii_mediachange: bad media 0x%x",
6614 ife->ifm_media);
6615 }
6616 }
6617 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6618 if (sc->sc_type <= WM_T_82543)
6619 wm_gmii_reset(sc);
6620
6621 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6622 return 0;
6623 return rc;
6624 }
6625
6626 #define MDI_IO CTRL_SWDPIN(2)
6627 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6628 #define MDI_CLK CTRL_SWDPIN(3)
6629
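/*
 * i82543_mii_sendbits:
 *
 *	Bit-bang "nbits" bits of "data" out the i82543's MDIO interface,
 *	MSB first: present each bit on MDI_IO and pulse MDI_CLK.
 */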
6630 static void
6631 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6632 {
6633 uint32_t i, v;
6634
6635 v = CSR_READ(sc, WMREG_CTRL);
6636 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6637 v |= MDI_DIR | CTRL_SWDPIO(3);
6638
6639 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6640 if (data & i)
6641 v |= MDI_IO;
6642 else
6643 v &= ~MDI_IO;
6644 CSR_WRITE(sc, WMREG_CTRL, v);
6645 delay(10);
6646 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6647 delay(10);
6648 CSR_WRITE(sc, WMREG_CTRL, v);
6649 delay(10);
6650 }
6651 }
6652
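/*
 * i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits in from the i82543's MDIO interface: clock once
 *	for the turnaround, sample MDI_IO on each of 16 MDI_CLK pulses,
 *	then clock once more to finish the frame.
 */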
6653 static uint32_t
6654 i82543_mii_recvbits(struct wm_softc *sc)
6655 {
6656 uint32_t v, i, data = 0;
6657
6658 v = CSR_READ(sc, WMREG_CTRL);
6659 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6660 v |= CTRL_SWDPIO(3);
6661
6662 CSR_WRITE(sc, WMREG_CTRL, v);
6663 delay(10);
6664 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6665 delay(10);
6666 CSR_WRITE(sc, WMREG_CTRL, v);
6667 delay(10);
6668
6669 for (i = 0; i < 16; i++) {
6670 data <<= 1;
6671 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6672 delay(10);
6673 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6674 data |= 1;
6675 CSR_WRITE(sc, WMREG_CTRL, v);
6676 delay(10);
6677 }
6678
6679 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6680 delay(10);
6681 CSR_WRITE(sc, WMREG_CTRL, v);
6682 delay(10);
6683
6684 return data;
6685 }
6686
6687 #undef MDI_IO
6688 #undef MDI_DIR
6689 #undef MDI_CLK
6690
6691 /*
6692 * wm_gmii_i82543_readreg: [mii interface function]
6693 *
6694 * Read a PHY register on the GMII (i82543 version).
6695 */
6696 static int
6697 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6698 {
6699 struct wm_softc *sc = device_private(self);
6700 int rv;
6701
6702 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6703 i82543_mii_sendbits(sc, reg | (phy << 5) |
6704 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6705 rv = i82543_mii_recvbits(sc) & 0xffff;
6706
6707 DPRINTF(WM_DEBUG_GMII,
6708 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6709 device_xname(sc->sc_dev), phy, reg, rv));
6710
6711 return rv;
6712 }
6713
6714 /*
6715 * wm_gmii_i82543_writereg: [mii interface function]
6716 *
6717 * Write a PHY register on the GMII (i82543 version).
6718 */
6719 static void
6720 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6721 {
6722 struct wm_softc *sc = device_private(self);
6723
6724 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6725 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6726 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6727 (MII_COMMAND_START << 30), 32);
6728 }
6729
6730 /*
6731 * wm_gmii_i82544_readreg: [mii interface function]
6732 *
6733 * Read a PHY register on the GMII.
6734 */
6735 static int
6736 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6737 {
6738 struct wm_softc *sc = device_private(self);
6739 uint32_t mdic = 0;
6740 int i, rv;
6741
6742 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6743 MDIC_REGADD(reg));
6744
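	/* Poll for MDIC_READY, delaying 50us between reads */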
6745 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6746 mdic = CSR_READ(sc, WMREG_MDIC);
6747 if (mdic & MDIC_READY)
6748 break;
6749 delay(50);
6750 }
6751
6752 if ((mdic & MDIC_READY) == 0) {
6753 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6754 device_xname(sc->sc_dev), phy, reg);
6755 rv = 0;
6756 } else if (mdic & MDIC_E) {
6757 #if 0 /* This is normal if no PHY is present. */
6758 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6759 device_xname(sc->sc_dev), phy, reg);
6760 #endif
6761 rv = 0;
6762 } else {
6763 rv = MDIC_DATA(mdic);
6764 if (rv == 0xffff)
6765 rv = 0;
6766 }
6767
6768 return rv;
6769 }
6770
6771 /*
6772 * wm_gmii_i82544_writereg: [mii interface function]
6773 *
6774 * Write a PHY register on the GMII.
6775 */
6776 static void
6777 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6778 {
6779 struct wm_softc *sc = device_private(self);
6780 uint32_t mdic = 0;
6781 int i;
6782
6783 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6784 MDIC_REGADD(reg) | MDIC_DATA(val));
6785
6786 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6787 mdic = CSR_READ(sc, WMREG_MDIC);
6788 if (mdic & MDIC_READY)
6789 break;
6790 delay(50);
6791 }
6792
6793 if ((mdic & MDIC_READY) == 0)
6794 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6795 device_xname(sc->sc_dev), phy, reg);
6796 else if (mdic & MDIC_E)
6797 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6798 device_xname(sc->sc_dev), phy, reg);
6799 }
6800
6801 /*
6802 * wm_gmii_i80003_readreg: [mii interface function]
6803 *
 *	Read a PHY register on the Kumeran interface.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6807 */
6808 static int
6809 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6810 {
6811 struct wm_softc *sc = device_private(self);
6812 int sem;
6813 int rv;
6814
6815 if (phy != 1) /* only one PHY on kumeran bus */
6816 return 0;
6817
6818 sem = swfwphysem[sc->sc_funcid];
6819 if (wm_get_swfw_semaphore(sc, sem)) {
6820 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6821 __func__);
6822 return 0;
6823 }
6824
6825 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6826 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6827 reg >> GG82563_PAGE_SHIFT);
6828 } else {
6829 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6830 reg >> GG82563_PAGE_SHIFT);
6831 }
	/* Wait another 200us to work around a ready-bit bug in the MDIC register */
6833 delay(200);
6834 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6835 delay(200);
6836
6837 wm_put_swfw_semaphore(sc, sem);
6838 return rv;
6839 }
6840
6841 /*
6842 * wm_gmii_i80003_writereg: [mii interface function]
6843 *
 *	Write a PHY register on the Kumeran interface.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6847 */
6848 static void
6849 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6850 {
6851 struct wm_softc *sc = device_private(self);
6852 int sem;
6853
6854 if (phy != 1) /* only one PHY on kumeran bus */
6855 return;
6856
6857 sem = swfwphysem[sc->sc_funcid];
6858 if (wm_get_swfw_semaphore(sc, sem)) {
6859 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6860 __func__);
6861 return;
6862 }
6863
6864 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6865 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6866 reg >> GG82563_PAGE_SHIFT);
6867 } else {
6868 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6869 reg >> GG82563_PAGE_SHIFT);
6870 }
	/* Wait another 200us to work around a ready-bit bug in the MDIC register */
6872 delay(200);
6873 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6874 delay(200);
6875
6876 wm_put_swfw_semaphore(sc, sem);
6877 }
6878
6879 /*
6880 * wm_gmii_bm_readreg: [mii interface function]
6881 *
 *	Read a PHY register on the BM (82567) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6885 */
6886 static int
6887 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6888 {
6889 struct wm_softc *sc = device_private(self);
6890 int sem;
6891 int rv;
6892
6893 sem = swfwphysem[sc->sc_funcid];
6894 if (wm_get_swfw_semaphore(sc, sem)) {
6895 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6896 __func__);
6897 return 0;
6898 }
6899
6900 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6901 if (phy == 1)
6902 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6903 reg);
6904 else
6905 wm_gmii_i82544_writereg(self, phy,
6906 GG82563_PHY_PAGE_SELECT,
6907 reg >> GG82563_PAGE_SHIFT);
6908 }
6909
6910 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6911 wm_put_swfw_semaphore(sc, sem);
6912 return rv;
6913 }
6914
6915 /*
6916 * wm_gmii_bm_writereg: [mii interface function]
6917 *
 *	Write a PHY register on the BM (82567) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6921 */
6922 static void
6923 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6924 {
6925 struct wm_softc *sc = device_private(self);
6926 int sem;
6927
6928 sem = swfwphysem[sc->sc_funcid];
6929 if (wm_get_swfw_semaphore(sc, sem)) {
6930 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6931 __func__);
6932 return;
6933 }
6934
6935 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6936 if (phy == 1)
6937 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6938 reg);
6939 else
6940 wm_gmii_i82544_writereg(self, phy,
6941 GG82563_PHY_PAGE_SELECT,
6942 reg >> GG82563_PAGE_SHIFT);
6943 }
6944
6945 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6946 wm_put_swfw_semaphore(sc, sem);
6947 }
6948
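/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register on page 800:
 *	enable wakeup-register access through the WUC enable register on
 *	page 769, select page 800, write the register number to the
 *	address opcode register, transfer the data through the data
 *	opcode register, and finally restore the WUC enable bits.
 */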
6949 static void
6950 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6951 {
6952 struct wm_softc *sc = device_private(self);
6953 uint16_t regnum = BM_PHY_REG_NUM(offset);
6954 uint16_t wuce;
6955
6956 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6957 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
6959 }
6960
6961 /* Set page 769 */
6962 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6963 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6964
6965 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6966
6967 wuce &= ~BM_WUC_HOST_WU_BIT;
6968 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6969 wuce | BM_WUC_ENABLE_BIT);
6970
6971 /* Select page 800 */
6972 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6973 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6974
6975 /* Write page 800 */
6976 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6977
6978 if (rd)
6979 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6980 else
6981 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6982
6983 /* Set page 769 */
6984 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6985 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6986
6987 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6988 }
6989
6990 /*
6991 * wm_gmii_hv_readreg: [mii interface function]
6992 *
 *	Read a PHY register on the HV (PCH) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
6996 */
6997 static int
6998 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6999 {
7000 struct wm_softc *sc = device_private(self);
7001 uint16_t page = BM_PHY_REG_PAGE(reg);
7002 uint16_t regnum = BM_PHY_REG_NUM(reg);
7003 uint16_t val;
7004 int rv;
7005
7006 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
7007 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7008 __func__);
7009 return 0;
7010 }
7011
7012 /* XXX Workaround failure in MDIO access while cable is disconnected */
7013 if (sc->sc_phytype == WMPHY_82577) {
7014 /* XXX must write */
7015 }
7016
7017 /* Page 800 works differently than the rest so it has its own func */
7018 if (page == BM_WUC_PAGE) {
7019 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7020 return val;
7021 }
7022
	/*
	 * Pages below 768 work differently from the rest and would need
	 * their own access routine, which is not implemented here.
	 */
7027 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7028 printf("gmii_hv_readreg!!!\n");
7029 return 0;
7030 }
7031
7032 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7033 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7034 page << BME1000_PAGE_SHIFT);
7035 }
7036
7037 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7038 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7039 return rv;
7040 }
7041
7042 /*
7043 * wm_gmii_hv_writereg: [mii interface function]
7044 *
 *	Write a PHY register on the HV (PCH) PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
7048 */
7049 static void
7050 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7051 {
7052 struct wm_softc *sc = device_private(self);
7053 uint16_t page = BM_PHY_REG_PAGE(reg);
7054 uint16_t regnum = BM_PHY_REG_NUM(reg);
7055
7056 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
7057 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7058 __func__);
7059 return;
7060 }
7061
7062 /* XXX Workaround failure in MDIO access while cable is disconnected */
7063
7064 /* Page 800 works differently than the rest so it has its own func */
7065 if (page == BM_WUC_PAGE) {
7066 uint16_t tmp;
7067
7068 tmp = val;
7069 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7070 return;
7071 }
7072
	/*
	 * Pages below 768 work differently from the rest and would need
	 * their own access routine, which is not implemented here.
	 */
7077 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7078 printf("gmii_hv_writereg!!!\n");
7079 return;
7080 }
7081
7082 /*
7083 * XXX Workaround MDIO accesses being disabled after entering IEEE
7084 * Power Down (whenever bit 11 of the PHY control register is set)
7085 */
7086
7087 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7088 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7089 page << BME1000_PAGE_SHIFT);
7090 }
7091
7092 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7093 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7094 }
7095
7096 /*
7097 * wm_sgmii_readreg: [mii interface function]
7098 *
7099 * Read a PHY register on the SGMII
7100 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7102 */
7103 static int
7104 wm_sgmii_readreg(device_t self, int phy, int reg)
7105 {
7106 struct wm_softc *sc = device_private(self);
7107 uint32_t i2ccmd;
7108 int i, rv;
7109
7110 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7111 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7112 __func__);
7113 return 0;
7114 }
7115
7116 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7117 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7118 | I2CCMD_OPCODE_READ;
7119 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7120
7121 /* Poll the ready bit */
7122 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7123 delay(50);
7124 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7125 if (i2ccmd & I2CCMD_READY)
7126 break;
7127 }
7128 if ((i2ccmd & I2CCMD_READY) == 0)
7129 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7130 if ((i2ccmd & I2CCMD_ERROR) != 0)
7131 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7132
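	/* I2CCMD returns the two data bytes swapped; put them back in order */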
7133 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7134
7135 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7136 return rv;
7137 }
7138
7139 /*
7140 * wm_sgmii_writereg: [mii interface function]
7141 *
7142 * Write a PHY register on the SGMII.
7143 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7145 */
7146 static void
7147 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7148 {
7149 struct wm_softc *sc = device_private(self);
7150 uint32_t i2ccmd;
7151 int i;
7152
7153 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7154 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7155 __func__);
7156 return;
7157 }
7158
7159 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7160 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7161 | I2CCMD_OPCODE_WRITE;
7162 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7163
7164 /* Poll the ready bit */
7165 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7166 delay(50);
7167 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7168 if (i2ccmd & I2CCMD_READY)
7169 break;
7170 }
7171 if ((i2ccmd & I2CCMD_READY) == 0)
7172 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7173 if ((i2ccmd & I2CCMD_ERROR) != 0)
7174 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7175
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7177 }
7178
7179 /*
7180 * wm_gmii_82580_readreg: [mii interface function]
7181 *
7182 * Read a PHY register on the 82580 and I350.
7183 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7185 */
7186 static int
7187 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7188 {
7189 struct wm_softc *sc = device_private(self);
7190 int sem;
7191 int rv;
7192
7193 sem = swfwphysem[sc->sc_funcid];
7194 if (wm_get_swfw_semaphore(sc, sem)) {
7195 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7196 __func__);
7197 return 0;
7198 }
7199
7200 rv = wm_gmii_i82544_readreg(self, phy, reg);
7201
7202 wm_put_swfw_semaphore(sc, sem);
7203 return rv;
7204 }
7205
7206 /*
7207 * wm_gmii_82580_writereg: [mii interface function]
7208 *
7209 * Write a PHY register on the 82580 and I350.
7210 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7212 */
7213 static void
7214 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7215 {
7216 struct wm_softc *sc = device_private(self);
7217 int sem;
7218
7219 sem = swfwphysem[sc->sc_funcid];
7220 if (wm_get_swfw_semaphore(sc, sem)) {
7221 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7222 __func__);
7223 return;
7224 }
7225
7226 wm_gmii_i82544_writereg(self, phy, reg, val);
7227
7228 wm_put_swfw_semaphore(sc, sem);
7229 }
7230
7231 /*
7232 * wm_gmii_statchg: [mii interface function]
7233 *
7234 * Callback from MII layer when media changes.
7235 */
7236 static void
7237 wm_gmii_statchg(struct ifnet *ifp)
7238 {
7239 struct wm_softc *sc = ifp->if_softc;
7240 struct mii_data *mii = &sc->sc_mii;
7241
7242 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7243 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7244 sc->sc_fcrtl &= ~FCRTL_XONE;
7245
7246 /*
7247 * Get flow control negotiation result.
7248 */
7249 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7250 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7251 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7252 mii->mii_media_active &= ~IFM_ETH_FMASK;
7253 }
7254
7255 if (sc->sc_flowflags & IFM_FLOW) {
7256 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7257 sc->sc_ctrl |= CTRL_TFCE;
7258 sc->sc_fcrtl |= FCRTL_XONE;
7259 }
7260 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7261 sc->sc_ctrl |= CTRL_RFCE;
7262 }
7263
7264 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7265 DPRINTF(WM_DEBUG_LINK,
7266 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7267 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7268 } else {
7269 DPRINTF(WM_DEBUG_LINK,
7270 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7271 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7272 }
7273
7274 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7275 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7276 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7277 : WMREG_FCRTL, sc->sc_fcrtl);
7278 if (sc->sc_type == WM_T_80003) {
7279 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7280 case IFM_1000_T:
7281 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7282 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7283 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7284 break;
7285 default:
7286 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7287 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7288 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7289 break;
7290 }
7291 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7292 }
7293 }
7294
7295 /*
7296 * wm_kmrn_readreg:
7297 *
7298 * Read a kumeran register
7299 */
7300 static int
7301 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7302 {
7303 int rv;
7304
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7306 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7307 aprint_error_dev(sc->sc_dev,
7308 "%s: failed to get semaphore\n", __func__);
7309 return 0;
7310 }
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7312 if (wm_get_swfwhw_semaphore(sc)) {
7313 aprint_error_dev(sc->sc_dev,
7314 "%s: failed to get semaphore\n", __func__);
7315 return 0;
7316 }
7317 }
7318
7319 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7320 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7321 KUMCTRLSTA_REN);
7322 delay(2);
7323
7324 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7325
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7329 wm_put_swfwhw_semaphore(sc);
7330
7331 return rv;
7332 }
7333
7334 /*
7335 * wm_kmrn_writereg:
7336 *
7337 * Write a kumeran register
7338 */
7339 static void
7340 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7341 {
7342
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7344 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7345 aprint_error_dev(sc->sc_dev,
7346 "%s: failed to get semaphore\n", __func__);
7347 return;
7348 }
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7350 if (wm_get_swfwhw_semaphore(sc)) {
7351 aprint_error_dev(sc->sc_dev,
7352 "%s: failed to get semaphore\n", __func__);
7353 return;
7354 }
7355 }
7356
7357 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7358 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7359 (val & KUMCTRLSTA_MASK));
7360
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7364 wm_put_swfwhw_semaphore(sc);
7365 }
7366
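/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if it is Flash
 *	(82573/82574/82583 with EECD bits 15 and 16 both set).
 */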
7367 static int
7368 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7369 {
7370 uint32_t eecd = 0;
7371
7372 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7373 || sc->sc_type == WM_T_82583) {
7374 eecd = CSR_READ(sc, WMREG_EECD);
7375
7376 /* Isolate bits 15 & 16 */
7377 eecd = ((eecd >> 15) & 0x03);
7378
7379 /* If both bits are set, device is Flash type */
7380 if (eecd == 0x03)
7381 return 0;
7382 }
7383 return 1;
7384 }
7385
7386 static int
7387 wm_get_swsm_semaphore(struct wm_softc *sc)
7388 {
7389 int32_t timeout;
7390 uint32_t swsm;
7391
7392 /* Get the FW semaphore. */
7393 timeout = 1000 + 1; /* XXX */
7394 while (timeout) {
7395 swsm = CSR_READ(sc, WMREG_SWSM);
7396 swsm |= SWSM_SWESMBI;
7397 CSR_WRITE(sc, WMREG_SWSM, swsm);
7398 /* if we managed to set the bit we got the semaphore. */
7399 swsm = CSR_READ(sc, WMREG_SWSM);
7400 if (swsm & SWSM_SWESMBI)
7401 break;
7402
7403 delay(50);
7404 timeout--;
7405 }
7406
7407 if (timeout == 0) {
7408 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7409 /* Release semaphores */
7410 wm_put_swsm_semaphore(sc);
7411 return 1;
7412 }
7413 return 0;
7414 }
7415
7416 static void
7417 wm_put_swsm_semaphore(struct wm_softc *sc)
7418 {
7419 uint32_t swsm;
7420
7421 swsm = CSR_READ(sc, WMREG_SWSM);
7422 swsm &= ~(SWSM_SWESMBI);
7423 CSR_WRITE(sc, WMREG_SWSM, swsm);
7424 }
7425
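/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software half of a SW/FW sync bit pair.  The SWSM
 *	semaphore, where present, guards access to the SW_FW_SYNC
 *	register; the claim succeeds only if neither the software nor
 *	the firmware bit for "mask" is already set.  Return 0 on
 *	success, 1 on timeout.
 */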
7426 static int
7427 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7428 {
7429 uint32_t swfw_sync;
7430 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7431 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
7433
7434 for (timeout = 0; timeout < 200; timeout++) {
7435 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7436 if (wm_get_swsm_semaphore(sc)) {
7437 aprint_error_dev(sc->sc_dev,
7438 "%s: failed to get semaphore\n",
7439 __func__);
7440 return 1;
7441 }
7442 }
7443 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7444 if ((swfw_sync & (swmask | fwmask)) == 0) {
7445 swfw_sync |= swmask;
7446 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7447 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7448 wm_put_swsm_semaphore(sc);
7449 return 0;
7450 }
7451 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7452 wm_put_swsm_semaphore(sc);
7453 delay(5000);
7454 }
7455 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7456 device_xname(sc->sc_dev), mask, swfw_sync);
7457 return 1;
7458 }
7459
7460 static void
7461 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7462 {
7463 uint32_t swfw_sync;
7464
7465 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7466 while (wm_get_swsm_semaphore(sc) != 0)
7467 continue;
7468 }
7469 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7470 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7471 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7472 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7473 wm_put_swsm_semaphore(sc);
7474 }
7475
7476 static int
7477 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7478 {
7479 uint32_t ext_ctrl;
	int timeout;
7481
7482 for (timeout = 0; timeout < 200; timeout++) {
7483 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7484 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7485 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7486
7487 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7488 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7489 return 0;
7490 delay(5000);
7491 }
7492 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7493 device_xname(sc->sc_dev), ext_ctrl);
7494 return 1;
7495 }
7496
7497 static void
7498 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7499 {
7500 uint32_t ext_ctrl;
7501 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7502 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7503 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7504 }
7505
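/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Determine which NVM bank (0 or 1) holds the valid image.  On
 *	ICH8/ICH9 the EECD register may report it directly; otherwise
 *	read each bank's signature byte and pick the bank with a valid
 *	signature.  Return 0 on success, -1 if neither bank is valid.
 */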
7506 static int
7507 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7508 {
7509 uint32_t eecd;
7510 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7511 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7512 uint8_t sig_byte = 0;
7513
7514 switch (sc->sc_type) {
7515 case WM_T_ICH8:
7516 case WM_T_ICH9:
7517 eecd = CSR_READ(sc, WMREG_EECD);
7518 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7519 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7520 return 0;
7521 }
7522 /* FALLTHROUGH */
7523 default:
7524 /* Default to 0 */
7525 *bank = 0;
7526
7527 /* Check bank 0 */
7528 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7529 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7530 *bank = 0;
7531 return 0;
7532 }
7533
7534 /* Check bank 1 */
7535 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7536 &sig_byte);
7537 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7538 *bank = 1;
7539 return 0;
7540 }
7541 }
7542
7543 aprint_error_dev(sc->sc_dev, "EEPROM not present\n");
7544 return -1;
7545 }
7546
/******************************************************************************
 * Reads one or more 16-bit words from the EEPROM using the ICH8's flash
 * access registers.
 *
 * sc - Struct containing variables accessed by shared code
 * offset - offset of the first word in the EEPROM to read
 * words - number of words to read
 * data - buffer that receives the words read from the EEPROM
 *****************************************************************************/
7556 static int
7557 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7558 {
7559 int32_t error = 0;
7560 uint32_t flash_bank = 0;
7561 uint32_t act_offset = 0;
7562 uint32_t bank_offset = 0;
7563 uint16_t word = 0;
7564 uint16_t i = 0;
7565
7566 /* We need to know which is the valid flash bank. In the event
7567 * that we didn't allocate eeprom_shadow_ram, we may not be
7568 * managing flash_bank. So it cannot be trusted and needs
7569 * to be updated with each read.
7570 */
7571 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7572 if (error) {
7573 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7574 __func__);
7575 return error;
7576 }
7577
	/*
	 * Compute the byte offset of the selected bank (the bank size
	 * is in words, hence the * 2).
	 */
7582 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7583
7584 error = wm_get_swfwhw_semaphore(sc);
7585 if (error) {
7586 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7587 __func__);
7588 return error;
7589 }
7590
7591 for (i = 0; i < words; i++) {
7592 /* The NVM part needs a byte offset, hence * 2 */
7593 act_offset = bank_offset + ((offset + i) * 2);
7594 error = wm_read_ich8_word(sc, act_offset, &word);
7595 if (error) {
7596 aprint_error_dev(sc->sc_dev,
7597 "%s: failed to read NVM\n", __func__);
7598 break;
7599 }
7600 data[i] = word;
7601 }
7602
7603 wm_put_swfwhw_semaphore(sc);
7604 return error;
7605 }
7606
7607 /******************************************************************************
7608 * This function does initial flash setup so that a new read/write/erase cycle
7609 * can be started.
7610 *
7611 * sc - The pointer to the hw structure
7612 ****************************************************************************/
7613 static int32_t
7614 wm_ich8_cycle_init(struct wm_softc *sc)
7615 {
7616 uint16_t hsfsts;
7617 int32_t error = 1;
7618 int32_t i = 0;
7619
7620 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7621
7622 	/* Check the Flash Descriptor Valid bit in Hw status */
7623 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7624 return error;
7625 }
7626
7627 /* Clear FCERR in Hw status by writing 1 */
7628 /* Clear DAEL in Hw status by writing a 1 */
7629 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7630
7631 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7632
7633 	/*
7634 	 * Either we should have a hardware SPI cycle-in-progress bit to
7635 	 * check against in order to start a new cycle, or the FDONE bit
7636 	 * should be changed in the hardware so that it reads 1 after a
7637 	 * hardware reset, which could then tell whether a cycle is in
7638 	 * progress or has completed.  We should also have some software
7639 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
7640 	 * so that two threads' accesses are serialized and two threads
7641 	 * don't start a cycle at the same time (see the sketch below).
7642 	 */
7643
7644 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7645 /*
7646 * There is no cycle running at present, so we can start a
7647 * cycle
7648 */
7649
7650 /* Begin by setting Flash Cycle Done. */
7651 hsfsts |= HSFSTS_DONE;
7652 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7653 error = 0;
7654 } else {
7655 /*
7656 		 * Otherwise poll for some time so the current cycle has a
7657 * chance to end before giving up.
7658 */
7659 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7660 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7661 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7662 error = 0;
7663 break;
7664 }
7665 delay(1);
7666 }
7667 if (error == 0) {
7668 /*
7669 			 * The previous cycle finished before the timeout;
7670 			 * now set the Flash Cycle Done.
7671 */
7672 hsfsts |= HSFSTS_DONE;
7673 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7674 }
7675 }
7676 return error;
7677 }
7678
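/*
 * Illustrative sketch only (editorial), addressing the serialization
 * concern in the comment above: a hypothetical kmutex_t sc_flash_lock
 * (it would have to be added to struct wm_softc and initialized at
 * attach time with mutex_init(&sc->sc_flash_lock, MUTEX_DEFAULT,
 * IPL_NET)) could bracket each init/cycle pair so that two threads
 * never start a flash cycle concurrently.
 */
#if 0
	mutex_enter(&sc->sc_flash_lock);
	error = wm_ich8_cycle_init(sc);
	if (error == 0)
		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
	mutex_exit(&sc->sc_flash_lock);
#endif
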
7679 /******************************************************************************
7680 * This function starts a flash cycle and waits for its completion
7681 *
7682 * sc - The pointer to the hw structure
7683 ****************************************************************************/
7684 static int32_t
7685 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7686 {
7687 uint16_t hsflctl;
7688 uint16_t hsfsts;
7689 int32_t error = 1;
7690 uint32_t i = 0;
7691
7692 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7693 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7694 hsflctl |= HSFCTL_GO;
7695 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7696
7697 /* wait till FDONE bit is set to 1 */
7698 do {
7699 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7700 if (hsfsts & HSFSTS_DONE)
7701 break;
7702 delay(1);
7703 i++;
7704 } while (i < timeout);
7705 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7706 error = 0;
7707
7708 return error;
7709 }
7710
7711 /******************************************************************************
7712 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7713 *
7714 * sc - The pointer to the hw structure
7715 * index - The index of the byte or word to read.
7716 * size - Size of data to read, 1=byte 2=word
7717 * data - Pointer to the word to store the value read.
7718 *****************************************************************************/
7719 static int32_t
7720 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7721 uint32_t size, uint16_t* data)
7722 {
7723 uint16_t hsfsts;
7724 uint16_t hsflctl;
7725 uint32_t flash_linear_address;
7726 uint32_t flash_data = 0;
7727 int32_t error = 1;
7728 int32_t count = 0;
7729
7730 	if (size < 1 || size > 2 || data == NULL ||
7731 index > ICH_FLASH_LINEAR_ADDR_MASK)
7732 return error;
7733
7734 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7735 sc->sc_ich8_flash_base;
7736
7737 do {
7738 delay(1);
7739 /* Steps */
7740 error = wm_ich8_cycle_init(sc);
7741 if (error)
7742 break;
7743
7744 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7745 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7746 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7747 & HSFCTL_BCOUNT_MASK;
7748 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7749 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7750
7751 /*
7752 * Write the last 24 bits of index into Flash Linear address
7753 * field in Flash Address
7754 */
7755 /* TODO: TBD maybe check the index against the size of flash */
7756
7757 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7758
7759 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7760
7761 		/*
7762 		 * Check if FCERR is set to 1.  If it is, clear it and try
7763 		 * the whole sequence a few more times; otherwise read in
7764 		 * the value from Flash Data0, least significant byte
7765 		 * first.
7766 		 */
7767 if (error == 0) {
7768 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7769 if (size == 1)
7770 *data = (uint8_t)(flash_data & 0x000000FF);
7771 else if (size == 2)
7772 *data = (uint16_t)(flash_data & 0x0000FFFF);
7773 break;
7774 } else {
7775 /*
7776 * If we've gotten here, then things are probably
7777 * completely hosed, but if the error condition is
7778 * detected, it won't hurt to give it another try...
7779 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7780 */
7781 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7782 if (hsfsts & HSFSTS_ERR) {
7783 /* Repeat for some time before giving up. */
7784 continue;
7785 } else if ((hsfsts & HSFSTS_DONE) == 0)
7786 break;
7787 }
7788 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7789
7790 return error;
7791 }
7792
7793 /******************************************************************************
7794 * Reads a single byte from the NVM using the ICH8 flash access registers.
7795 *
7796 * sc - pointer to wm_hw structure
7797 * index - The index of the byte to read.
7798 * data - Pointer to a byte to store the value read.
7799 *****************************************************************************/
7800 static int32_t
7801 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7802 {
7803 int32_t status;
7804 uint16_t word = 0;
7805
7806 status = wm_read_ich8_data(sc, index, 1, &word);
7807 if (status == 0)
7808 *data = (uint8_t)word;
7809 else
7810 *data = 0;
7811
7812 return status;
7813 }
7814
7815 /******************************************************************************
7816 * Reads a word from the NVM using the ICH8 flash access registers.
7817 *
7818 * sc - pointer to wm_hw structure
7819 * index - The starting byte index of the word to read.
7820 * data - Pointer to a word to store the value read.
7821 *****************************************************************************/
7822 static int32_t
7823 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7824 {
7825 int32_t status;
7826
7827 status = wm_read_ich8_data(sc, index, 2, data);
7828 return status;
7829 }
7830
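/*
 * Illustrative sketch only (editorial): the 'index' argument above is a
 * byte offset, so NVM word n lives at byte offset n * 2 within the
 * active bank (wm_read_eeprom_ich8() above adds the bank offset).
 * Assumes a context where 'sc' is in scope; 'n' is a made-up example.
 */
#if 0
	int n = 0;		/* example word index */
	uint16_t word;

	if (wm_read_ich8_word(sc, n * 2, &word) == 0)
		printf("%s: NVM word %d = 0x%04x\n",
		    device_xname(sc->sc_dev), n, word);
#endif
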
7831 static int
7832 wm_check_mng_mode(struct wm_softc *sc)
7833 {
7834 int rv;
7835
7836 switch (sc->sc_type) {
7837 case WM_T_ICH8:
7838 case WM_T_ICH9:
7839 case WM_T_ICH10:
7840 case WM_T_PCH:
7841 case WM_T_PCH2:
7842 case WM_T_PCH_LPT:
7843 rv = wm_check_mng_mode_ich8lan(sc);
7844 break;
7845 case WM_T_82574:
7846 case WM_T_82583:
7847 rv = wm_check_mng_mode_82574(sc);
7848 break;
7849 case WM_T_82571:
7850 case WM_T_82572:
7851 case WM_T_82573:
7852 case WM_T_80003:
7853 rv = wm_check_mng_mode_generic(sc);
7854 break;
7855 default:
7856 		/* nothing to do */
7857 rv = 0;
7858 break;
7859 }
7860
7861 return rv;
7862 }
7863
7864 static int
7865 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7866 {
7867 uint32_t fwsm;
7868
7869 fwsm = CSR_READ(sc, WMREG_FWSM);
7870
7871 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7872 return 1;
7873
7874 return 0;
7875 }
7876
7877 static int
7878 wm_check_mng_mode_82574(struct wm_softc *sc)
7879 {
7880 uint16_t data;
7881
7882 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7883
7884 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7885 return 1;
7886
7887 return 0;
7888 }
7889
7890 static int
7891 wm_check_mng_mode_generic(struct wm_softc *sc)
7892 {
7893 uint32_t fwsm;
7894
7895 fwsm = CSR_READ(sc, WMREG_FWSM);
7896
7897 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7898 return 1;
7899
7900 return 0;
7901 }
7902
7903 static int
7904 wm_enable_mng_pass_thru(struct wm_softc *sc)
7905 {
7906 uint32_t manc, fwsm, factps;
7907
7908 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7909 return 0;
7910
7911 manc = CSR_READ(sc, WMREG_MANC);
7912
7913 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7914 device_xname(sc->sc_dev), manc));
7915 if (((manc & MANC_RECV_TCO_EN) == 0)
7916 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7917 return 0;
7918
7919 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7920 fwsm = CSR_READ(sc, WMREG_FWSM);
7921 factps = CSR_READ(sc, WMREG_FACTPS);
7922 if (((factps & FACTPS_MNGCG) == 0)
7923 && ((fwsm & FWSM_MODE_MASK)
7924 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7925 return 1;
7926 } else if (((manc & MANC_SMBUS_EN) != 0)
7927 && ((manc & MANC_ASF_EN) == 0))
7928 return 1;
7929
7930 return 0;
7931 }
7932
7933 static int
7934 wm_check_reset_block(struct wm_softc *sc)
7935 {
7936 uint32_t reg;
7937
7938 switch (sc->sc_type) {
7939 case WM_T_ICH8:
7940 case WM_T_ICH9:
7941 case WM_T_ICH10:
7942 case WM_T_PCH:
7943 case WM_T_PCH2:
7944 case WM_T_PCH_LPT:
7945 reg = CSR_READ(sc, WMREG_FWSM);
7946 if ((reg & FWSM_RSPCIPHY) != 0)
7947 return 0;
7948 else
7949 return -1;
7950 break;
7951 case WM_T_82571:
7952 case WM_T_82572:
7953 case WM_T_82573:
7954 case WM_T_82574:
7955 case WM_T_82583:
7956 case WM_T_80003:
7957 reg = CSR_READ(sc, WMREG_MANC);
7958 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7959 return -1;
7960 else
7961 return 0;
7962 break;
7963 default:
7964 /* no problem */
7965 break;
7966 }
7967
7968 return 0;
7969 }
7970
7971 static void
7972 wm_get_hw_control(struct wm_softc *sc)
7973 {
7974 uint32_t reg;
7975
7976 switch (sc->sc_type) {
7977 case WM_T_82573:
7978 reg = CSR_READ(sc, WMREG_SWSM);
7979 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7980 break;
7981 case WM_T_82571:
7982 case WM_T_82572:
7983 case WM_T_82574:
7984 case WM_T_82583:
7985 case WM_T_80003:
7986 case WM_T_ICH8:
7987 case WM_T_ICH9:
7988 case WM_T_ICH10:
7989 case WM_T_PCH:
7990 case WM_T_PCH2:
7991 case WM_T_PCH_LPT:
7992 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7993 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7994 break;
7995 default:
7996 break;
7997 }
7998 }
7999
8000 static void
8001 wm_release_hw_control(struct wm_softc *sc)
8002 {
8003 uint32_t reg;
8004
8005 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8006 return;
8007
8008 if (sc->sc_type == WM_T_82573) {
8009 reg = CSR_READ(sc, WMREG_SWSM);
8010 reg &= ~SWSM_DRV_LOAD;
8011 		CSR_WRITE(sc, WMREG_SWSM, reg);
8012 } else {
8013 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8014 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8015 }
8016 }
8017
8018 /* XXX Currently TBI only */
8019 static int
8020 wm_check_for_link(struct wm_softc *sc)
8021 {
8022 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8023 uint32_t rxcw;
8024 uint32_t ctrl;
8025 uint32_t status;
8026 uint32_t sig;
8027
8028 rxcw = CSR_READ(sc, WMREG_RXCW);
8029 ctrl = CSR_READ(sc, WMREG_CTRL);
8030 status = CSR_READ(sc, WMREG_STATUS);
8031
8032 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8033
8034 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8035 device_xname(sc->sc_dev), __func__,
8036 ((ctrl & CTRL_SWDPIN(1)) == sig),
8037 ((status & STATUS_LU) != 0),
8038 ((rxcw & RXCW_C) != 0)
8039 ));
8040
8041 /*
8042 * SWDPIN LU RXCW
8043 * 0 0 0
8044 * 0 0 1 (should not happen)
8045 * 0 1 0 (should not happen)
8046 * 0 1 1 (should not happen)
8047 * 1 0 0 Disable autonego and force linkup
8048 * 1 0 1 got /C/ but not linkup yet
8049 * 1 1 0 (linkup)
8050 * 1 1 1 If IFM_AUTO, back to autonego
8051 *
8052 */
8053 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8054 && ((status & STATUS_LU) == 0)
8055 && ((rxcw & RXCW_C) == 0)) {
8056 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8057 __func__));
8058 sc->sc_tbi_linkup = 0;
8059 /* Disable auto-negotiation in the TXCW register */
8060 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8061
8062 /*
8063 * Force link-up and also force full-duplex.
8064 *
8065 		 * NOTE: the TFCE and RFCE bits in CTRL may have been
8066 		 * updated automatically, so keep sc->sc_ctrl in sync.
8067 */
8068 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8069 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8070 } else if (((status & STATUS_LU) != 0)
8071 && ((rxcw & RXCW_C) != 0)
8072 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8073 sc->sc_tbi_linkup = 1;
8074 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8075 __func__));
8076 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8077 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8078 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8079 && ((rxcw & RXCW_C) != 0)) {
8080 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8081 } else {
8082 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8083 status));
8084 }
8085
8086 return 0;
8087 }
8088
8089 /* Work-around for 82566 Kumeran PCS lock loss */
8090 static void
8091 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8092 {
8093 int miistatus, active, i;
8094 int reg;
8095
8096 miistatus = sc->sc_mii.mii_media_status;
8097
8098 /* If the link is not up, do nothing */
8099 	if ((miistatus & IFM_ACTIVE) == 0)
8100 return;
8101
8102 active = sc->sc_mii.mii_media_active;
8103
8104 /* Nothing to do if the link is other than 1Gbps */
8105 if (IFM_SUBTYPE(active) != IFM_1000_T)
8106 return;
8107
8108 for (i = 0; i < 10; i++) {
8109 /* read twice */
8110 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8111 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8112 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
8113 goto out; /* GOOD! */
8114
8115 /* Reset the PHY */
8116 wm_gmii_reset(sc);
8117 delay(5*1000);
8118 }
8119
8120 /* Disable GigE link negotiation */
8121 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8122 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8123 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8124
8125 /*
8126 * Call gig speed drop workaround on Gig disable before accessing
8127 * any PHY registers.
8128 */
8129 wm_gig_downshift_workaround_ich8lan(sc);
8130
8131 out:
8132 return;
8133 }
8134
8135 /* WOL from S5 stops working */
8136 static void
8137 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8138 {
8139 uint16_t kmrn_reg;
8140
8141 /* Only for igp3 */
8142 if (sc->sc_phytype == WMPHY_IGP_3) {
8143 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8144 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8145 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8146 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8147 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8148 }
8149 }
8150
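/*
 * Descriptive note (editorial): the workaround above pulses the Kumeran
 * near-end loopback bit on IGP3 PHYs - KUMCTRLSTA_DIAG_NELPBK is set
 * and then immediately cleared through two writes to the same
 * diagnostic register.
 */
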
8151 #ifdef WM_WOL
8152 /* Power down workaround on D3 */
8153 static void
8154 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8155 {
8156 uint32_t reg;
8157 int i;
8158
8159 for (i = 0; i < 2; i++) {
8160 /* Disable link */
8161 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8162 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8163 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8164
8165 /*
8166 * Call gig speed drop workaround on Gig disable before
8167 * accessing any PHY registers
8168 */
8169 if (sc->sc_type == WM_T_ICH8)
8170 wm_gig_downshift_workaround_ich8lan(sc);
8171
8172 /* Write VR power-down enable */
8173 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8174 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8175 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8176 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8177
8178 /* Read it back and test */
8179 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8180 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8181 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8182 break;
8183
8184 /* Issue PHY reset and repeat at most one more time */
8185 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8186 }
8187 }
8188 #endif /* WM_WOL */
8189
8190 /*
8191 * Workaround for pch's PHYs
8192 * XXX should be moved to new PHY driver?
8193 */
8194 static void
8195 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8196 {
8197 if (sc->sc_phytype == WMPHY_82577)
8198 wm_set_mdio_slow_mode_hv(sc);
8199
8200 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8201
8202 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8203
8204 /* 82578 */
8205 if (sc->sc_phytype == WMPHY_82578) {
8206 /* PCH rev. < 3 */
8207 if (sc->sc_rev < 3) {
8208 /* XXX 6 bit shift? Why? Is it page2? */
8209 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8210 0x66c0);
8211 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8212 0xffff);
8213 }
8214
8215 /* XXX phy rev. < 2 */
8216 }
8217
8218 /* Select page 0 */
8219
8220 /* XXX acquire semaphore */
8221 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8222 /* XXX release semaphore */
8223
8224 /*
8225 * Configure the K1 Si workaround during phy reset assuming there is
8226 * link so that it disables K1 if link is in 1Gbps.
8227 */
8228 wm_k1_gig_workaround_hv(sc, 1);
8229 }
8230
8231 static void
8232 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8233 {
8234
8235 wm_set_mdio_slow_mode_hv(sc);
8236 }
8237
8238 static void
8239 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8240 {
8241 int k1_enable = sc->sc_nvm_k1_enabled;
8242
8243 /* XXX acquire semaphore */
8244
8245 if (link) {
8246 k1_enable = 0;
8247
8248 /* Link stall fix for link up */
8249 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8250 } else {
8251 /* Link stall fix for link down */
8252 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8253 }
8254
8255 wm_configure_k1_ich8lan(sc, k1_enable);
8256
8257 /* XXX release semaphore */
8258 }
8259
8260 static void
8261 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8262 {
8263 uint32_t reg;
8264
8265 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8266 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8267 reg | HV_KMRN_MDIO_SLOW);
8268 }
8269
8270 static void
8271 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8272 {
8273 uint32_t ctrl, ctrl_ext, tmp;
8274 uint16_t kmrn_reg;
8275
8276 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8277
8278 if (k1_enable)
8279 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8280 else
8281 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8282
8283 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8284
8285 delay(20);
8286
8287 ctrl = CSR_READ(sc, WMREG_CTRL);
8288 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8289
8290 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8291 tmp |= CTRL_FRCSPD;
8292
8293 CSR_WRITE(sc, WMREG_CTRL, tmp);
8294 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8295 delay(20);
8296
8297 CSR_WRITE(sc, WMREG_CTRL, ctrl);
8298 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8299 delay(20);
8300 }
8301
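/*
 * Descriptive note (editorial): wm_configure_k1_ich8lan() first writes
 * the K1 setting over the Kumeran interface, then briefly forces the
 * MAC speed (CTRL_FRCSPD plus CTRL_EXT_SPD_BYPS) before restoring the
 * original CTRL/CTRL_EXT values, with 20us settling delays in between.
 */
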
8302 static void
8303 wm_smbustopci(struct wm_softc *sc)
8304 {
8305 uint32_t fwsm;
8306
8307 fwsm = CSR_READ(sc, WMREG_FWSM);
8308 if (((fwsm & FWSM_FW_VALID) == 0)
8309 && ((wm_check_reset_block(sc) == 0))) {
8310 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8311 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8312 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8313 delay(10);
8314 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8315 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8316 delay(50*1000);
8317
8318 /*
8319 * Gate automatic PHY configuration by hardware on non-managed
8320 * 82579
8321 */
8322 if (sc->sc_type == WM_T_PCH2)
8323 wm_gate_hw_phy_config_ich8lan(sc, 1);
8324 }
8325 }
8326
8327 static void
8328 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8329 {
8330 uint32_t gcr;
8331 pcireg_t ctrl2;
8332
8333 gcr = CSR_READ(sc, WMREG_GCR);
8334
8335 /* Only take action if timeout value is defaulted to 0 */
8336 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8337 goto out;
8338
8339 if ((gcr & GCR_CAP_VER2) == 0) {
8340 gcr |= GCR_CMPL_TMOUT_10MS;
8341 goto out;
8342 }
8343
8344 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8345 sc->sc_pcixe_capoff + PCIE_DCSR2);
8346 ctrl2 |= WM_PCIE_DCSR2_16MS;
8347 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8348 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8349
8350 out:
8351 /* Disable completion timeout resend */
8352 gcr &= ~GCR_CMPL_TMOUT_RESEND;
8353
8354 CSR_WRITE(sc, WMREG_GCR, gcr);
8355 }
8356
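/*
 * Illustrative sketch only (editorial): sc_pcixe_capoff used above is
 * assumed to have been discovered at attach time, roughly as follows
 * (the real lookup lives elsewhere in this driver).  Assumes 'sc' is
 * in scope.
 */
#if 0
	int capoff;

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
	    &capoff, NULL) != 0)
		sc->sc_pcixe_capoff = capoff;
#endif
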
8357 /* special case - for 82575 - need to do manual init ... */
8358 static void
8359 wm_reset_init_script_82575(struct wm_softc *sc)
8360 {
8361 /*
8362 	 * Remark: this is untested code - we have no board without EEPROM.
8363 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
8364 */
8365
8366 /* SerDes configuration via SERDESCTRL */
8367 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8368 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8369 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8370 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8371
8372 /* CCM configuration via CCMCTL register */
8373 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8374 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8375
8376 /* PCIe lanes configuration */
8377 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8378 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8379 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8380 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8381
8382 /* PCIe PLL Configuration */
8383 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8384 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8385 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8386 }
8387
8388 static void
8389 wm_init_manageability(struct wm_softc *sc)
8390 {
8391
8392 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8393 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8394 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8395
8396 		/* disable hardware interception of ARP */
8397 manc &= ~MANC_ARP_EN;
8398
8399 /* enable receiving management packets to the host */
8400 if (sc->sc_type >= WM_T_82571) {
8401 manc |= MANC_EN_MNG2HOST;
8402 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8403 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8404
8405 }
8406
8407 CSR_WRITE(sc, WMREG_MANC, manc);
8408 }
8409 }
8410
8411 static void
8412 wm_release_manageability(struct wm_softc *sc)
8413 {
8414
8415 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8416 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8417
8418 if (sc->sc_type >= WM_T_82571)
8419 manc &= ~MANC_EN_MNG2HOST;
8420
8421 CSR_WRITE(sc, WMREG_MANC, manc);
8422 }
8423 }
8424
8425 static void
8426 wm_get_wakeup(struct wm_softc *sc)
8427 {
8428
8429 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8430 switch (sc->sc_type) {
8431 case WM_T_82573:
8432 case WM_T_82583:
8433 sc->sc_flags |= WM_F_HAS_AMT;
8434 /* FALLTHROUGH */
8435 case WM_T_80003:
8436 case WM_T_82541:
8437 case WM_T_82547:
8438 case WM_T_82571:
8439 case WM_T_82572:
8440 case WM_T_82574:
8441 case WM_T_82575:
8442 case WM_T_82576:
8443 #if 0 /* XXX */
8444 case WM_T_82580:
8445 case WM_T_82580ER:
8446 case WM_T_I350:
8447 #endif
8448 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8449 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8450 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8451 break;
8452 case WM_T_ICH8:
8453 case WM_T_ICH9:
8454 case WM_T_ICH10:
8455 case WM_T_PCH:
8456 case WM_T_PCH2:
8457 case WM_T_PCH_LPT:
8458 sc->sc_flags |= WM_F_HAS_AMT;
8459 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8460 break;
8461 default:
8462 break;
8463 }
8464
8465 /* 1: HAS_MANAGE */
8466 if (wm_enable_mng_pass_thru(sc) != 0)
8467 sc->sc_flags |= WM_F_HAS_MANAGE;
8468
8469 #ifdef WM_DEBUG
8470 printf("\n");
8471 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8472 printf("HAS_AMT,");
8473 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8474 printf("ARC_SUBSYS_VALID,");
8475 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8476 printf("ASF_FIRMWARE_PRES,");
8477 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8478 printf("HAS_MANAGE,");
8479 printf("\n");
8480 #endif
8481 /*
8482 	 * Note that the WOL flags are set after the EEPROM handling
8483 	 * has been reset.
8484 */
8485 }
8486
8487 #ifdef WM_WOL
8488 /* WOL in the newer chipset interfaces (pchlan) */
8489 static void
8490 wm_enable_phy_wakeup(struct wm_softc *sc)
8491 {
8492 #if 0
8493 uint16_t preg;
8494
8495 /* Copy MAC RARs to PHY RARs */
8496
8497 /* Copy MAC MTA to PHY MTA */
8498
8499 /* Configure PHY Rx Control register */
8500
8501 /* Enable PHY wakeup in MAC register */
8502
8503 /* Configure and enable PHY wakeup in PHY registers */
8504
8505 /* Activate PHY wakeup */
8506
8507 /* XXX */
8508 #endif
8509 }
8510
8511 static void
8512 wm_enable_wakeup(struct wm_softc *sc)
8513 {
8514 uint32_t reg, pmreg;
8515 pcireg_t pmode;
8516
8517 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8518 &pmreg, NULL) == 0)
8519 return;
8520
8521 /* Advertise the wakeup capability */
8522 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8523 | CTRL_SWDPIN(3));
8524 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8525
8526 /* ICH workaround */
8527 switch (sc->sc_type) {
8528 case WM_T_ICH8:
8529 case WM_T_ICH9:
8530 case WM_T_ICH10:
8531 case WM_T_PCH:
8532 case WM_T_PCH2:
8533 case WM_T_PCH_LPT:
8534 /* Disable gig during WOL */
8535 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8536 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8537 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8538 if (sc->sc_type == WM_T_PCH)
8539 wm_gmii_reset(sc);
8540
8541 /* Power down workaround */
8542 if (sc->sc_phytype == WMPHY_82577) {
8543 struct mii_softc *child;
8544
8545 /* Assume that the PHY is copper */
8546 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8547 if (child->mii_mpd_rev <= 2)
8548 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8549 (768 << 5) | 25, 0x0444); /* magic num */
8550 }
8551 break;
8552 default:
8553 break;
8554 }
8555
8556 /* Keep the laser running on fiber adapters */
8557 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8558 || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
8559 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8560 reg |= CTRL_EXT_SWDPIN(3);
8561 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8562 }
8563
8564 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8565 #if 0 /* for the multicast packet */
8566 reg |= WUFC_MC;
8567 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8568 #endif
8569
8570 if (sc->sc_type == WM_T_PCH) {
8571 wm_enable_phy_wakeup(sc);
8572 } else {
8573 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8574 CSR_WRITE(sc, WMREG_WUFC, reg);
8575 }
8576
8577 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8578 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8579 || (sc->sc_type == WM_T_PCH2))
8580 && (sc->sc_phytype == WMPHY_IGP_3))
8581 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8582
8583 /* Request PME */
8584 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8585 #if 0
8586 /* Disable WOL */
8587 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8588 #else
8589 /* For WOL */
8590 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8591 #endif
8592 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8593 }
8594 #endif /* WM_WOL */
8595
8596 static bool
8597 wm_suspend(device_t self, const pmf_qual_t *qual)
8598 {
8599 struct wm_softc *sc = device_private(self);
8600
8601 wm_release_manageability(sc);
8602 wm_release_hw_control(sc);
8603 #ifdef WM_WOL
8604 wm_enable_wakeup(sc);
8605 #endif
8606
8607 return true;
8608 }
8609
8610 static bool
8611 wm_resume(device_t self, const pmf_qual_t *qual)
8612 {
8613 struct wm_softc *sc = device_private(self);
8614
8615 wm_init_manageability(sc);
8616
8617 return true;
8618 }
8619
8620 static void
8621 wm_set_eee_i350(struct wm_softc *sc)
8622 {
8623 uint32_t ipcnfg, eeer;
8624
8625 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8626 eeer = CSR_READ(sc, WMREG_EEER);
8627
8628 if ((sc->sc_flags & WM_F_EEE) != 0) {
8629 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8630 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8631 | EEER_LPI_FC);
8632 } else {
8633 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8634 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8635 | EEER_LPI_FC);
8636 }
8637
8638 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8639 CSR_WRITE(sc, WMREG_EEER, eeer);
8640 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8641 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8642 }
8643
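/*
 * Illustrative sketch only (editorial): a hypothetical control path
 * could toggle Energy-Efficient Ethernet on an I350 by flipping
 * WM_F_EEE and calling wm_set_eee_i350() to push the change to
 * IPCNFG/EEER.  Assumes 'sc' is in scope.
 */
#if 0
	sc->sc_flags &= ~WM_F_EEE;	/* or |= WM_F_EEE to enable */
	wm_set_eee_i350(sc);
#endif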