/*	$NetBSD: if_wm.c,v 1.257 2013/06/19 10:38:51 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.257 2013/06/19 10:38:51 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET)	/* for TSO */
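
/*
 * Illustrative sketch (not part of the driver): because both ring sizes
 * are powers of two, advancing an index is a mask, not a modulo.  E.g.
 * with WM_NTXDESC(sc) == 256, WM_NEXTTX(sc, 255) == (255 + 1) & 255 == 0.
 */
#if 0
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);	/* wraps at ring end */
#endif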

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall; /* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
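
/*
 * Usage sketch (illustrative only, not compiled): chaining two 2k Rx
 * buffers of one jumbo packet.  The tail pointer always addresses the
 * m_next slot of the last mbuf, so each append is O(1).  "m0" and "m1"
 * are hypothetical mbufs here.
 */
#if 0
	WM_RXCHAIN_RESET(sc);
	WM_RXCHAIN_LINK(sc, m0);	/* first buffer of the packet */
	WM_RXCHAIN_LINK(sc, m1);	/* continuation buffer */
#endif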

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
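
/*
 * Typical pattern (a sketch, not a specific call site): CSR writes are
 * posted, so a write that must take effect before a subsequent delay is
 * flushed by the harmless STATUS read hidden in CSR_WRITE_FLUSH().
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);
	delay(10);
#endif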

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
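
/*
 * Note: "sizeof(bus_addr_t) == 8" is a compile-time constant, so on a
 * 32-bit platform the _HI macros above fold to the constant 0 and the
 * 64-bit shift is never emitted.
 */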

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
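
/*
 * Worked example (illustrative): with a 256-entry ring, syncing 4
 * descriptors starting at index 254 issues two bus_dmamap_sync() calls,
 * one for entries 254-255 and one for entries 0-1 after the wrap.
 */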

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
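
/*
 * Worked example (illustrative): with sc_align_tweak == 2, a received
 * frame's 14-byte Ethernet header ends at buffer offset 16, so the IP
 * header that follows it is 4-byte aligned, as the comment above
 * requires.
 */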

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_1000T, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580, WMP_F_1000X },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350, WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_1000T },
	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
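
/*
 * Usage sketch (illustrative only): the I/O window is a two-register
 * indirection -- a write to offset 0 selects the CSR, and offset 4 then
 * reads or writes it, with the same effect as CSR_READ/CSR_WRITE but
 * through I/O space.
 */
#if 0
	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
#endif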

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
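
/*
 * Example (illustrative): for v == 0x123456789, wm_set_dma_addr() stores
 * wa_low = htole32(0x23456789) and wa_high = htole32(0x1); with a 32-bit
 * bus_addr_t the high word is simply 0.
 */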

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that is no problem, because those newer chips
			 * don't have this bug in the first place.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
				    PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
				    PCIX_STATUS_MAXB_SHIFT;
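				/*
				 * Worked example (illustrative): the field
				 * encodes 512 << n bytes, so bytecnt 3 means
				 * 4096 bytes; if maxb is 2 (2048 bytes) the
				 * command register must be clamped down.
				 */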
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
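	/*
	 * The 0x100000000 boundary argument below enforces the "same 4G
	 * segment" requirement noted above: the allocation may not cross
	 * a 4GB boundary.
	 */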
1462 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1463 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1464 &sc->sc_cd_rseg, 0)) != 0) {
1465 aprint_error_dev(sc->sc_dev,
1466 "unable to allocate control data, error = %d\n",
1467 error);
1468 goto fail_0;
1469 }
1470
1471 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1472 sc->sc_cd_rseg, sc->sc_cd_size,
1473 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1474 aprint_error_dev(sc->sc_dev,
1475 "unable to map control data, error = %d\n", error);
1476 goto fail_1;
1477 }
1478
1479 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1480 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1481 aprint_error_dev(sc->sc_dev,
1482 "unable to create control data DMA map, error = %d\n",
1483 error);
1484 goto fail_2;
1485 }
1486
1487 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1488 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1489 aprint_error_dev(sc->sc_dev,
1490 "unable to load control data DMA map, error = %d\n",
1491 error);
1492 goto fail_3;
1493 }
1494
1495 /*
1496 * Create the transmit buffer DMA maps.
1497 */
1498 WM_TXQUEUELEN(sc) =
1499 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1500 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1501 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1502 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1503 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1504 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1505 aprint_error_dev(sc->sc_dev,
1506 "unable to create Tx DMA map %d, error = %d\n",
1507 i, error);
1508 goto fail_4;
1509 }
1510 }
1511
1512 /*
1513 * Create the receive buffer DMA maps.
1514 */
1515 for (i = 0; i < WM_NRXDESC; i++) {
1516 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1517 MCLBYTES, 0, 0,
1518 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1519 aprint_error_dev(sc->sc_dev,
1520 "unable to create Rx DMA map %d error = %d\n",
1521 i, error);
1522 goto fail_5;
1523 }
1524 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1525 }
1526
1527 /* clear interesting stat counters */
1528 CSR_READ(sc, WMREG_COLC);
1529 CSR_READ(sc, WMREG_RXERRC);
1530
1531 /* get PHY control from SMBus to PCIe */
1532 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1533 || (sc->sc_type == WM_T_PCH_LPT))
1534 wm_smbustopci(sc);
1535
1536 /*
1537 * Reset the chip to a known state.
1538 */
1539 wm_reset(sc);
1540
1541 switch (sc->sc_type) {
1542 case WM_T_82571:
1543 case WM_T_82572:
1544 case WM_T_82573:
1545 case WM_T_82574:
1546 case WM_T_82583:
1547 case WM_T_80003:
1548 case WM_T_ICH8:
1549 case WM_T_ICH9:
1550 case WM_T_ICH10:
1551 case WM_T_PCH:
1552 case WM_T_PCH2:
1553 case WM_T_PCH_LPT:
1554 if (wm_check_mng_mode(sc) != 0)
1555 wm_get_hw_control(sc);
1556 break;
1557 default:
1558 break;
1559 }
1560
1561 /*
1562 * Get some information about the EEPROM.
1563 */
1564 switch (sc->sc_type) {
1565 case WM_T_82542_2_0:
1566 case WM_T_82542_2_1:
1567 case WM_T_82543:
1568 case WM_T_82544:
1569 /* Microwire */
1570 sc->sc_ee_addrbits = 6;
1571 break;
1572 case WM_T_82540:
1573 case WM_T_82545:
1574 case WM_T_82545_3:
1575 case WM_T_82546:
1576 case WM_T_82546_3:
1577 /* Microwire */
1578 reg = CSR_READ(sc, WMREG_EECD);
1579 if (reg & EECD_EE_SIZE)
1580 sc->sc_ee_addrbits = 8;
1581 else
1582 sc->sc_ee_addrbits = 6;
1583 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1584 break;
1585 case WM_T_82541:
1586 case WM_T_82541_2:
1587 case WM_T_82547:
1588 case WM_T_82547_2:
1589 reg = CSR_READ(sc, WMREG_EECD);
1590 if (reg & EECD_EE_TYPE) {
1591 /* SPI */
1592 wm_set_spiaddrbits(sc);
1593 } else
1594 /* Microwire */
1595 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1596 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1597 break;
1598 case WM_T_82571:
1599 case WM_T_82572:
1600 /* SPI */
1601 wm_set_spiaddrbits(sc);
1602 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1603 break;
1604 case WM_T_82573:
1605 case WM_T_82574:
1606 case WM_T_82583:
1607 if (wm_is_onboard_nvm_eeprom(sc) == 0)
1608 sc->sc_flags |= WM_F_EEPROM_FLASH;
1609 else {
1610 /* SPI */
1611 wm_set_spiaddrbits(sc);
1612 }
1613 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1614 break;
1615 case WM_T_82575:
1616 case WM_T_82576:
1617 case WM_T_82580:
1618 case WM_T_82580ER:
1619 case WM_T_I350:
1620 case WM_T_80003:
1621 /* SPI */
1622 wm_set_spiaddrbits(sc);
1623 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1624 break;
1625 case WM_T_ICH8:
1626 case WM_T_ICH9:
1627 case WM_T_ICH10:
1628 case WM_T_PCH:
1629 case WM_T_PCH2:
1630 case WM_T_PCH_LPT:
1631 /* FLASH */
1632 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1633 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1634 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1635 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1636 aprint_error_dev(sc->sc_dev,
1637 "can't map FLASH registers\n");
1638 return;
1639 }
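		/*
		 * A hedged reading of the flash layout: GFPREG describes the
		 * gigabit flash region in ICH_FLASH_SECTOR_SIZE (4KB) sectors,
		 * with the region base in the low bits and the limit in bits
		 * 28:16.  The region holds two NVM banks, and the bank size is
		 * kept in 16-bit words, hence the divide by 2 * sizeof
		 * (uint16_t) below.  Illustrative numbers only: base 1 and
		 * limit 16 describe a 64KB region, i.e. two banks of 16K
		 * words each.
		 */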
1640 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1641 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1642 ICH_FLASH_SECTOR_SIZE;
1643 sc->sc_ich8_flash_bank_size =
1644 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1645 sc->sc_ich8_flash_bank_size -=
1646 (reg & ICH_GFPREG_BASE_MASK);
1647 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1648 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1649 break;
1650 case WM_T_I210:
1651 case WM_T_I211:
1652 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1653 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1654 break;
1655 default:
1656 break;
1657 }
1658
1659 /*
1660 	 * Defer printing the EEPROM type until after verifying the checksum.
1661 * This allows the EEPROM type to be printed correctly in the case
1662 * that no EEPROM is attached.
1663 */
1664 /*
1665 * Validate the EEPROM checksum. If the checksum fails, flag
1666 * this for later, so we can fail future reads from the EEPROM.
1667 */
1668 if (wm_validate_eeprom_checksum(sc)) {
1669 /*
1670 		 * Read it a second time; some PCI-e parts fail the
1671 		 * first check due to the link being in a sleep state.
1672 */
1673 if (wm_validate_eeprom_checksum(sc))
1674 sc->sc_flags |= WM_F_EEPROM_INVALID;
1675 }
1676
1677 /* Set device properties (macflags) */
1678 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1679
1680 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1681 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1682 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1683 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1684 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1685 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1686 } else {
1687 if (sc->sc_flags & WM_F_EEPROM_SPI)
1688 eetype = "SPI";
1689 else
1690 eetype = "MicroWire";
1691 aprint_verbose_dev(sc->sc_dev,
1692 "%u word (%d address bits) %s EEPROM\n",
1693 1U << sc->sc_ee_addrbits,
1694 sc->sc_ee_addrbits, eetype);
1695 }
1696
1697 /*
1698 	 * Read the Ethernet address from the EEPROM if it was not
1699 	 * found first in the device properties.
1700 */
1701 ea = prop_dictionary_get(dict, "mac-address");
1702 if (ea != NULL) {
1703 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1704 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1705 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1706 } else {
1707 if (wm_read_mac_addr(sc, enaddr) != 0) {
1708 aprint_error_dev(sc->sc_dev,
1709 "unable to read Ethernet address\n");
1710 return;
1711 }
1712 }
1713
1714 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1715 ether_sprintf(enaddr));
1716
1717 /*
1718 * Read the config info from the EEPROM, and set up various
1719 * bits in the control registers based on their contents.
1720 */
1721 pn = prop_dictionary_get(dict, "i82543-cfg1");
1722 if (pn != NULL) {
1723 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1724 cfg1 = (uint16_t) prop_number_integer_value(pn);
1725 } else {
1726 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1727 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1728 return;
1729 }
1730 }
1731
1732 pn = prop_dictionary_get(dict, "i82543-cfg2");
1733 if (pn != NULL) {
1734 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1735 cfg2 = (uint16_t) prop_number_integer_value(pn);
1736 } else {
1737 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1738 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1739 return;
1740 }
1741 }
1742
1743 /* check for WM_F_WOL */
1744 switch (sc->sc_type) {
1745 case WM_T_82542_2_0:
1746 case WM_T_82542_2_1:
1747 case WM_T_82543:
1748 /* dummy? */
1749 eeprom_data = 0;
1750 apme_mask = EEPROM_CFG3_APME;
1751 break;
1752 case WM_T_82544:
1753 apme_mask = EEPROM_CFG2_82544_APM_EN;
1754 eeprom_data = cfg2;
1755 break;
1756 case WM_T_82546:
1757 case WM_T_82546_3:
1758 case WM_T_82571:
1759 case WM_T_82572:
1760 case WM_T_82573:
1761 case WM_T_82574:
1762 case WM_T_82583:
1763 case WM_T_80003:
1764 default:
1765 apme_mask = EEPROM_CFG3_APME;
1766 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1767 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1768 break;
1769 case WM_T_82575:
1770 case WM_T_82576:
1771 case WM_T_82580:
1772 case WM_T_82580ER:
1773 case WM_T_I350:
1774 case WM_T_ICH8:
1775 case WM_T_ICH9:
1776 case WM_T_ICH10:
1777 case WM_T_PCH:
1778 case WM_T_PCH2:
1779 case WM_T_PCH_LPT:
1780 /* XXX The funcid should be checked on some devices */
1781 apme_mask = WUC_APME;
1782 eeprom_data = CSR_READ(sc, WMREG_WUC);
1783 break;
1784 }
1785
1786 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1787 if ((eeprom_data & apme_mask) != 0)
1788 sc->sc_flags |= WM_F_WOL;
1789 #ifdef WM_DEBUG
1790 if ((sc->sc_flags & WM_F_WOL) != 0)
1791 printf("WOL\n");
1792 #endif
1793
1794 /*
1795 * XXX need special handling for some multiple port cards
1796 	 * to disable a particular port.
1797 */
1798
1799 if (sc->sc_type >= WM_T_82544) {
1800 pn = prop_dictionary_get(dict, "i82543-swdpin");
1801 if (pn != NULL) {
1802 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1803 swdpin = (uint16_t) prop_number_integer_value(pn);
1804 } else {
1805 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1806 aprint_error_dev(sc->sc_dev,
1807 "unable to read SWDPIN\n");
1808 return;
1809 }
1810 }
1811 }
1812
1813 if (cfg1 & EEPROM_CFG1_ILOS)
1814 sc->sc_ctrl |= CTRL_ILOS;
1815 if (sc->sc_type >= WM_T_82544) {
1816 sc->sc_ctrl |=
1817 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1818 CTRL_SWDPIO_SHIFT;
1819 sc->sc_ctrl |=
1820 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1821 CTRL_SWDPINS_SHIFT;
1822 } else {
1823 sc->sc_ctrl |=
1824 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1825 CTRL_SWDPIO_SHIFT;
1826 }
1827
1828 #if 0
1829 if (sc->sc_type >= WM_T_82544) {
1830 if (cfg1 & EEPROM_CFG1_IPS0)
1831 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1832 if (cfg1 & EEPROM_CFG1_IPS1)
1833 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1834 sc->sc_ctrl_ext |=
1835 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1836 CTRL_EXT_SWDPIO_SHIFT;
1837 sc->sc_ctrl_ext |=
1838 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1839 CTRL_EXT_SWDPINS_SHIFT;
1840 } else {
1841 sc->sc_ctrl_ext |=
1842 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1843 CTRL_EXT_SWDPIO_SHIFT;
1844 }
1845 #endif
1846
1847 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1848 #if 0
1849 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1850 #endif
1851
1852 /*
1853 * Set up some register offsets that are different between
1854 * the i82542 and the i82543 and later chips.
1855 */
1856 if (sc->sc_type < WM_T_82543) {
1857 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1858 sc->sc_tdt_reg = WMREG_OLD_TDT;
1859 } else {
1860 sc->sc_rdt_reg = WMREG_RDT;
1861 sc->sc_tdt_reg = WMREG_TDT;
1862 }
1863
1864 if (sc->sc_type == WM_T_PCH) {
1865 uint16_t val;
1866
1867 /* Save the NVM K1 bit setting */
1868 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1869
1870 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1871 sc->sc_nvm_k1_enabled = 1;
1872 else
1873 sc->sc_nvm_k1_enabled = 0;
1874 }
1875
1876 /*
1877 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
1878 * media structures accordingly.
1879 */
1880 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1881 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1882 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
1883 || sc->sc_type == WM_T_82573
1884 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1885 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1886 wm_gmii_mediainit(sc, wmp->wmp_product);
1887 } else if (sc->sc_type < WM_T_82543 ||
1888 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1889 if (wmp->wmp_flags & WMP_F_1000T)
1890 aprint_error_dev(sc->sc_dev,
1891 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1892 wm_tbi_mediainit(sc);
1893 } else {
1894 switch (sc->sc_type) {
1895 case WM_T_82575:
1896 case WM_T_82576:
1897 case WM_T_82580:
1898 case WM_T_82580ER:
1899 case WM_T_I350:
1900 case WM_T_I210:
1901 case WM_T_I211:
1902 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1903 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1904 case CTRL_EXT_LINK_MODE_SGMII:
1905 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1906 sc->sc_flags |= WM_F_SGMII;
1907 CSR_WRITE(sc, WMREG_CTRL_EXT,
1908 reg | CTRL_EXT_I2C_ENA);
1909 wm_gmii_mediainit(sc, wmp->wmp_product);
1910 break;
1911 case CTRL_EXT_LINK_MODE_1000KX:
1912 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1913 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1914 CSR_WRITE(sc, WMREG_CTRL_EXT,
1915 reg | CTRL_EXT_I2C_ENA);
1916 				panic("not supported yet");
1917 break;
1918 case CTRL_EXT_LINK_MODE_GMII:
1919 default:
1920 CSR_WRITE(sc, WMREG_CTRL_EXT,
1921 reg & ~CTRL_EXT_I2C_ENA);
1922 wm_gmii_mediainit(sc, wmp->wmp_product);
1923 break;
1924 }
1925 break;
1926 default:
1927 if (wmp->wmp_flags & WMP_F_1000X)
1928 aprint_error_dev(sc->sc_dev,
1929 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1930 wm_gmii_mediainit(sc, wmp->wmp_product);
1931 }
1932 }
1933
1934 ifp = &sc->sc_ethercom.ec_if;
1935 xname = device_xname(sc->sc_dev);
1936 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1937 ifp->if_softc = sc;
1938 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1939 ifp->if_ioctl = wm_ioctl;
1940 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1941 ifp->if_start = wm_nq_start;
1942 else
1943 ifp->if_start = wm_start;
1944 ifp->if_watchdog = wm_watchdog;
1945 ifp->if_init = wm_init;
1946 ifp->if_stop = wm_stop;
1947 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1948 IFQ_SET_READY(&ifp->if_snd);
1949
1950 /* Check for jumbo frame */
1951 switch (sc->sc_type) {
1952 case WM_T_82573:
1953 /* XXX limited to 9234 if ASPM is disabled */
1954 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1955 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1956 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1957 break;
1958 case WM_T_82571:
1959 case WM_T_82572:
1960 case WM_T_82574:
1961 case WM_T_82575:
1962 case WM_T_82576:
1963 case WM_T_82580:
1964 case WM_T_82580ER:
1965 case WM_T_I350:
1966 case WM_T_I210:
1967 case WM_T_I211:
1968 case WM_T_80003:
1969 case WM_T_ICH9:
1970 case WM_T_ICH10:
1971 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1972 case WM_T_PCH_LPT:
1973 /* XXX limited to 9234 */
1974 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1975 break;
1976 case WM_T_PCH:
1977 /* XXX limited to 4096 */
1978 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1979 break;
1980 case WM_T_82542_2_0:
1981 case WM_T_82542_2_1:
1982 case WM_T_82583:
1983 case WM_T_ICH8:
1984 /* No support for jumbo frame */
1985 break;
1986 default:
1987 /* ETHER_MAX_LEN_JUMBO */
1988 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1989 break;
1990 }
1991
1992 /*
1993 	 * If we're an i82543 or greater, we can support VLANs.
1994 */
1995 if (sc->sc_type >= WM_T_82543)
1996 sc->sc_ethercom.ec_capabilities |=
1997 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1998
1999 /*
2000 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2001 * on i82543 and later.
2002 */
2003 if (sc->sc_type >= WM_T_82543) {
2004 ifp->if_capabilities |=
2005 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2006 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2007 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2008 IFCAP_CSUM_TCPv6_Tx |
2009 IFCAP_CSUM_UDPv6_Tx;
2010 }
2011
2012 /*
2013 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2014 *
2015 * 82541GI (8086:1076) ... no
2016 * 82572EI (8086:10b9) ... yes
2017 */
2018 if (sc->sc_type >= WM_T_82571) {
2019 ifp->if_capabilities |=
2020 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2021 }
2022
2023 /*
2024 	 * If we're an i82544 or greater (except i82547), we can do
2025 * TCP segmentation offload.
2026 */
2027 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2028 ifp->if_capabilities |= IFCAP_TSOv4;
2029 }
2030
2031 if (sc->sc_type >= WM_T_82571) {
2032 ifp->if_capabilities |= IFCAP_TSOv6;
2033 }
2034
2035 /*
2036 * Attach the interface.
2037 */
2038 if_attach(ifp);
2039 ether_ifattach(ifp, enaddr);
2040 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2041 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2042
2043 #ifdef WM_EVENT_COUNTERS
2044 /* Attach event counters. */
2045 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2046 NULL, xname, "txsstall");
2047 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2048 NULL, xname, "txdstall");
2049 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2050 NULL, xname, "txfifo_stall");
2051 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2052 NULL, xname, "txdw");
2053 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2054 NULL, xname, "txqe");
2055 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2056 NULL, xname, "rxintr");
2057 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2058 NULL, xname, "linkintr");
2059
2060 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2061 NULL, xname, "rxipsum");
2062 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2063 NULL, xname, "rxtusum");
2064 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2065 NULL, xname, "txipsum");
2066 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2067 NULL, xname, "txtusum");
2068 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2069 NULL, xname, "txtusum6");
2070
2071 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2072 NULL, xname, "txtso");
2073 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2074 NULL, xname, "txtso6");
2075 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2076 NULL, xname, "txtsopain");
2077
2078 for (i = 0; i < WM_NTXSEGS; i++) {
2079 		snprintf(wm_txseg_evcnt_names[i],
		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2080 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2081 NULL, xname, wm_txseg_evcnt_names[i]);
2082 }
2083
2084 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2085 NULL, xname, "txdrop");
2086
2087 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2088 NULL, xname, "tu");
2089
2090 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2091 NULL, xname, "tx_xoff");
2092 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2093 NULL, xname, "tx_xon");
2094 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2095 NULL, xname, "rx_xoff");
2096 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2097 NULL, xname, "rx_xon");
2098 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2099 NULL, xname, "rx_macctl");
2100 #endif /* WM_EVENT_COUNTERS */
2101
2102 if (pmf_device_register(self, wm_suspend, wm_resume))
2103 pmf_class_network_register(self, ifp);
2104 else
2105 aprint_error_dev(self, "couldn't establish power handler\n");
2106
2107 return;
2108
2109 /*
2110 * Free any resources we've allocated during the failed attach
2111 * attempt. Do this in reverse order and fall through.
2112 */
2113 fail_5:
2114 for (i = 0; i < WM_NRXDESC; i++) {
2115 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2116 bus_dmamap_destroy(sc->sc_dmat,
2117 sc->sc_rxsoft[i].rxs_dmamap);
2118 }
2119 fail_4:
2120 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2121 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2122 bus_dmamap_destroy(sc->sc_dmat,
2123 sc->sc_txsoft[i].txs_dmamap);
2124 }
2125 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2126 fail_3:
2127 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2128 fail_2:
2129 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2130 sc->sc_cd_size);
2131 fail_1:
2132 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2133 fail_0:
2134 return;
2135 }
2136
2137 static int
2138 wm_detach(device_t self, int flags __unused)
2139 {
2140 struct wm_softc *sc = device_private(self);
2141 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2142 int i, s;
2143
2144 s = splnet();
2145 /* Stop the interface. Callouts are stopped in it. */
2146 wm_stop(ifp, 1);
2147 splx(s);
2148
2149 pmf_device_deregister(self);
2150
2151 /* Tell the firmware about the release */
2152 wm_release_manageability(sc);
2153 wm_release_hw_control(sc);
2154
2155 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2156
2157 /* Delete all remaining media. */
2158 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2159
2160 ether_ifdetach(ifp);
2161 if_detach(ifp);
2162
2164 /* Unload RX dmamaps and free mbufs */
2165 wm_rxdrain(sc);
2166
2167 /* Free dmamap. It's the same as the end of the wm_attach() function */
2168 for (i = 0; i < WM_NRXDESC; i++) {
2169 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2170 bus_dmamap_destroy(sc->sc_dmat,
2171 sc->sc_rxsoft[i].rxs_dmamap);
2172 }
2173 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2174 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2175 bus_dmamap_destroy(sc->sc_dmat,
2176 sc->sc_txsoft[i].txs_dmamap);
2177 }
2178 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2179 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2180 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2181 sc->sc_cd_size);
2182 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2183
2184 /* Disestablish the interrupt handler */
2185 if (sc->sc_ih != NULL) {
2186 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2187 sc->sc_ih = NULL;
2188 }
2189
2190 /* Unmap the registers */
2191 if (sc->sc_ss) {
2192 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2193 sc->sc_ss = 0;
2194 }
2195
2196 if (sc->sc_ios) {
2197 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2198 sc->sc_ios = 0;
2199 }
2200
2201 return 0;
2202 }
2203
2204 /*
2205 * wm_tx_offload:
2206 *
2207 * Set up TCP/IP checksumming parameters for the
2208 * specified packet.
2209 */
2210 static int
2211 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2212 uint8_t *fieldsp)
2213 {
2214 struct mbuf *m0 = txs->txs_mbuf;
2215 struct livengood_tcpip_ctxdesc *t;
2216 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2217 uint32_t ipcse;
2218 struct ether_header *eh;
2219 int offset, iphl;
2220 uint8_t fields;
2221
2222 /*
2223 * XXX It would be nice if the mbuf pkthdr had offset
2224 * fields for the protocol headers.
2225 */
2226
2227 eh = mtod(m0, struct ether_header *);
2228 	switch (ntohs(eh->ether_type)) {
2229 case ETHERTYPE_IP:
2230 case ETHERTYPE_IPV6:
2231 offset = ETHER_HDR_LEN;
2232 break;
2233
2234 case ETHERTYPE_VLAN:
2235 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2236 break;
2237
2238 default:
2239 /*
2240 * Don't support this protocol or encapsulation.
2241 */
2242 *fieldsp = 0;
2243 *cmdp = 0;
2244 return 0;
2245 }
2246
2247 if ((m0->m_pkthdr.csum_flags &
2248 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2249 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2250 } else {
2251 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2252 }
2253 ipcse = offset + iphl - 1;
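	/*
	 * ipcse is the inclusive offset of the last byte covered by the
	 * IP checksum.  E.g. for an untagged frame with a 20-byte IPv4
	 * header: offset = 14, iphl = 20, so ipcse = 33.
	 */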
2254
2255 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2256 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2257 seg = 0;
2258 fields = 0;
2259
2260 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2261 int hlen = offset + iphl;
2262 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2263
2264 if (__predict_false(m0->m_len <
2265 (hlen + sizeof(struct tcphdr)))) {
2266 /*
2267 * TCP/IP headers are not in the first mbuf; we need
2268 * to do this the slow and painful way. Let's just
2269 * hope this doesn't happen very often.
2270 */
2271 struct tcphdr th;
2272
2273 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2274
2275 m_copydata(m0, hlen, sizeof(th), &th);
2276 if (v4) {
2277 struct ip ip;
2278
2279 m_copydata(m0, offset, sizeof(ip), &ip);
2280 ip.ip_len = 0;
2281 m_copyback(m0,
2282 offset + offsetof(struct ip, ip_len),
2283 sizeof(ip.ip_len), &ip.ip_len);
2284 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2285 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2286 } else {
2287 struct ip6_hdr ip6;
2288
2289 m_copydata(m0, offset, sizeof(ip6), &ip6);
2290 ip6.ip6_plen = 0;
2291 m_copyback(m0,
2292 offset + offsetof(struct ip6_hdr, ip6_plen),
2293 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2294 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2295 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2296 }
2297 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2298 sizeof(th.th_sum), &th.th_sum);
2299
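			/* th_off counts 32-bit words; << 2 converts to bytes. */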
2300 hlen += th.th_off << 2;
2301 } else {
2302 /*
2303 * TCP/IP headers are in the first mbuf; we can do
2304 * this the easy way.
2305 */
2306 struct tcphdr *th;
2307
2308 if (v4) {
2309 struct ip *ip =
2310 (void *)(mtod(m0, char *) + offset);
2311 th = (void *)(mtod(m0, char *) + hlen);
2312
2313 ip->ip_len = 0;
2314 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2315 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2316 } else {
2317 struct ip6_hdr *ip6 =
2318 (void *)(mtod(m0, char *) + offset);
2319 th = (void *)(mtod(m0, char *) + hlen);
2320
2321 ip6->ip6_plen = 0;
2322 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2323 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2324 }
2325 hlen += th->th_off << 2;
2326 }
2327
2328 if (v4) {
2329 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2330 cmdlen |= WTX_TCPIP_CMD_IP;
2331 } else {
2332 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2333 ipcse = 0;
2334 }
2335 cmd |= WTX_TCPIP_CMD_TSE;
2336 cmdlen |= WTX_TCPIP_CMD_TSE |
2337 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2338 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2339 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2340 }
2341
2342 /*
2343 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2344 * offload feature, if we load the context descriptor, we
2345 * MUST provide valid values for IPCSS and TUCSS fields.
2346 */
2347
2348 ipcs = WTX_TCPIP_IPCSS(offset) |
2349 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2350 WTX_TCPIP_IPCSE(ipcse);
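	/*
	 * Example of the resulting packing for an untagged IPv4 frame:
	 * IPCSS = 14 (start of the IP header), IPCSO = 24 (14 plus the
	 * 10-byte offset of ip_sum), IPCSE = 33 as computed above.
	 */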
2351 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2352 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2353 fields |= WTX_IXSM;
2354 }
2355
2356 offset += iphl;
2357
2358 if (m0->m_pkthdr.csum_flags &
2359 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2360 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2361 fields |= WTX_TXSM;
2362 tucs = WTX_TCPIP_TUCSS(offset) |
2363 WTX_TCPIP_TUCSO(offset +
2364 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2365 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2366 } else if ((m0->m_pkthdr.csum_flags &
2367 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2368 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2369 fields |= WTX_TXSM;
2370 tucs = WTX_TCPIP_TUCSS(offset) |
2371 WTX_TCPIP_TUCSO(offset +
2372 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2373 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2374 } else {
2375 /* Just initialize it to a valid TCP context. */
2376 tucs = WTX_TCPIP_TUCSS(offset) |
2377 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2378 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2379 }
2380
2381 /* Fill in the context descriptor. */
2382 t = (struct livengood_tcpip_ctxdesc *)
2383 &sc->sc_txdescs[sc->sc_txnext];
2384 t->tcpip_ipcs = htole32(ipcs);
2385 t->tcpip_tucs = htole32(tucs);
2386 t->tcpip_cmdlen = htole32(cmdlen);
2387 t->tcpip_seg = htole32(seg);
2388 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2389
2390 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2391 txs->txs_ndesc++;
2392
2393 *cmdp = cmd;
2394 *fieldsp = fields;
2395
2396 return 0;
2397 }
2398
2399 static void
2400 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2401 {
2402 struct mbuf *m;
2403 int i;
2404
2405 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2406 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2407 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2408 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2409 m->m_data, m->m_len, m->m_flags);
2410 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2411 i, i == 1 ? "" : "s");
2412 }
2413
2414 /*
2415 * wm_82547_txfifo_stall:
2416 *
2417 * Callout used to wait for the 82547 Tx FIFO to drain,
2418 * reset the FIFO pointers, and restart packet transmission.
2419 */
2420 static void
2421 wm_82547_txfifo_stall(void *arg)
2422 {
2423 struct wm_softc *sc = arg;
2424 int s;
2425
2426 s = splnet();
2427
2428 if (sc->sc_txfifo_stall) {
2429 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2430 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2431 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2432 /*
2433 * Packets have drained. Stop transmitter, reset
2434 * FIFO pointers, restart transmitter, and kick
2435 * the packet queue.
2436 */
2437 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2438 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2439 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2440 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2441 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2442 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2443 CSR_WRITE(sc, WMREG_TCTL, tctl);
2444 CSR_WRITE_FLUSH(sc);
2445
2446 sc->sc_txfifo_head = 0;
2447 sc->sc_txfifo_stall = 0;
2448 wm_start(&sc->sc_ethercom.ec_if);
2449 } else {
2450 /*
2451 * Still waiting for packets to drain; try again in
2452 * another tick.
2453 */
2454 callout_schedule(&sc->sc_txfifo_ch, 1);
2455 }
2456 }
2457
2458 splx(s);
2459 }
2460
2461 static void
2462 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2463 {
2464 uint32_t reg;
2465
2466 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2467
2468 if (on != 0)
2469 reg |= EXTCNFCTR_GATE_PHY_CFG;
2470 else
2471 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2472
2473 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2474 }
2475
2476 /*
2477 * wm_82547_txfifo_bugchk:
2478 *
2479 * Check for bug condition in the 82547 Tx FIFO. We need to
2480 * prevent enqueueing a packet that would wrap around the end
2481 	 * of the Tx FIFO ring buffer, otherwise the chip will croak.
2482 *
2483 * We do this by checking the amount of space before the end
2484 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2485 * the Tx FIFO, wait for all remaining packets to drain, reset
2486 * the internal FIFO pointers to the beginning, and restart
2487 * transmission on the interface.
2488 */
2489 #define WM_FIFO_HDR 0x10
2490 #define WM_82547_PAD_LEN 0x3e0
2491 static int
2492 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2493 {
2494 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2495 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
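	/*
	 * Illustrative arithmetic: a full-sized 1514-byte frame occupies
	 * roundup(1514 + 16, 16) = 1536 bytes of FIFO space, WM_FIFO_HDR
	 * accounting for the per-packet header the FIFO stores (as we
	 * understand the erratum).
	 */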
2496
2497 /* Just return if already stalled. */
2498 if (sc->sc_txfifo_stall)
2499 return 1;
2500
2501 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2502 /* Stall only occurs in half-duplex mode. */
2503 goto send_packet;
2504 }
2505
2506 if (len >= WM_82547_PAD_LEN + space) {
2507 sc->sc_txfifo_stall = 1;
2508 callout_schedule(&sc->sc_txfifo_ch, 1);
2509 return 1;
2510 }
2511
2512 send_packet:
2513 sc->sc_txfifo_head += len;
2514 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2515 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2516
2517 return 0;
2518 }
2519
2520 /*
2521 * wm_start: [ifnet interface function]
2522 *
2523 * Start packet transmission on the interface.
2524 */
2525 static void
2526 wm_start(struct ifnet *ifp)
2527 {
2528 struct wm_softc *sc = ifp->if_softc;
2529 struct mbuf *m0;
2530 struct m_tag *mtag;
2531 struct wm_txsoft *txs;
2532 bus_dmamap_t dmamap;
2533 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2534 bus_addr_t curaddr;
2535 bus_size_t seglen, curlen;
2536 uint32_t cksumcmd;
2537 uint8_t cksumfields;
2538
2539 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2540 return;
2541
2542 /*
2543 * Remember the previous number of free descriptors.
2544 */
2545 ofree = sc->sc_txfree;
2546
2547 /*
2548 * Loop through the send queue, setting up transmit descriptors
2549 * until we drain the queue, or use up all available transmit
2550 * descriptors.
2551 */
2552 for (;;) {
2553 /* Grab a packet off the queue. */
2554 IFQ_POLL(&ifp->if_snd, m0);
2555 if (m0 == NULL)
2556 break;
2557
2558 DPRINTF(WM_DEBUG_TX,
2559 ("%s: TX: have packet to transmit: %p\n",
2560 device_xname(sc->sc_dev), m0));
2561
2562 /* Get a work queue entry. */
2563 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2564 wm_txintr(sc);
2565 if (sc->sc_txsfree == 0) {
2566 DPRINTF(WM_DEBUG_TX,
2567 ("%s: TX: no free job descriptors\n",
2568 device_xname(sc->sc_dev)));
2569 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2570 break;
2571 }
2572 }
2573
2574 txs = &sc->sc_txsoft[sc->sc_txsnext];
2575 dmamap = txs->txs_dmamap;
2576
2577 use_tso = (m0->m_pkthdr.csum_flags &
2578 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2579
2580 /*
2581 * So says the Linux driver:
2582 * The controller does a simple calculation to make sure
2583 * there is enough room in the FIFO before initiating the
2584 * DMA for each buffer. The calc is:
2585 * 4 = ceil(buffer len / MSS)
2586 * To make sure we don't overrun the FIFO, adjust the max
2587 * buffer len if the MSS drops.
2588 */
2589 dmamap->dm_maxsegsz =
2590 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2591 ? m0->m_pkthdr.segsz << 2
2592 : WTX_MAX_LEN;
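		/*
		 * E.g. with a typical MSS of 1460, each DMA segment is
		 * capped at 1460 << 2 = 5840 bytes instead of the usual
		 * WTX_MAX_LEN, per the workaround quoted above.
		 */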
2593
2594 /*
2595 * Load the DMA map. If this fails, the packet either
2596 * didn't fit in the allotted number of segments, or we
2597 * were short on resources. For the too-many-segments
2598 * case, we simply report an error and drop the packet,
2599 * since we can't sanely copy a jumbo packet to a single
2600 * buffer.
2601 */
2602 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2603 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2604 if (error) {
2605 if (error == EFBIG) {
2606 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2607 log(LOG_ERR, "%s: Tx packet consumes too many "
2608 "DMA segments, dropping...\n",
2609 device_xname(sc->sc_dev));
2610 IFQ_DEQUEUE(&ifp->if_snd, m0);
2611 wm_dump_mbuf_chain(sc, m0);
2612 m_freem(m0);
2613 continue;
2614 }
2615 /*
2616 * Short on resources, just stop for now.
2617 */
2618 DPRINTF(WM_DEBUG_TX,
2619 ("%s: TX: dmamap load failed: %d\n",
2620 device_xname(sc->sc_dev), error));
2621 break;
2622 }
2623
2624 segs_needed = dmamap->dm_nsegs;
2625 if (use_tso) {
2626 /* For sentinel descriptor; see below. */
2627 segs_needed++;
2628 }
2629
2630 /*
2631 * Ensure we have enough descriptors free to describe
2632 * the packet. Note, we always reserve one descriptor
2633 * at the end of the ring due to the semantics of the
2634 * TDT register, plus one more in the event we need
2635 * to load offload context.
2636 */
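		/*
		 * Numeric example: with sc_txfree = 5, a packet needing
		 * 4 descriptors is deferred, since 4 > 5 - 2 once the
		 * TDT sentinel and a possible context descriptor are
		 * reserved.
		 */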
2637 if (segs_needed > sc->sc_txfree - 2) {
2638 /*
2639 * Not enough free descriptors to transmit this
2640 * packet. We haven't committed anything yet,
2641 * so just unload the DMA map, put the packet
2642 			 * back on the queue, and punt. Notify the upper
2643 * layer that there are no more slots left.
2644 */
2645 DPRINTF(WM_DEBUG_TX,
2646 ("%s: TX: need %d (%d) descriptors, have %d\n",
2647 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2648 segs_needed, sc->sc_txfree - 1));
2649 ifp->if_flags |= IFF_OACTIVE;
2650 bus_dmamap_unload(sc->sc_dmat, dmamap);
2651 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2652 break;
2653 }
2654
2655 /*
2656 * Check for 82547 Tx FIFO bug. We need to do this
2657 * once we know we can transmit the packet, since we
2658 * do some internal FIFO space accounting here.
2659 */
2660 if (sc->sc_type == WM_T_82547 &&
2661 wm_82547_txfifo_bugchk(sc, m0)) {
2662 DPRINTF(WM_DEBUG_TX,
2663 ("%s: TX: 82547 Tx FIFO bug detected\n",
2664 device_xname(sc->sc_dev)));
2665 ifp->if_flags |= IFF_OACTIVE;
2666 bus_dmamap_unload(sc->sc_dmat, dmamap);
2667 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2668 break;
2669 }
2670
2671 IFQ_DEQUEUE(&ifp->if_snd, m0);
2672
2673 /*
2674 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2675 */
2676
2677 DPRINTF(WM_DEBUG_TX,
2678 ("%s: TX: packet has %d (%d) DMA segments\n",
2679 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2680
2681 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2682
2683 /*
2684 * Store a pointer to the packet so that we can free it
2685 * later.
2686 *
2687 * Initially, we consider the number of descriptors the
2688 		 * packet uses to be the number of DMA segments. This may be
2689 * incremented by 1 if we do checksum offload (a descriptor
2690 * is used to set the checksum context).
2691 */
2692 txs->txs_mbuf = m0;
2693 txs->txs_firstdesc = sc->sc_txnext;
2694 txs->txs_ndesc = segs_needed;
2695
2696 /* Set up offload parameters for this packet. */
2697 if (m0->m_pkthdr.csum_flags &
2698 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2699 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2700 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2701 if (wm_tx_offload(sc, txs, &cksumcmd,
2702 &cksumfields) != 0) {
2703 /* Error message already displayed. */
2704 bus_dmamap_unload(sc->sc_dmat, dmamap);
2705 continue;
2706 }
2707 } else {
2708 cksumcmd = 0;
2709 cksumfields = 0;
2710 }
2711
2712 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2713
2714 /* Sync the DMA map. */
2715 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2716 BUS_DMASYNC_PREWRITE);
2717
2718 /*
2719 * Initialize the transmit descriptor.
2720 */
2721 for (nexttx = sc->sc_txnext, seg = 0;
2722 seg < dmamap->dm_nsegs; seg++) {
2723 for (seglen = dmamap->dm_segs[seg].ds_len,
2724 curaddr = dmamap->dm_segs[seg].ds_addr;
2725 seglen != 0;
2726 curaddr += curlen, seglen -= curlen,
2727 nexttx = WM_NEXTTX(sc, nexttx)) {
2728 curlen = seglen;
2729
2730 /*
2731 * So says the Linux driver:
2732 * Work around for premature descriptor
2733 * write-backs in TSO mode. Append a
2734 * 4-byte sentinel descriptor.
2735 */
2736 if (use_tso &&
2737 seg == dmamap->dm_nsegs - 1 &&
2738 curlen > 8)
2739 curlen -= 4;
2740
2741 wm_set_dma_addr(
2742 &sc->sc_txdescs[nexttx].wtx_addr,
2743 curaddr);
2744 sc->sc_txdescs[nexttx].wtx_cmdlen =
2745 htole32(cksumcmd | curlen);
2746 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2747 0;
2748 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2749 cksumfields;
2750 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2751 lasttx = nexttx;
2752
2753 DPRINTF(WM_DEBUG_TX,
2754 ("%s: TX: desc %d: low %#" PRIx64 ", "
2755 "len %#04zx\n",
2756 device_xname(sc->sc_dev), nexttx,
2757 (uint64_t)curaddr, curlen));
2758 }
2759 }
2760
2761 KASSERT(lasttx != -1);
2762
2763 /*
2764 * Set up the command byte on the last descriptor of
2765 * the packet. If we're in the interrupt delay window,
2766 * delay the interrupt.
2767 */
2768 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2769 htole32(WTX_CMD_EOP | WTX_CMD_RS);
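		/*
		 * WTX_CMD_RS asks the chip to write back descriptor status
		 * on completion; wm_txintr() later polls that write-back
		 * (the WTX_ST_DD bit) to reclaim the job.
		 */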
2770
2771 /*
2772 * If VLANs are enabled and the packet has a VLAN tag, set
2773 * up the descriptor to encapsulate the packet for us.
2774 *
2775 * This is only valid on the last descriptor of the packet.
2776 */
2777 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2778 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2779 htole32(WTX_CMD_VLE);
2780 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2781 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2782 }
2783
2784 txs->txs_lastdesc = lasttx;
2785
2786 DPRINTF(WM_DEBUG_TX,
2787 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2788 device_xname(sc->sc_dev),
2789 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2790
2791 /* Sync the descriptors we're using. */
2792 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2793 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2794
2795 /* Give the packet to the chip. */
2796 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2797
2798 DPRINTF(WM_DEBUG_TX,
2799 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2800
2801 DPRINTF(WM_DEBUG_TX,
2802 ("%s: TX: finished transmitting packet, job %d\n",
2803 device_xname(sc->sc_dev), sc->sc_txsnext));
2804
2805 /* Advance the tx pointer. */
2806 sc->sc_txfree -= txs->txs_ndesc;
2807 sc->sc_txnext = nexttx;
2808
2809 sc->sc_txsfree--;
2810 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2811
2812 /* Pass the packet to any BPF listeners. */
2813 bpf_mtap(ifp, m0);
2814 }
2815
2816 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2817 /* No more slots; notify upper layer. */
2818 ifp->if_flags |= IFF_OACTIVE;
2819 }
2820
2821 if (sc->sc_txfree != ofree) {
2822 /* Set a watchdog timer in case the chip flakes out. */
2823 ifp->if_timer = 5;
2824 }
2825 }
2826
2827 /*
2828 * wm_nq_tx_offload:
2829 *
2830 * Set up TCP/IP checksumming parameters for the
2831 	 * specified packet, for NEWQUEUE devices.
2832 */
2833 static int
2834 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2835 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2836 {
2837 struct mbuf *m0 = txs->txs_mbuf;
2838 struct m_tag *mtag;
2839 uint32_t vl_len, mssidx, cmdc;
2840 struct ether_header *eh;
2841 int offset, iphl;
2842
2843 /*
2844 * XXX It would be nice if the mbuf pkthdr had offset
2845 * fields for the protocol headers.
2846 */
2847 *cmdlenp = 0;
2848 *fieldsp = 0;
2849
2850 eh = mtod(m0, struct ether_header *);
2851 	switch (ntohs(eh->ether_type)) {
2852 case ETHERTYPE_IP:
2853 case ETHERTYPE_IPV6:
2854 offset = ETHER_HDR_LEN;
2855 break;
2856
2857 case ETHERTYPE_VLAN:
2858 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2859 break;
2860
2861 default:
2862 /*
2863 * Don't support this protocol or encapsulation.
2864 */
2865 *do_csum = false;
2866 return 0;
2867 }
2868 *do_csum = true;
2869 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2870 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2871
2872 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2873 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2874
2875 if ((m0->m_pkthdr.csum_flags &
2876 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2877 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2878 } else {
2879 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2880 }
2881 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2882 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
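	/*
	 * At this point vl_len packs the L2 and L3 header lengths into a
	 * single word; e.g. an untagged IPv4 frame with a 20-byte header
	 * yields MACLEN = 14 and IPLEN = 20, with the VLAN bits still 0.
	 */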
2883
2884 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2885 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2886 << NQTXC_VLLEN_VLAN_SHIFT);
2887 *cmdlenp |= NQTX_CMD_VLE;
2888 }
2889
2890 mssidx = 0;
2891
2892 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2893 int hlen = offset + iphl;
2894 int tcp_hlen;
2895 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2896
2897 if (__predict_false(m0->m_len <
2898 (hlen + sizeof(struct tcphdr)))) {
2899 /*
2900 * TCP/IP headers are not in the first mbuf; we need
2901 * to do this the slow and painful way. Let's just
2902 * hope this doesn't happen very often.
2903 */
2904 struct tcphdr th;
2905
2906 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2907
2908 m_copydata(m0, hlen, sizeof(th), &th);
2909 if (v4) {
2910 struct ip ip;
2911
2912 m_copydata(m0, offset, sizeof(ip), &ip);
2913 ip.ip_len = 0;
2914 m_copyback(m0,
2915 offset + offsetof(struct ip, ip_len),
2916 sizeof(ip.ip_len), &ip.ip_len);
2917 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2918 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2919 } else {
2920 struct ip6_hdr ip6;
2921
2922 m_copydata(m0, offset, sizeof(ip6), &ip6);
2923 ip6.ip6_plen = 0;
2924 m_copyback(m0,
2925 offset + offsetof(struct ip6_hdr, ip6_plen),
2926 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2927 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2928 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2929 }
2930 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2931 sizeof(th.th_sum), &th.th_sum);
2932
2933 tcp_hlen = th.th_off << 2;
2934 } else {
2935 /*
2936 * TCP/IP headers are in the first mbuf; we can do
2937 * this the easy way.
2938 */
2939 struct tcphdr *th;
2940
2941 if (v4) {
2942 struct ip *ip =
2943 (void *)(mtod(m0, char *) + offset);
2944 th = (void *)(mtod(m0, char *) + hlen);
2945
2946 ip->ip_len = 0;
2947 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2948 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2949 } else {
2950 struct ip6_hdr *ip6 =
2951 (void *)(mtod(m0, char *) + offset);
2952 th = (void *)(mtod(m0, char *) + hlen);
2953
2954 ip6->ip6_plen = 0;
2955 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2956 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2957 }
2958 tcp_hlen = th->th_off << 2;
2959 }
2960 hlen += tcp_hlen;
2961 *cmdlenp |= NQTX_CMD_TSE;
2962
2963 if (v4) {
2964 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2965 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2966 } else {
2967 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2968 *fieldsp |= NQTXD_FIELDS_TUXSM;
2969 }
2970 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2971 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2972 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2973 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2974 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2975 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2976 } else {
2977 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2978 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2979 }
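	/*
	 * PAYLEN is the full packet length in the non-TSO case; for TSO
	 * it is the payload remaining after the headers, e.g. 65486
	 * bytes with 54 bytes of headers gives PAYLEN = 65432, with the
	 * MSS and TCP header length packed into mssidx above.
	 */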
2980
2981 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2982 *fieldsp |= NQTXD_FIELDS_IXSM;
2983 cmdc |= NQTXC_CMD_IP4;
2984 }
2985
2986 if (m0->m_pkthdr.csum_flags &
2987 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2988 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2989 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2990 cmdc |= NQTXC_CMD_TCP;
2991 } else {
2992 cmdc |= NQTXC_CMD_UDP;
2993 }
2994 cmdc |= NQTXC_CMD_IP4;
2995 *fieldsp |= NQTXD_FIELDS_TUXSM;
2996 }
2997 if (m0->m_pkthdr.csum_flags &
2998 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2999 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
3000 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3001 cmdc |= NQTXC_CMD_TCP;
3002 } else {
3003 cmdc |= NQTXC_CMD_UDP;
3004 }
3005 cmdc |= NQTXC_CMD_IP6;
3006 *fieldsp |= NQTXD_FIELDS_TUXSM;
3007 }
3008
3009 /* Fill in the context descriptor. */
3010 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3011 htole32(vl_len);
3012 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3013 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3014 htole32(cmdc);
3015 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3016 htole32(mssidx);
3017 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3018 DPRINTF(WM_DEBUG_TX,
3019 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3020 sc->sc_txnext, 0, vl_len));
3021 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3022 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3023 txs->txs_ndesc++;
3024 return 0;
3025 }
3026
3027 /*
3028 * wm_nq_start: [ifnet interface function]
3029 *
3030 	 * Start packet transmission on the interface for NEWQUEUE devices.
3031 */
3032 static void
3033 wm_nq_start(struct ifnet *ifp)
3034 {
3035 struct wm_softc *sc = ifp->if_softc;
3036 struct mbuf *m0;
3037 struct m_tag *mtag;
3038 struct wm_txsoft *txs;
3039 bus_dmamap_t dmamap;
3040 int error, nexttx, lasttx = -1, seg, segs_needed;
3041 bool do_csum, sent;
3042
3043 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3044 return;
3045
3046 sent = false;
3047
3048 /*
3049 * Loop through the send queue, setting up transmit descriptors
3050 * until we drain the queue, or use up all available transmit
3051 * descriptors.
3052 */
3053 for (;;) {
3054 /* Grab a packet off the queue. */
3055 IFQ_POLL(&ifp->if_snd, m0);
3056 if (m0 == NULL)
3057 break;
3058
3059 DPRINTF(WM_DEBUG_TX,
3060 ("%s: TX: have packet to transmit: %p\n",
3061 device_xname(sc->sc_dev), m0));
3062
3063 /* Get a work queue entry. */
3064 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3065 wm_txintr(sc);
3066 if (sc->sc_txsfree == 0) {
3067 DPRINTF(WM_DEBUG_TX,
3068 ("%s: TX: no free job descriptors\n",
3069 device_xname(sc->sc_dev)));
3070 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3071 break;
3072 }
3073 }
3074
3075 txs = &sc->sc_txsoft[sc->sc_txsnext];
3076 dmamap = txs->txs_dmamap;
3077
3078 /*
3079 * Load the DMA map. If this fails, the packet either
3080 * didn't fit in the allotted number of segments, or we
3081 * were short on resources. For the too-many-segments
3082 * case, we simply report an error and drop the packet,
3083 * since we can't sanely copy a jumbo packet to a single
3084 * buffer.
3085 */
3086 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3087 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3088 if (error) {
3089 if (error == EFBIG) {
3090 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3091 log(LOG_ERR, "%s: Tx packet consumes too many "
3092 "DMA segments, dropping...\n",
3093 device_xname(sc->sc_dev));
3094 IFQ_DEQUEUE(&ifp->if_snd, m0);
3095 wm_dump_mbuf_chain(sc, m0);
3096 m_freem(m0);
3097 continue;
3098 }
3099 /*
3100 * Short on resources, just stop for now.
3101 */
3102 DPRINTF(WM_DEBUG_TX,
3103 ("%s: TX: dmamap load failed: %d\n",
3104 device_xname(sc->sc_dev), error));
3105 break;
3106 }
3107
3108 segs_needed = dmamap->dm_nsegs;
3109
3110 /*
3111 * Ensure we have enough descriptors free to describe
3112 * the packet. Note, we always reserve one descriptor
3113 * at the end of the ring due to the semantics of the
3114 * TDT register, plus one more in the event we need
3115 * to load offload context.
3116 */
3117 if (segs_needed > sc->sc_txfree - 2) {
3118 /*
3119 * Not enough free descriptors to transmit this
3120 * packet. We haven't committed anything yet,
3121 * so just unload the DMA map, put the packet
3122 			 * back on the queue, and punt. Notify the upper
3123 * layer that there are no more slots left.
3124 */
3125 DPRINTF(WM_DEBUG_TX,
3126 ("%s: TX: need %d (%d) descriptors, have %d\n",
3127 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3128 segs_needed, sc->sc_txfree - 1));
3129 ifp->if_flags |= IFF_OACTIVE;
3130 bus_dmamap_unload(sc->sc_dmat, dmamap);
3131 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3132 break;
3133 }
3134
3135 IFQ_DEQUEUE(&ifp->if_snd, m0);
3136
3137 /*
3138 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3139 */
3140
3141 DPRINTF(WM_DEBUG_TX,
3142 ("%s: TX: packet has %d (%d) DMA segments\n",
3143 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3144
3145 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3146
3147 /*
3148 * Store a pointer to the packet so that we can free it
3149 * later.
3150 *
3151 * Initially, we consider the number of descriptors the
3152 		 * packet uses to be the number of DMA segments. This may be
3153 * incremented by 1 if we do checksum offload (a descriptor
3154 * is used to set the checksum context).
3155 */
3156 txs->txs_mbuf = m0;
3157 txs->txs_firstdesc = sc->sc_txnext;
3158 txs->txs_ndesc = segs_needed;
3159
3160 /* Set up offload parameters for this packet. */
3161 uint32_t cmdlen, fields, dcmdlen;
3162 if (m0->m_pkthdr.csum_flags &
3163 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3164 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3165 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3166 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3167 &do_csum) != 0) {
3168 /* Error message already displayed. */
3169 bus_dmamap_unload(sc->sc_dmat, dmamap);
3170 continue;
3171 }
3172 } else {
3173 do_csum = false;
3174 cmdlen = 0;
3175 fields = 0;
3176 }
3177
3178 /* Sync the DMA map. */
3179 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3180 BUS_DMASYNC_PREWRITE);
3181
3182 /*
3183 * Initialize the first transmit descriptor.
3184 */
3185 nexttx = sc->sc_txnext;
3186 if (!do_csum) {
3187 /* setup a legacy descriptor */
3188 wm_set_dma_addr(
3189 &sc->sc_txdescs[nexttx].wtx_addr,
3190 dmamap->dm_segs[0].ds_addr);
3191 sc->sc_txdescs[nexttx].wtx_cmdlen =
3192 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3193 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3194 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3195 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3196 NULL) {
3197 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3198 htole32(WTX_CMD_VLE);
3199 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3200 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3201 } else {
3202 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3203 }
3204 dcmdlen = 0;
3205 } else {
3206 /* setup an advanced data descriptor */
3207 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3208 htole64(dmamap->dm_segs[0].ds_addr);
3209 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3210 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3211 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3212 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3213 htole32(fields);
3214 DPRINTF(WM_DEBUG_TX,
3215 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3216 device_xname(sc->sc_dev), nexttx,
3217 (uint64_t)dmamap->dm_segs[0].ds_addr));
3218 DPRINTF(WM_DEBUG_TX,
3219 ("\t 0x%08x%08x\n", fields,
3220 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3221 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3222 }
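		/*
		 * dcmdlen carries the descriptor-type bits for the remaining
		 * segments: zero keeps them in legacy format, while
		 * NQTX_DTYP_D | NQTX_CMD_DEXT marks them as advanced data
		 * descriptors to match the first one.
		 */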
3223
3224 lasttx = nexttx;
3225 nexttx = WM_NEXTTX(sc, nexttx);
3226 /*
3227 		 * Fill in the next descriptors. The legacy and advanced
3228 		 * formats are the same from here on.
3229 */
3230 for (seg = 1; seg < dmamap->dm_nsegs;
3231 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3232 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3233 htole64(dmamap->dm_segs[seg].ds_addr);
3234 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3235 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3236 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3237 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3238 lasttx = nexttx;
3239
3240 DPRINTF(WM_DEBUG_TX,
3241 ("%s: TX: desc %d: %#" PRIx64 ", "
3242 "len %#04zx\n",
3243 device_xname(sc->sc_dev), nexttx,
3244 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3245 dmamap->dm_segs[seg].ds_len));
3246 }
3247
3248 KASSERT(lasttx != -1);
3249
3250 /*
3251 * Set up the command byte on the last descriptor of
3252 * the packet. If we're in the interrupt delay window,
3253 * delay the interrupt.
3254 */
3255 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3256 (NQTX_CMD_EOP | NQTX_CMD_RS));
3257 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3258 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3259
3260 txs->txs_lastdesc = lasttx;
3261
3262 DPRINTF(WM_DEBUG_TX,
3263 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3264 device_xname(sc->sc_dev),
3265 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3266
3267 /* Sync the descriptors we're using. */
3268 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3269 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3270
3271 /* Give the packet to the chip. */
3272 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3273 sent = true;
3274
3275 DPRINTF(WM_DEBUG_TX,
3276 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3277
3278 DPRINTF(WM_DEBUG_TX,
3279 ("%s: TX: finished transmitting packet, job %d\n",
3280 device_xname(sc->sc_dev), sc->sc_txsnext));
3281
3282 /* Advance the tx pointer. */
3283 sc->sc_txfree -= txs->txs_ndesc;
3284 sc->sc_txnext = nexttx;
3285
3286 sc->sc_txsfree--;
3287 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3288
3289 /* Pass the packet to any BPF listeners. */
3290 bpf_mtap(ifp, m0);
3291 }
3292
3293 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3294 /* No more slots; notify upper layer. */
3295 ifp->if_flags |= IFF_OACTIVE;
3296 }
3297
3298 if (sent) {
3299 /* Set a watchdog timer in case the chip flakes out. */
3300 ifp->if_timer = 5;
3301 }
3302 }
3303
3304 /*
3305 * wm_watchdog: [ifnet interface function]
3306 *
3307 * Watchdog timer handler.
3308 */
3309 static void
3310 wm_watchdog(struct ifnet *ifp)
3311 {
3312 struct wm_softc *sc = ifp->if_softc;
3313
3314 /*
3315 * Since we're using delayed interrupts, sweep up
3316 * before we report an error.
3317 */
3318 wm_txintr(sc);
3319
3320 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3321 #ifdef WM_DEBUG
3322 int i, j;
3323 struct wm_txsoft *txs;
3324 #endif
3325 log(LOG_ERR,
3326 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3327 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3328 sc->sc_txnext);
3329 ifp->if_oerrors++;
3330 #ifdef WM_DEBUG
3331 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3332 i = WM_NEXTTXS(sc, i)) {
3333 txs = &sc->sc_txsoft[i];
3334 printf("txs %d tx %d -> %d\n",
3335 i, txs->txs_firstdesc, txs->txs_lastdesc);
3336 for (j = txs->txs_firstdesc; ;
3337 j = WM_NEXTTX(sc, j)) {
3338 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3339 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3340 printf("\t %#08x%08x\n",
3341 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3342 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3343 if (j == txs->txs_lastdesc)
3344 break;
3345 }
3346 }
3347 #endif
3348 /* Reset the interface. */
3349 (void) wm_init(ifp);
3350 }
3351
3352 /* Try to get more packets going. */
3353 ifp->if_start(ifp);
3354 }
3355
3356 static int
3357 wm_ifflags_cb(struct ethercom *ec)
3358 {
3359 struct ifnet *ifp = &ec->ec_if;
3360 struct wm_softc *sc = ifp->if_softc;
3361 int change = ifp->if_flags ^ sc->sc_if_flags;
3362
3363 if (change != 0)
3364 sc->sc_if_flags = ifp->if_flags;
3365
3366 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3367 return ENETRESET;
3368
3369 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3370 wm_set_filter(sc);
3371
3372 wm_set_vlan(sc);
3373
3374 return 0;
3375 }
3376
3377 /*
3378 * wm_ioctl: [ifnet interface function]
3379 *
3380 * Handle control requests from the operator.
3381 */
3382 static int
3383 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3384 {
3385 struct wm_softc *sc = ifp->if_softc;
3386 struct ifreq *ifr = (struct ifreq *) data;
3387 struct ifaddr *ifa = (struct ifaddr *)data;
3388 struct sockaddr_dl *sdl;
3389 int s, error;
3390
3391 s = splnet();
3392
3393 switch (cmd) {
3394 case SIOCSIFMEDIA:
3395 case SIOCGIFMEDIA:
3396 /* Flow control requires full-duplex mode. */
3397 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3398 (ifr->ifr_media & IFM_FDX) == 0)
3399 ifr->ifr_media &= ~IFM_ETH_FMASK;
3400 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3401 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3402 /* We can do both TXPAUSE and RXPAUSE. */
3403 ifr->ifr_media |=
3404 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3405 }
3406 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3407 }
3408 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3409 break;
3410 case SIOCINITIFADDR:
3411 if (ifa->ifa_addr->sa_family == AF_LINK) {
3412 sdl = satosdl(ifp->if_dl->ifa_addr);
3413 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3414 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3415 /* unicast address is first multicast entry */
3416 wm_set_filter(sc);
3417 error = 0;
3418 break;
3419 }
3420 /*FALLTHROUGH*/
3421 default:
3422 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3423 break;
3424
3425 error = 0;
3426
3427 if (cmd == SIOCSIFCAP)
3428 error = (*ifp->if_init)(ifp);
3429 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3430 ;
3431 else if (ifp->if_flags & IFF_RUNNING) {
3432 /*
3433 * Multicast list has changed; set the hardware filter
3434 * accordingly.
3435 */
3436 wm_set_filter(sc);
3437 }
3438 break;
3439 }
3440
3441 /* Try to get more packets going. */
3442 ifp->if_start(ifp);
3443
3444 splx(s);
3445 return error;
3446 }
3447
3448 /*
3449 * wm_intr:
3450 *
3451 * Interrupt service routine.
3452 */
3453 static int
3454 wm_intr(void *arg)
3455 {
3456 struct wm_softc *sc = arg;
3457 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3458 uint32_t icr;
3459 int handled = 0;
3460
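	/*
	 * On these chips reading ICR acknowledges (clears) the asserted
	 * interrupt causes, so we just re-read until no cause we care
	 * about remains set.
	 */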
3461 while (1 /* CONSTCOND */) {
3462 icr = CSR_READ(sc, WMREG_ICR);
3463 if ((icr & sc->sc_icr) == 0)
3464 break;
3465 rnd_add_uint32(&sc->rnd_source, icr);
3466
3467 handled = 1;
3468
3469 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3470 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3471 DPRINTF(WM_DEBUG_RX,
3472 ("%s: RX: got Rx intr 0x%08x\n",
3473 device_xname(sc->sc_dev),
3474 icr & (ICR_RXDMT0|ICR_RXT0)));
3475 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3476 }
3477 #endif
3478 wm_rxintr(sc);
3479
3480 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3481 if (icr & ICR_TXDW) {
3482 DPRINTF(WM_DEBUG_TX,
3483 ("%s: TX: got TXDW interrupt\n",
3484 device_xname(sc->sc_dev)));
3485 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3486 }
3487 #endif
3488 wm_txintr(sc);
3489
3490 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3491 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3492 wm_linkintr(sc, icr);
3493 }
3494
3495 if (icr & ICR_RXO) {
3496 #if defined(WM_DEBUG)
3497 log(LOG_WARNING, "%s: Receive overrun\n",
3498 device_xname(sc->sc_dev));
3499 #endif /* defined(WM_DEBUG) */
3500 }
3501 }
3502
3503 if (handled) {
3504 /* Try to get more packets going. */
3505 ifp->if_start(ifp);
3506 }
3507
3508 return handled;
3509 }
3510
3511 /*
3512 * wm_txintr:
3513 *
3514 * Helper; handle transmit interrupts.
3515 */
3516 static void
3517 wm_txintr(struct wm_softc *sc)
3518 {
3519 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3520 struct wm_txsoft *txs;
3521 uint8_t status;
3522 int i;
3523
3524 ifp->if_flags &= ~IFF_OACTIVE;
3525
3526 /*
3527 * Go through the Tx list and free mbufs for those
3528 * frames which have been transmitted.
3529 */
3530 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3531 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3532 txs = &sc->sc_txsoft[i];
3533
3534 DPRINTF(WM_DEBUG_TX,
3535 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3536
3537 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3538 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3539
3540 status =
3541 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3542 if ((status & WTX_ST_DD) == 0) {
3543 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3544 BUS_DMASYNC_PREREAD);
3545 break;
3546 }
3547
3548 DPRINTF(WM_DEBUG_TX,
3549 ("%s: TX: job %d done: descs %d..%d\n",
3550 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3551 txs->txs_lastdesc));
3552
3553 /*
3554 * XXX We should probably be using the statistics
3555 * XXX registers, but I don't know if they exist
3556 * XXX on chips before the i82544.
3557 */
3558
3559 #ifdef WM_EVENT_COUNTERS
3560 if (status & WTX_ST_TU)
3561 WM_EVCNT_INCR(&sc->sc_ev_tu);
3562 #endif /* WM_EVENT_COUNTERS */
3563
3564 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3565 ifp->if_oerrors++;
3566 if (status & WTX_ST_LC)
3567 log(LOG_WARNING, "%s: late collision\n",
3568 device_xname(sc->sc_dev));
3569 else if (status & WTX_ST_EC) {
3570 ifp->if_collisions += 16;
3571 log(LOG_WARNING, "%s: excessive collisions\n",
3572 device_xname(sc->sc_dev));
3573 }
3574 } else
3575 ifp->if_opackets++;
3576
3577 sc->sc_txfree += txs->txs_ndesc;
3578 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3579 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3580 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3581 m_freem(txs->txs_mbuf);
3582 txs->txs_mbuf = NULL;
3583 }
3584
3585 /* Update the dirty transmit buffer pointer. */
3586 sc->sc_txsdirty = i;
3587 DPRINTF(WM_DEBUG_TX,
3588 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3589
3590 /*
3591 * If there are no more pending transmissions, cancel the watchdog
3592 * timer.
3593 */
3594 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3595 ifp->if_timer = 0;
3596 }
3597
3598 /*
3599 * wm_rxintr:
3600 *
3601 * Helper; handle receive interrupts.
3602 */
3603 static void
3604 wm_rxintr(struct wm_softc *sc)
3605 {
3606 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3607 struct wm_rxsoft *rxs;
3608 struct mbuf *m;
3609 int i, len;
3610 uint8_t status, errors;
3611 uint16_t vlantag;
3612
3613 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3614 rxs = &sc->sc_rxsoft[i];
3615
3616 DPRINTF(WM_DEBUG_RX,
3617 ("%s: RX: checking descriptor %d\n",
3618 device_xname(sc->sc_dev), i));
3619
3620 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3621
3622 status = sc->sc_rxdescs[i].wrx_status;
3623 errors = sc->sc_rxdescs[i].wrx_errors;
3624 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3625 vlantag = sc->sc_rxdescs[i].wrx_special;
3626
3627 if ((status & WRX_ST_DD) == 0) {
3628 /*
3629 * We have processed all of the receive descriptors.
3630 */
3631 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3632 break;
3633 }
3634
3635 if (__predict_false(sc->sc_rxdiscard)) {
3636 DPRINTF(WM_DEBUG_RX,
3637 ("%s: RX: discarding contents of descriptor %d\n",
3638 device_xname(sc->sc_dev), i));
3639 WM_INIT_RXDESC(sc, i);
3640 if (status & WRX_ST_EOP) {
3641 /* Reset our state. */
3642 DPRINTF(WM_DEBUG_RX,
3643 ("%s: RX: resetting rxdiscard -> 0\n",
3644 device_xname(sc->sc_dev)));
3645 sc->sc_rxdiscard = 0;
3646 }
3647 continue;
3648 }
3649
3650 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3651 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3652
3653 m = rxs->rxs_mbuf;
3654
3655 /*
3656 * Add a new receive buffer to the ring, unless of
3657 * course the length is zero. Treat the latter as a
3658 * failed mapping.
3659 */
3660 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3661 /*
3662 * Failed, throw away what we've done so
3663 * far, and discard the rest of the packet.
3664 */
3665 ifp->if_ierrors++;
3666 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3667 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3668 WM_INIT_RXDESC(sc, i);
3669 if ((status & WRX_ST_EOP) == 0)
3670 sc->sc_rxdiscard = 1;
3671 if (sc->sc_rxhead != NULL)
3672 m_freem(sc->sc_rxhead);
3673 WM_RXCHAIN_RESET(sc);
3674 DPRINTF(WM_DEBUG_RX,
3675 ("%s: RX: Rx buffer allocation failed, "
3676 "dropping packet%s\n", device_xname(sc->sc_dev),
3677 sc->sc_rxdiscard ? " (discard)" : ""));
3678 continue;
3679 }
3680
3681 m->m_len = len;
3682 sc->sc_rxlen += len;
3683 DPRINTF(WM_DEBUG_RX,
3684 ("%s: RX: buffer at %p len %d\n",
3685 device_xname(sc->sc_dev), m->m_data, len));
3686
3687 /*
3688 * If this is not the end of the packet, keep
3689 * looking.
3690 */
3691 if ((status & WRX_ST_EOP) == 0) {
3692 WM_RXCHAIN_LINK(sc, m);
3693 DPRINTF(WM_DEBUG_RX,
3694 ("%s: RX: not yet EOP, rxlen -> %d\n",
3695 device_xname(sc->sc_dev), sc->sc_rxlen));
3696 continue;
3697 }
3698
3699 		/*
3700 		 * Okay, we have the entire packet now.  The chip is
3701 		 * configured to include the FCS except on the I350 and
3702 		 * I21[01] (not all chips can be configured to strip it),
3703 		 * so we need to trim it here.  We may also need to adjust
3704 		 * the length of the previous mbuf in the chain if the
3705 		 * current mbuf is too short.  Due to an erratum, the
3706 		 * RCTL_SECRC bit in the RCTL register is always set on
3707 		 * the I350, so no trimming is needed there.
3708 		 */
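		/*
		 * Illustrative example: with the 4-byte FCS, if the final
		 * mbuf holds only 1 byte, that byte plus 3 bytes of the
		 * previous mbuf are FCS, so we zero this mbuf's length and
		 * shorten the previous one by the remaining 3 bytes.
		 */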
3709 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I210)
3710 && (sc->sc_type != WM_T_I211)) {
3711 if (m->m_len < ETHER_CRC_LEN) {
3712 sc->sc_rxtail->m_len
3713 -= (ETHER_CRC_LEN - m->m_len);
3714 m->m_len = 0;
3715 } else
3716 m->m_len -= ETHER_CRC_LEN;
3717 len = sc->sc_rxlen - ETHER_CRC_LEN;
3718 } else
3719 len = sc->sc_rxlen;
3720
3721 WM_RXCHAIN_LINK(sc, m);
3722
3723 *sc->sc_rxtailp = NULL;
3724 m = sc->sc_rxhead;
3725
3726 WM_RXCHAIN_RESET(sc);
3727
3728 DPRINTF(WM_DEBUG_RX,
3729 ("%s: RX: have entire packet, len -> %d\n",
3730 device_xname(sc->sc_dev), len));
3731
3732 /*
3733 * If an error occurred, update stats and drop the packet.
3734 */
3735 if (errors &
3736 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3737 if (errors & WRX_ER_SE)
3738 log(LOG_WARNING, "%s: symbol error\n",
3739 device_xname(sc->sc_dev));
3740 else if (errors & WRX_ER_SEQ)
3741 log(LOG_WARNING, "%s: receive sequence error\n",
3742 device_xname(sc->sc_dev));
3743 else if (errors & WRX_ER_CE)
3744 log(LOG_WARNING, "%s: CRC error\n",
3745 device_xname(sc->sc_dev));
3746 m_freem(m);
3747 continue;
3748 }
3749
3750 /*
3751 * No errors. Receive the packet.
3752 */
3753 m->m_pkthdr.rcvif = ifp;
3754 m->m_pkthdr.len = len;
3755
3756 /*
3757 * If VLANs are enabled, VLAN packets have been unwrapped
3758 * for us. Associate the tag with the packet.
3759 */
3760 if ((status & WRX_ST_VP) != 0) {
3761 VLAN_INPUT_TAG(ifp, m,
3762 le16toh(vlantag),
3763 continue);
3764 }
3765
3766 /*
3767 * Set up checksum info for this packet.
3768 */
3769 if ((status & WRX_ST_IXSM) == 0) {
3770 if (status & WRX_ST_IPCS) {
3771 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3772 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3773 if (errors & WRX_ER_IPE)
3774 m->m_pkthdr.csum_flags |=
3775 M_CSUM_IPv4_BAD;
3776 }
3777 if (status & WRX_ST_TCPCS) {
3778 /*
3779 * Note: we don't know if this was TCP or UDP,
3780 * so we just set both bits, and expect the
3781 * upper layers to deal.
3782 */
3783 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3784 m->m_pkthdr.csum_flags |=
3785 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3786 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3787 if (errors & WRX_ER_TCPE)
3788 m->m_pkthdr.csum_flags |=
3789 M_CSUM_TCP_UDP_BAD;
3790 }
3791 }
3792
3793 ifp->if_ipackets++;
3794
3795 /* Pass this up to any BPF listeners. */
3796 bpf_mtap(ifp, m);
3797
3798 /* Pass it on. */
3799 (*ifp->if_input)(ifp, m);
3800 }
3801
3802 /* Update the receive pointer. */
3803 sc->sc_rxptr = i;
3804
3805 DPRINTF(WM_DEBUG_RX,
3806 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3807 }
3808
3809 /*
3810 * wm_linkintr_gmii:
3811 *
3812 * Helper; handle link interrupts for GMII.
3813 */
3814 static void
3815 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3816 {
3817
3818 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3819 __func__));
3820
3821 if (icr & ICR_LSC) {
3822 DPRINTF(WM_DEBUG_LINK,
3823 ("%s: LINK: LSC -> mii_pollstat\n",
3824 device_xname(sc->sc_dev)));
3825 mii_pollstat(&sc->sc_mii);
3826 if (sc->sc_type == WM_T_82543) {
3827 int miistatus, active;
3828
3829 /*
3830 * With 82543, we need to force speed and
3831 * duplex on the MAC equal to what the PHY
3832 * speed and duplex configuration is.
3833 */
3834 miistatus = sc->sc_mii.mii_media_status;
3835
3836 if (miistatus & IFM_ACTIVE) {
3837 active = sc->sc_mii.mii_media_active;
3838 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3839 switch (IFM_SUBTYPE(active)) {
3840 case IFM_10_T:
3841 sc->sc_ctrl |= CTRL_SPEED_10;
3842 break;
3843 case IFM_100_TX:
3844 sc->sc_ctrl |= CTRL_SPEED_100;
3845 break;
3846 case IFM_1000_T:
3847 sc->sc_ctrl |= CTRL_SPEED_1000;
3848 break;
3849 default:
3850 /*
3851 * fiber?
3852 				 * Should not enter here.
3853 */
3854 printf("unknown media (%x)\n",
3855 active);
3856 break;
3857 }
3858 if (active & IFM_FDX)
3859 sc->sc_ctrl |= CTRL_FD;
3860 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3861 }
3862 } else if ((sc->sc_type == WM_T_ICH8)
3863 && (sc->sc_phytype == WMPHY_IGP_3)) {
3864 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3865 } else if (sc->sc_type == WM_T_PCH) {
3866 wm_k1_gig_workaround_hv(sc,
3867 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3868 }
3869
3870 if ((sc->sc_phytype == WMPHY_82578)
3871 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3872 == IFM_1000_T)) {
3873
3874 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3875 delay(200*1000); /* XXX too big */
3876
3877 /* Link stall fix for link up */
3878 wm_gmii_hv_writereg(sc->sc_dev, 1,
3879 HV_MUX_DATA_CTRL,
3880 HV_MUX_DATA_CTRL_GEN_TO_MAC
3881 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3882 wm_gmii_hv_writereg(sc->sc_dev, 1,
3883 HV_MUX_DATA_CTRL,
3884 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3885 }
3886 }
3887 } else if (icr & ICR_RXSEQ) {
3888 DPRINTF(WM_DEBUG_LINK,
3889 ("%s: LINK Receive sequence error\n",
3890 device_xname(sc->sc_dev)));
3891 }
3892 }
3893
3894 /*
3895 * wm_linkintr_tbi:
3896 *
3897 * Helper; handle link interrupts for TBI mode.
3898 */
3899 static void
3900 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3901 {
3902 uint32_t status;
3903
3904 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3905 __func__));
3906
3907 status = CSR_READ(sc, WMREG_STATUS);
3908 if (icr & ICR_LSC) {
3909 if (status & STATUS_LU) {
3910 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3911 device_xname(sc->sc_dev),
3912 (status & STATUS_FD) ? "FDX" : "HDX"));
3913 /*
3914 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
3915 			 * automatically, so we re-read CTRL into sc->sc_ctrl.
3916 */
3917
3918 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3919 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3920 sc->sc_fcrtl &= ~FCRTL_XONE;
3921 if (status & STATUS_FD)
3922 sc->sc_tctl |=
3923 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3924 else
3925 sc->sc_tctl |=
3926 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3927 if (sc->sc_ctrl & CTRL_TFCE)
3928 sc->sc_fcrtl |= FCRTL_XONE;
3929 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3930 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3931 WMREG_OLD_FCRTL : WMREG_FCRTL,
3932 sc->sc_fcrtl);
3933 sc->sc_tbi_linkup = 1;
3934 } else {
3935 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3936 device_xname(sc->sc_dev)));
3937 sc->sc_tbi_linkup = 0;
3938 }
3939 wm_tbi_set_linkled(sc);
3940 } else if (icr & ICR_RXCFG) {
3941 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3942 device_xname(sc->sc_dev)));
3943 sc->sc_tbi_nrxcfg++;
3944 wm_check_for_link(sc);
3945 } else if (icr & ICR_RXSEQ) {
3946 DPRINTF(WM_DEBUG_LINK,
3947 ("%s: LINK: Receive sequence error\n",
3948 device_xname(sc->sc_dev)));
3949 }
3950 }
3951
3952 /*
3953 * wm_linkintr:
3954 *
3955 * Helper; handle link interrupts.
3956 */
3957 static void
3958 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3959 {
3960
3961 if (sc->sc_flags & WM_F_HAS_MII)
3962 wm_linkintr_gmii(sc, icr);
3963 else
3964 wm_linkintr_tbi(sc, icr);
3965 }
3966
3967 /*
3968 * wm_tick:
3969 *
3970 * One second timer, used to check link status, sweep up
3971 * completed transmit jobs, etc.
3972 */
3973 static void
3974 wm_tick(void *arg)
3975 {
3976 struct wm_softc *sc = arg;
3977 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3978 int s;
3979
3980 s = splnet();
3981
3982 if (sc->sc_type >= WM_T_82542_2_1) {
3983 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3984 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3985 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3986 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3987 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3988 }
3989
3990 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3991 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3992 + CSR_READ(sc, WMREG_CRCERRS)
3993 + CSR_READ(sc, WMREG_ALGNERRC)
3994 + CSR_READ(sc, WMREG_SYMERRC)
3995 + CSR_READ(sc, WMREG_RXERRC)
3996 + CSR_READ(sc, WMREG_SEC)
3997 + CSR_READ(sc, WMREG_CEXTERR)
3998 + CSR_READ(sc, WMREG_RLEC);
3999 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4000
4001 if (sc->sc_flags & WM_F_HAS_MII)
4002 mii_tick(&sc->sc_mii);
4003 else
4004 wm_tbi_check_link(sc);
4005
4006 splx(s);
4007
4008 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4009 }
4010
4011 /*
4012 * wm_reset:
4013 *
4014 * Reset the i82542 chip.
4015 */
4016 static void
4017 wm_reset(struct wm_softc *sc)
4018 {
4019 int phy_reset = 0;
4020 uint32_t reg, mask;
4021 int i;
4022
4023 /*
4024 * Allocate on-chip memory according to the MTU size.
4025 * The Packet Buffer Allocation register must be written
4026 * before the chip is reset.
4027 */
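	/*
	 * The PBA value is the share of the on-chip packet buffer
	 * dedicated to receive; on the 82547 the remainder of the 40KB
	 * buffer is carved out below as the Tx FIFO.
	 */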
4028 switch (sc->sc_type) {
4029 case WM_T_82547:
4030 case WM_T_82547_2:
4031 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4032 PBA_22K : PBA_30K;
4033 sc->sc_txfifo_head = 0;
4034 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4035 sc->sc_txfifo_size =
4036 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4037 sc->sc_txfifo_stall = 0;
4038 break;
4039 case WM_T_82571:
4040 case WM_T_82572:
4041 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
4042 case WM_T_I350:
4043 case WM_T_80003:
4044 sc->sc_pba = PBA_32K;
4045 break;
4046 case WM_T_82580:
4047 case WM_T_82580ER:
4048 sc->sc_pba = PBA_35K;
4049 break;
4050 case WM_T_I210:
4051 case WM_T_I211:
4052 sc->sc_pba = PBA_34K;
4053 break;
4054 case WM_T_82576:
4055 sc->sc_pba = PBA_64K;
4056 break;
4057 case WM_T_82573:
4058 sc->sc_pba = PBA_12K;
4059 break;
4060 case WM_T_82574:
4061 case WM_T_82583:
4062 sc->sc_pba = PBA_20K;
4063 break;
4064 case WM_T_ICH8:
4065 sc->sc_pba = PBA_8K;
4066 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4067 break;
4068 case WM_T_ICH9:
4069 case WM_T_ICH10:
4070 sc->sc_pba = PBA_10K;
4071 break;
4072 case WM_T_PCH:
4073 case WM_T_PCH2:
4074 case WM_T_PCH_LPT:
4075 sc->sc_pba = PBA_26K;
4076 break;
4077 default:
4078 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4079 PBA_40K : PBA_48K;
4080 break;
4081 }
4082 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4083
4084 /* Prevent the PCI-E bus from sticking */
4085 if (sc->sc_flags & WM_F_PCIE) {
4086 int timeout = 800;
4087
4088 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4089 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4090
4091 while (timeout--) {
4092 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4093 == 0)
4094 break;
4095 delay(100);
4096 }
4097 }
4098
4099 	/* Set the PCIe completion timeout for the interface */
4100 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4101 || (sc->sc_type == WM_T_I350))
4102 wm_set_pcie_completion_timeout(sc);
4103
4104 /* Clear interrupt */
4105 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4106
4107 /* Stop the transmit and receive processes. */
4108 CSR_WRITE(sc, WMREG_RCTL, 0);
4109 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4110 sc->sc_rctl &= ~RCTL_EN;
4111
4112 /* XXX set_tbi_sbp_82543() */
4113
4114 delay(10*1000);
4115
4116 /* Must acquire the MDIO ownership before MAC reset */
4117 switch (sc->sc_type) {
4118 case WM_T_82573:
4119 case WM_T_82574:
4120 case WM_T_82583:
4121 i = 0;
4122 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4123 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4124 do {
4125 CSR_WRITE(sc, WMREG_EXTCNFCTR,
4126 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4127 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4128 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4129 break;
4130 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4131 delay(2*1000);
4132 i++;
4133 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4134 break;
4135 default:
4136 break;
4137 }
4138
4139 /*
4140 * 82541 Errata 29? & 82547 Errata 28?
4141 * See also the description about PHY_RST bit in CTRL register
4142 * in 8254x_GBe_SDM.pdf.
4143 */
4144 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4145 CSR_WRITE(sc, WMREG_CTRL,
4146 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4147 delay(5000);
4148 }
4149
4150 switch (sc->sc_type) {
4151 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4152 case WM_T_82541:
4153 case WM_T_82541_2:
4154 case WM_T_82547:
4155 case WM_T_82547_2:
4156 /*
4157 * On some chipsets, a reset through a memory-mapped write
4158 * cycle can cause the chip to reset before completing the
4159 		 * write cycle.  This causes major headaches that can be
4160 * avoided by issuing the reset via indirect register writes
4161 * through I/O space.
4162 *
4163 * So, if we successfully mapped the I/O BAR at attach time,
4164 * use that. Otherwise, try our luck with a memory-mapped
4165 * reset.
4166 */
4167 if (sc->sc_flags & WM_F_IOH_VALID)
4168 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4169 else
4170 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4171 break;
4172 case WM_T_82545_3:
4173 case WM_T_82546_3:
4174 /* Use the shadow control register on these chips. */
4175 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4176 break;
4177 case WM_T_80003:
4178 mask = swfwphysem[sc->sc_funcid];
4179 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4180 wm_get_swfw_semaphore(sc, mask);
4181 CSR_WRITE(sc, WMREG_CTRL, reg);
4182 wm_put_swfw_semaphore(sc, mask);
4183 break;
4184 case WM_T_ICH8:
4185 case WM_T_ICH9:
4186 case WM_T_ICH10:
4187 case WM_T_PCH:
4188 case WM_T_PCH2:
4189 case WM_T_PCH_LPT:
4190 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4191 if (wm_check_reset_block(sc) == 0) {
4192 /*
4193 * Gate automatic PHY configuration by hardware on
4194 * non-managed 82579
4195 */
4196 if ((sc->sc_type == WM_T_PCH2)
4197 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4198 != 0))
4199 wm_gate_hw_phy_config_ich8lan(sc, 1);
4200
4201
4202 reg |= CTRL_PHY_RESET;
4203 phy_reset = 1;
4204 }
4205 wm_get_swfwhw_semaphore(sc);
4206 CSR_WRITE(sc, WMREG_CTRL, reg);
4207 delay(20*1000);
4208 wm_put_swfwhw_semaphore(sc);
4209 break;
4210 case WM_T_82542_2_0:
4211 case WM_T_82542_2_1:
4212 case WM_T_82543:
4213 case WM_T_82540:
4214 case WM_T_82545:
4215 case WM_T_82546:
4216 case WM_T_82571:
4217 case WM_T_82572:
4218 case WM_T_82573:
4219 case WM_T_82574:
4220 case WM_T_82575:
4221 case WM_T_82576:
4222 case WM_T_82580:
4223 case WM_T_82580ER:
4224 case WM_T_82583:
4225 case WM_T_I350:
4226 case WM_T_I210:
4227 case WM_T_I211:
4228 default:
4229 /* Everything else can safely use the documented method. */
4230 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4231 break;
4232 }
4233
4234 if (phy_reset != 0)
4235 wm_get_cfg_done(sc);
4236
4237 /* reload EEPROM */
4238 switch (sc->sc_type) {
4239 case WM_T_82542_2_0:
4240 case WM_T_82542_2_1:
4241 case WM_T_82543:
4242 case WM_T_82544:
4243 delay(10);
4244 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4245 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4246 delay(2000);
4247 break;
4248 case WM_T_82540:
4249 case WM_T_82545:
4250 case WM_T_82545_3:
4251 case WM_T_82546:
4252 case WM_T_82546_3:
4253 delay(5*1000);
4254 /* XXX Disable HW ARPs on ASF enabled adapters */
4255 break;
4256 case WM_T_82541:
4257 case WM_T_82541_2:
4258 case WM_T_82547:
4259 case WM_T_82547_2:
4260 delay(20000);
4261 /* XXX Disable HW ARPs on ASF enabled adapters */
4262 break;
4263 case WM_T_82571:
4264 case WM_T_82572:
4265 case WM_T_82573:
4266 case WM_T_82574:
4267 case WM_T_82583:
4268 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4269 delay(10);
4270 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4271 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4272 }
4273 /* check EECD_EE_AUTORD */
4274 wm_get_auto_rd_done(sc);
4275 /*
4276 		 * PHY configuration from the NVM starts only after
4277 		 * EECD_AUTO_RD is set.
4278 */
4279 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4280 || (sc->sc_type == WM_T_82583))
4281 delay(25*1000);
4282 break;
4283 case WM_T_82575:
4284 case WM_T_82576:
4285 case WM_T_82580:
4286 case WM_T_82580ER:
4287 case WM_T_I350:
4288 case WM_T_I210:
4289 case WM_T_I211:
4290 case WM_T_80003:
4291 /* check EECD_EE_AUTORD */
4292 wm_get_auto_rd_done(sc);
4293 break;
4294 case WM_T_ICH8:
4295 case WM_T_ICH9:
4296 case WM_T_ICH10:
4297 case WM_T_PCH:
4298 case WM_T_PCH2:
4299 case WM_T_PCH_LPT:
4300 break;
4301 default:
4302 panic("%s: unknown type\n", __func__);
4303 }
4304
4305 /* Check whether EEPROM is present or not */
4306 switch (sc->sc_type) {
4307 case WM_T_82575:
4308 case WM_T_82576:
4309 #if 0 /* XXX */
4310 case WM_T_82580:
4311 case WM_T_82580ER:
4312 #endif
4313 case WM_T_I350:
4314 case WM_T_ICH8:
4315 case WM_T_ICH9:
4316 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4317 /* Not found */
4318 sc->sc_flags |= WM_F_EEPROM_INVALID;
4319 if ((sc->sc_type == WM_T_82575)
4320 || (sc->sc_type == WM_T_82576)
4321 || (sc->sc_type == WM_T_82580)
4322 || (sc->sc_type == WM_T_82580ER)
4323 || (sc->sc_type == WM_T_I350))
4324 wm_reset_init_script_82575(sc);
4325 }
4326 break;
4327 default:
4328 break;
4329 }
4330
4331 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4332 || (sc->sc_type == WM_T_I350)) {
4333 /* clear global device reset status bit */
4334 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4335 }
4336
4337 /* Clear any pending interrupt events. */
4338 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4339 reg = CSR_READ(sc, WMREG_ICR);
4340
4341 /* reload sc_ctrl */
4342 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4343
4344 if (sc->sc_type == WM_T_I350)
4345 wm_set_eee_i350(sc);
4346
4347 /* dummy read from WUC */
4348 if (sc->sc_type == WM_T_PCH)
4349 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4350 /*
4351 * For PCH, this write will make sure that any noise will be detected
4352 * as a CRC error and be dropped rather than show up as a bad packet
4353 * to the DMA engine
4354 */
4355 if (sc->sc_type == WM_T_PCH)
4356 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4357
4358 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4359 CSR_WRITE(sc, WMREG_WUC, 0);
4360
4361 /* XXX need special handling for 82580 */
4362 }
4363
4364 static void
4365 wm_set_vlan(struct wm_softc *sc)
4366 {
4367 /* Deal with VLAN enables. */
4368 if (VLAN_ATTACHED(&sc->sc_ethercom))
4369 sc->sc_ctrl |= CTRL_VME;
4370 else
4371 sc->sc_ctrl &= ~CTRL_VME;
4372
4373 /* Write the control registers. */
4374 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4375 }
4376
4377 /*
4378 * wm_init: [ifnet interface function]
4379 *
4380 * Initialize the interface. Must be called at splnet().
4381 */
4382 static int
4383 wm_init(struct ifnet *ifp)
4384 {
4385 struct wm_softc *sc = ifp->if_softc;
4386 struct wm_rxsoft *rxs;
4387 int i, j, trynum, error = 0;
4388 uint32_t reg;
4389
4390 /*
4391 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4392 	 * There is a small but measurable benefit to avoiding the adjustment
4393 	 * of the descriptor so that the headers are aligned, for a normal MTU,
4394 * on such platforms. One possibility is that the DMA itself is
4395 * slightly more efficient if the front of the entire packet (instead
4396 * of the front of the headers) is aligned.
4397 *
4398 * Note we must always set align_tweak to 0 if we are using
4399 * jumbo frames.
4400 */
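	/*
	 * (With align_tweak == 2, the 14-byte Ethernet header leaves the
	 * IP header of a normal frame 4-byte aligned.)
	 */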
4401 #ifdef __NO_STRICT_ALIGNMENT
4402 sc->sc_align_tweak = 0;
4403 #else
4404 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4405 sc->sc_align_tweak = 0;
4406 else
4407 sc->sc_align_tweak = 2;
4408 #endif /* __NO_STRICT_ALIGNMENT */
4409
4410 /* Cancel any pending I/O. */
4411 wm_stop(ifp, 0);
4412
4413 /* update statistics before reset */
4414 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4415 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4416
4417 /* Reset the chip to a known state. */
4418 wm_reset(sc);
4419
4420 switch (sc->sc_type) {
4421 case WM_T_82571:
4422 case WM_T_82572:
4423 case WM_T_82573:
4424 case WM_T_82574:
4425 case WM_T_82583:
4426 case WM_T_80003:
4427 case WM_T_ICH8:
4428 case WM_T_ICH9:
4429 case WM_T_ICH10:
4430 case WM_T_PCH:
4431 case WM_T_PCH2:
4432 case WM_T_PCH_LPT:
4433 if (wm_check_mng_mode(sc) != 0)
4434 wm_get_hw_control(sc);
4435 break;
4436 default:
4437 break;
4438 }
4439
4440 /* Reset the PHY. */
4441 if (sc->sc_flags & WM_F_HAS_MII)
4442 wm_gmii_reset(sc);
4443
4444 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4445 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4446 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
4447 || (sc->sc_type == WM_T_PCH_LPT))
4448 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4449
4450 /* Initialize the transmit descriptor ring. */
4451 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4452 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4453 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4454 sc->sc_txfree = WM_NTXDESC(sc);
4455 sc->sc_txnext = 0;
4456
4457 if (sc->sc_type < WM_T_82543) {
4458 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4459 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4460 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4461 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4462 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4463 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4464 } else {
4465 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4466 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4467 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4468 CSR_WRITE(sc, WMREG_TDH, 0);
4469 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4470 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4471
4472 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4473 /*
4474 * Don't write TDT before TCTL.EN is set.
4475 			 * See the documentation.
4476 */
4477 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4478 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4479 | TXDCTL_WTHRESH(0));
4480 else {
4481 CSR_WRITE(sc, WMREG_TDT, 0);
4482 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4483 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4484 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4485 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4486 }
4487 }
4488 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4489 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4490
4491 /* Initialize the transmit job descriptors. */
4492 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4493 sc->sc_txsoft[i].txs_mbuf = NULL;
4494 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4495 sc->sc_txsnext = 0;
4496 sc->sc_txsdirty = 0;
4497
4498 /*
4499 * Initialize the receive descriptor and receive job
4500 * descriptor rings.
4501 */
4502 if (sc->sc_type < WM_T_82543) {
4503 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4504 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4505 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4506 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4507 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4508 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4509
4510 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4511 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4512 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4513 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4514 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4515 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4516 } else {
4517 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4518 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4519 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4520 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4521 CSR_WRITE(sc, WMREG_EITR(0), 450);
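			/*
			 * SRRCTL expresses the RX buffer size in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES
			 * must be a multiple of that unit.
			 */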
4522 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4523 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4524 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4525 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4526 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4527 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4528 | RXDCTL_WTHRESH(1));
4529 } else {
4530 CSR_WRITE(sc, WMREG_RDH, 0);
4531 CSR_WRITE(sc, WMREG_RDT, 0);
4532 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4533 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4534 }
4535 }
4536 for (i = 0; i < WM_NRXDESC; i++) {
4537 rxs = &sc->sc_rxsoft[i];
4538 if (rxs->rxs_mbuf == NULL) {
4539 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4540 log(LOG_ERR, "%s: unable to allocate or map "
4541 "rx buffer %d, error = %d\n",
4542 device_xname(sc->sc_dev), i, error);
4543 /*
4544 * XXX Should attempt to run with fewer receive
4545 * XXX buffers instead of just failing.
4546 */
4547 wm_rxdrain(sc);
4548 goto out;
4549 }
4550 } else {
4551 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4552 WM_INIT_RXDESC(sc, i);
4553 /*
4554 			 * For the 82575 and newer devices, the RX descriptors
4555 			 * must be initialized after RCTL.EN is set in
4556 			 * wm_set_filter().
4557 */
4558 }
4559 }
4560 sc->sc_rxptr = 0;
4561 sc->sc_rxdiscard = 0;
4562 WM_RXCHAIN_RESET(sc);
4563
4564 /*
4565 * Clear out the VLAN table -- we don't use it (yet).
4566 */
4567 CSR_WRITE(sc, WMREG_VET, 0);
4568 if (sc->sc_type == WM_T_I350)
4569 trynum = 10; /* Due to hw errata */
4570 else
4571 trynum = 1;
4572 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4573 for (j = 0; j < trynum; j++)
4574 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4575
4576 /*
4577 * Set up flow-control parameters.
4578 *
4579 * XXX Values could probably stand some tuning.
4580 */
4581 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4582 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4583 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4584 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4585 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4586 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4587 }
4588
4589 sc->sc_fcrtl = FCRTL_DFLT;
4590 if (sc->sc_type < WM_T_82543) {
4591 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4592 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4593 } else {
4594 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4595 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4596 }
4597
4598 if (sc->sc_type == WM_T_80003)
4599 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4600 else
4601 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4602
4603 	/* Write the control register. */
4604 wm_set_vlan(sc);
4605
4606 if (sc->sc_flags & WM_F_HAS_MII) {
4607 int val;
4608
4609 switch (sc->sc_type) {
4610 case WM_T_80003:
4611 case WM_T_ICH8:
4612 case WM_T_ICH9:
4613 case WM_T_ICH10:
4614 case WM_T_PCH:
4615 case WM_T_PCH2:
4616 case WM_T_PCH_LPT:
4617 /*
4618 * Set the mac to wait the maximum time between each
4619 * iteration and increase the max iterations when
4620 * polling the phy; this fixes erroneous timeouts at
4621 * 10Mbps.
4622 */
4623 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4624 0xFFFF);
4625 val = wm_kmrn_readreg(sc,
4626 KUMCTRLSTA_OFFSET_INB_PARAM);
4627 val |= 0x3F;
4628 wm_kmrn_writereg(sc,
4629 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4630 break;
4631 default:
4632 break;
4633 }
4634
4635 if (sc->sc_type == WM_T_80003) {
4636 val = CSR_READ(sc, WMREG_CTRL_EXT);
4637 val &= ~CTRL_EXT_LINK_MODE_MASK;
4638 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4639
4640 			/* Bypass the RX and TX FIFOs */
4641 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4642 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4643 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4644 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4645 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4646 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4647 }
4648 }
4649 #if 0
4650 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4651 #endif
4652
4653 /*
4654 * Set up checksum offload parameters.
4655 */
4656 reg = CSR_READ(sc, WMREG_RXCSUM);
4657 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4658 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4659 reg |= RXCSUM_IPOFL;
4660 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4661 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4662 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4663 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4664 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4665
4666 /* Reset TBI's RXCFG count */
4667 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4668
4669 /*
4670 * Set up the interrupt registers.
4671 */
4672 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4673 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4674 ICR_RXO | ICR_RXT0;
4675 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4676 sc->sc_icr |= ICR_RXCFG;
4677 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4678
4679 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4680 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4681 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4682 reg = CSR_READ(sc, WMREG_KABGTXD);
4683 reg |= KABGTXD_BGSQLBIAS;
4684 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4685 }
4686
4687 /* Set up the inter-packet gap. */
4688 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4689
4690 if (sc->sc_type >= WM_T_82543) {
4691 /*
4692 * Set up the interrupt throttling register (units of 256ns)
4693 * Note that a footnote in Intel's documentation says this
4694 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4695 		 * or 10Mbit mode.  Empirically, this appears to hold
4696 		 * also for the 1024ns units of the other
4697 * interrupt-related timer registers -- so, really, we ought
4698 * to divide this value by 4 when the link speed is low.
4699 *
4700 * XXX implement this division at link speed change!
4701 */
4702
4703 /*
4704 * For N interrupts/sec, set this value to:
4705 * 1000000000 / (N * 256). Note that we set the
4706 * absolute and packet timer values to this value
4707 * divided by 4 to get "simple timer" behavior.
4708 */
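		/*
		 * For example, the default of 1500 below yields
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.
		 */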
4709
4710 sc->sc_itr = 1500; /* 2604 ints/sec */
4711 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4712 }
4713
4714 /* Set the VLAN ethernetype. */
4715 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4716
4717 /*
4718 * Set up the transmit control register; we start out with
4719 	 * a collision distance suitable for FDX, but update it when
4720 * we resolve the media type.
4721 */
4722 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4723 | TCTL_CT(TX_COLLISION_THRESHOLD)
4724 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4725 if (sc->sc_type >= WM_T_82571)
4726 sc->sc_tctl |= TCTL_MULR;
4727 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4728
4729 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4730 /*
4731 * Write TDT after TCTL.EN is set.
4732 		 * See the documentation.
4733 */
4734 CSR_WRITE(sc, WMREG_TDT, 0);
4735 }
4736
4737 if (sc->sc_type == WM_T_80003) {
4738 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4739 reg &= ~TCTL_EXT_GCEX_MASK;
4740 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4741 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4742 }
4743
4744 /* Set the media. */
4745 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4746 goto out;
4747
4748 /* Configure for OS presence */
4749 wm_init_manageability(sc);
4750
4751 /*
4752 * Set up the receive control register; we actually program
4753 * the register when we set the receive filter. Use multicast
4754 * address offset type 0.
4755 *
4756 * Only the i82544 has the ability to strip the incoming
4757 * CRC, so we don't enable that feature.
4758 */
4759 sc->sc_mchash_type = 0;
4760 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4761 | RCTL_MO(sc->sc_mchash_type);
4762
4763 /*
4764 * The I350 has a bug where it always strips the CRC whether
4765 	 * asked to or not.  So ask for the stripped CRC here and cope in wm_rxintr().
4766 	 */
4767 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210))
4768 sc->sc_rctl |= RCTL_SECRC;
4769
4770 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4771 && (ifp->if_mtu > ETHERMTU)) {
4772 sc->sc_rctl |= RCTL_LPE;
4773 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4774 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4775 }
4776
4777 if (MCLBYTES == 2048) {
4778 sc->sc_rctl |= RCTL_2k;
4779 } else {
4780 if (sc->sc_type >= WM_T_82543) {
4781 switch (MCLBYTES) {
4782 case 4096:
4783 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4784 break;
4785 case 8192:
4786 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4787 break;
4788 case 16384:
4789 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4790 break;
4791 default:
4792 panic("wm_init: MCLBYTES %d unsupported",
4793 MCLBYTES);
4794 break;
4795 }
4796 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4797 }
4798
4799 /* Set the receive filter. */
4800 wm_set_filter(sc);
4801
4802 /* Enable ECC */
4803 switch (sc->sc_type) {
4804 case WM_T_82571:
4805 reg = CSR_READ(sc, WMREG_PBA_ECC);
4806 reg |= PBA_ECC_CORR_EN;
4807 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4808 break;
4809 case WM_T_PCH_LPT:
4810 reg = CSR_READ(sc, WMREG_PBECCSTS);
4811 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4812 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4813
4814 reg = CSR_READ(sc, WMREG_CTRL);
4815 reg |= CTRL_MEHE;
4816 CSR_WRITE(sc, WMREG_CTRL, reg);
4817 break;
4818 default:
4819 break;
4820 }
4821
4822 	/* On the 82575 and later, set RDT only if RX is enabled */
4823 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4824 for (i = 0; i < WM_NRXDESC; i++)
4825 WM_INIT_RXDESC(sc, i);
4826
4827 /* Start the one second link check clock. */
4828 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4829
4830 /* ...all done! */
4831 ifp->if_flags |= IFF_RUNNING;
4832 ifp->if_flags &= ~IFF_OACTIVE;
4833
4834 out:
4835 sc->sc_if_flags = ifp->if_flags;
4836 if (error)
4837 log(LOG_ERR, "%s: interface not running\n",
4838 device_xname(sc->sc_dev));
4839 return error;
4840 }
4841
4842 /*
4843 * wm_rxdrain:
4844 *
4845 * Drain the receive queue.
4846 */
4847 static void
4848 wm_rxdrain(struct wm_softc *sc)
4849 {
4850 struct wm_rxsoft *rxs;
4851 int i;
4852
4853 for (i = 0; i < WM_NRXDESC; i++) {
4854 rxs = &sc->sc_rxsoft[i];
4855 if (rxs->rxs_mbuf != NULL) {
4856 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4857 m_freem(rxs->rxs_mbuf);
4858 rxs->rxs_mbuf = NULL;
4859 }
4860 }
4861 }
4862
4863 /*
4864 * wm_stop: [ifnet interface function]
4865 *
4866 * Stop transmission on the interface.
4867 */
4868 static void
4869 wm_stop(struct ifnet *ifp, int disable)
4870 {
4871 struct wm_softc *sc = ifp->if_softc;
4872 struct wm_txsoft *txs;
4873 int i;
4874
4875 /* Stop the one second clock. */
4876 callout_stop(&sc->sc_tick_ch);
4877
4878 /* Stop the 82547 Tx FIFO stall check timer. */
4879 if (sc->sc_type == WM_T_82547)
4880 callout_stop(&sc->sc_txfifo_ch);
4881
4882 if (sc->sc_flags & WM_F_HAS_MII) {
4883 /* Down the MII. */
4884 mii_down(&sc->sc_mii);
4885 } else {
4886 #if 0
4887 /* Should we clear PHY's status properly? */
4888 wm_reset(sc);
4889 #endif
4890 }
4891
4892 /* Stop the transmit and receive processes. */
4893 CSR_WRITE(sc, WMREG_TCTL, 0);
4894 CSR_WRITE(sc, WMREG_RCTL, 0);
4895 sc->sc_rctl &= ~RCTL_EN;
4896
4897 /*
4898 * Clear the interrupt mask to ensure the device cannot assert its
4899 * interrupt line.
4900 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4901 * any currently pending or shared interrupt.
4902 */
4903 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4904 sc->sc_icr = 0;
4905
4906 /* Release any queued transmit buffers. */
4907 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4908 txs = &sc->sc_txsoft[i];
4909 if (txs->txs_mbuf != NULL) {
4910 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4911 m_freem(txs->txs_mbuf);
4912 txs->txs_mbuf = NULL;
4913 }
4914 }
4915
4916 /* Mark the interface as down and cancel the watchdog timer. */
4917 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4918 ifp->if_timer = 0;
4919
4920 if (disable)
4921 wm_rxdrain(sc);
4922
4923 #if 0 /* notyet */
4924 if (sc->sc_type >= WM_T_82544)
4925 CSR_WRITE(sc, WMREG_WUC, 0);
4926 #endif
4927 }
4928
4929 void
4930 wm_get_auto_rd_done(struct wm_softc *sc)
4931 {
4932 int i;
4933
4934 /* wait for eeprom to reload */
4935 switch (sc->sc_type) {
4936 case WM_T_82571:
4937 case WM_T_82572:
4938 case WM_T_82573:
4939 case WM_T_82574:
4940 case WM_T_82583:
4941 case WM_T_82575:
4942 case WM_T_82576:
4943 case WM_T_82580:
4944 case WM_T_82580ER:
4945 case WM_T_I350:
4946 case WM_T_I210:
4947 case WM_T_I211:
4948 case WM_T_80003:
4949 case WM_T_ICH8:
4950 case WM_T_ICH9:
4951 for (i = 0; i < 10; i++) {
4952 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4953 break;
4954 delay(1000);
4955 }
4956 if (i == 10) {
4957 log(LOG_ERR, "%s: auto read from eeprom failed to "
4958 "complete\n", device_xname(sc->sc_dev));
4959 }
4960 break;
4961 default:
4962 break;
4963 }
4964 }
4965
4966 void
4967 wm_lan_init_done(struct wm_softc *sc)
4968 {
4969 uint32_t reg = 0;
4970 int i;
4971
4972 /* wait for eeprom to reload */
4973 switch (sc->sc_type) {
4974 case WM_T_ICH10:
4975 case WM_T_PCH:
4976 case WM_T_PCH2:
4977 case WM_T_PCH_LPT:
4978 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4979 reg = CSR_READ(sc, WMREG_STATUS);
4980 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4981 break;
4982 delay(100);
4983 }
4984 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4985 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4986 "complete\n", device_xname(sc->sc_dev), __func__);
4987 }
4988 break;
4989 default:
4990 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4991 __func__);
4992 break;
4993 }
4994
4995 reg &= ~STATUS_LAN_INIT_DONE;
4996 CSR_WRITE(sc, WMREG_STATUS, reg);
4997 }
4998
4999 void
5000 wm_get_cfg_done(struct wm_softc *sc)
5001 {
5002 int mask;
5003 uint32_t reg;
5004 int i;
5005
5006 /* wait for eeprom to reload */
5007 switch (sc->sc_type) {
5008 case WM_T_82542_2_0:
5009 case WM_T_82542_2_1:
5010 /* null */
5011 break;
5012 case WM_T_82543:
5013 case WM_T_82544:
5014 case WM_T_82540:
5015 case WM_T_82545:
5016 case WM_T_82545_3:
5017 case WM_T_82546:
5018 case WM_T_82546_3:
5019 case WM_T_82541:
5020 case WM_T_82541_2:
5021 case WM_T_82547:
5022 case WM_T_82547_2:
5023 case WM_T_82573:
5024 case WM_T_82574:
5025 case WM_T_82583:
5026 /* generic */
5027 delay(10*1000);
5028 break;
5029 case WM_T_80003:
5030 case WM_T_82571:
5031 case WM_T_82572:
5032 case WM_T_82575:
5033 case WM_T_82576:
5034 case WM_T_82580:
5035 case WM_T_82580ER:
5036 case WM_T_I350:
5037 case WM_T_I210:
5038 case WM_T_I211:
5039 if (sc->sc_type == WM_T_82571) {
5040 			/* Both ports of the 82571 share the port 0 bit */
5041 mask = EEMNGCTL_CFGDONE_0;
5042 } else
5043 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5044 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5045 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5046 break;
5047 delay(1000);
5048 }
5049 if (i >= WM_PHY_CFG_TIMEOUT) {
5050 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5051 device_xname(sc->sc_dev), __func__));
5052 }
5053 break;
5054 case WM_T_ICH8:
5055 case WM_T_ICH9:
5056 case WM_T_ICH10:
5057 case WM_T_PCH:
5058 case WM_T_PCH2:
5059 case WM_T_PCH_LPT:
5060 delay(10*1000);
5061 if (sc->sc_type >= WM_T_ICH10)
5062 wm_lan_init_done(sc);
5063 else
5064 wm_get_auto_rd_done(sc);
5065
5066 reg = CSR_READ(sc, WMREG_STATUS);
5067 if ((reg & STATUS_PHYRA) != 0)
5068 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
5069 break;
5070 default:
5071 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5072 __func__);
5073 break;
5074 }
5075 }
5076
5077 /*
5078 * wm_acquire_eeprom:
5079 *
5080 * Perform the EEPROM handshake required on some chips.
5081 */
5082 static int
5083 wm_acquire_eeprom(struct wm_softc *sc)
5084 {
5085 uint32_t reg;
5086 int x;
5087 int ret = 0;
5088
5089 	/* Flash-backed EEPROM needs no handshake; always succeeds */
5090 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5091 return 0;
5092
5093 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5094 ret = wm_get_swfwhw_semaphore(sc);
5095 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5096 /* this will also do wm_get_swsm_semaphore() if needed */
5097 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5098 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5099 ret = wm_get_swsm_semaphore(sc);
5100 }
5101
5102 if (ret) {
5103 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5104 __func__);
5105 return 1;
5106 }
5107
5108 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5109 reg = CSR_READ(sc, WMREG_EECD);
5110
5111 /* Request EEPROM access. */
5112 reg |= EECD_EE_REQ;
5113 CSR_WRITE(sc, WMREG_EECD, reg);
5114
5115 /* ..and wait for it to be granted. */
5116 for (x = 0; x < 1000; x++) {
5117 reg = CSR_READ(sc, WMREG_EECD);
5118 if (reg & EECD_EE_GNT)
5119 break;
5120 delay(5);
5121 }
5122 if ((reg & EECD_EE_GNT) == 0) {
5123 aprint_error_dev(sc->sc_dev,
5124 "could not acquire EEPROM GNT\n");
5125 reg &= ~EECD_EE_REQ;
5126 CSR_WRITE(sc, WMREG_EECD, reg);
5127 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5128 wm_put_swfwhw_semaphore(sc);
5129 if (sc->sc_flags & WM_F_SWFW_SYNC)
5130 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5131 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5132 wm_put_swsm_semaphore(sc);
5133 return 1;
5134 }
5135 }
5136
5137 return 0;
5138 }
5139
5140 /*
5141 * wm_release_eeprom:
5142 *
5143 * Release the EEPROM mutex.
5144 */
5145 static void
5146 wm_release_eeprom(struct wm_softc *sc)
5147 {
5148 uint32_t reg;
5149
5150 	/* Flash-backed EEPROM: nothing to release */
5151 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5152 return;
5153
5154 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5155 reg = CSR_READ(sc, WMREG_EECD);
5156 reg &= ~EECD_EE_REQ;
5157 CSR_WRITE(sc, WMREG_EECD, reg);
5158 }
5159
5160 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5161 wm_put_swfwhw_semaphore(sc);
5162 if (sc->sc_flags & WM_F_SWFW_SYNC)
5163 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5164 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5165 wm_put_swsm_semaphore(sc);
5166 }
5167
5168 /*
5169 * wm_eeprom_sendbits:
5170 *
5171 * Send a series of bits to the EEPROM.
5172 */
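/*
 * Bits are shifted out MSB first: each bit is placed on EECD_DI and
 * then clocked into the EEPROM by pulsing EECD_SK high and low.
 */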
5173 static void
5174 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5175 {
5176 uint32_t reg;
5177 int x;
5178
5179 reg = CSR_READ(sc, WMREG_EECD);
5180
5181 for (x = nbits; x > 0; x--) {
5182 if (bits & (1U << (x - 1)))
5183 reg |= EECD_DI;
5184 else
5185 reg &= ~EECD_DI;
5186 CSR_WRITE(sc, WMREG_EECD, reg);
5187 delay(2);
5188 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5189 delay(2);
5190 CSR_WRITE(sc, WMREG_EECD, reg);
5191 delay(2);
5192 }
5193 }
5194
5195 /*
5196 * wm_eeprom_recvbits:
5197 *
5198 * Receive a series of bits from the EEPROM.
5199 */
5200 static void
5201 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5202 {
5203 uint32_t reg, val;
5204 int x;
5205
5206 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5207
5208 val = 0;
5209 for (x = nbits; x > 0; x--) {
5210 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5211 delay(2);
5212 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5213 val |= (1U << (x - 1));
5214 CSR_WRITE(sc, WMREG_EECD, reg);
5215 delay(2);
5216 }
5217 *valp = val;
5218 }
5219
5220 /*
5221 * wm_read_eeprom_uwire:
5222 *
5223 * Read a word from the EEPROM using the MicroWire protocol.
5224 */
5225 static int
5226 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5227 {
5228 uint32_t reg, val;
5229 int i;
5230
5231 for (i = 0; i < wordcnt; i++) {
5232 /* Clear SK and DI. */
5233 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5234 CSR_WRITE(sc, WMREG_EECD, reg);
5235
5236 /*
5237 		 * XXX: workaround for a bug in qemu-0.12.x and prior,
5238 		 * and in Xen.
5239 *
5240 * We use this workaround only for 82540 because qemu's
5241 		 * e1000 acts as an 82540.
5242 */
5243 if (sc->sc_type == WM_T_82540) {
5244 reg |= EECD_SK;
5245 CSR_WRITE(sc, WMREG_EECD, reg);
5246 reg &= ~EECD_SK;
5247 CSR_WRITE(sc, WMREG_EECD, reg);
5248 delay(2);
5249 }
5250 /* XXX: end of workaround */
5251
5252 /* Set CHIP SELECT. */
5253 reg |= EECD_CS;
5254 CSR_WRITE(sc, WMREG_EECD, reg);
5255 delay(2);
5256
5257 /* Shift in the READ command. */
5258 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5259
5260 /* Shift in address. */
5261 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5262
5263 /* Shift out the data. */
5264 wm_eeprom_recvbits(sc, &val, 16);
5265 data[i] = val & 0xffff;
5266
5267 /* Clear CHIP SELECT. */
5268 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5269 CSR_WRITE(sc, WMREG_EECD, reg);
5270 delay(2);
5271 }
5272
5273 return 0;
5274 }
5275
5276 /*
5277 * wm_spi_eeprom_ready:
5278 *
5279 * Wait for a SPI EEPROM to be ready for commands.
5280 */
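/*
 * The loop below exits once SPI_SR_RDY reads back clear, i.e. once
 * the EEPROM has finished its previous operation.
 */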
5281 static int
5282 wm_spi_eeprom_ready(struct wm_softc *sc)
5283 {
5284 uint32_t val;
5285 int usec;
5286
5287 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5288 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5289 wm_eeprom_recvbits(sc, &val, 8);
5290 if ((val & SPI_SR_RDY) == 0)
5291 break;
5292 }
5293 if (usec >= SPI_MAX_RETRIES) {
5294 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5295 return 1;
5296 }
5297 return 0;
5298 }
5299
5300 /*
5301 * wm_read_eeprom_spi:
5302 *
5303  *	Read a word from the EEPROM using the SPI protocol.
5304 */
5305 static int
5306 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5307 {
5308 uint32_t reg, val;
5309 int i;
5310 uint8_t opc;
5311
5312 /* Clear SK and CS. */
5313 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5314 CSR_WRITE(sc, WMREG_EECD, reg);
5315 delay(2);
5316
5317 if (wm_spi_eeprom_ready(sc))
5318 return 1;
5319
5320 /* Toggle CS to flush commands. */
5321 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5322 delay(2);
5323 CSR_WRITE(sc, WMREG_EECD, reg);
5324 delay(2);
5325
5326 opc = SPI_OPC_READ;
5327 if (sc->sc_ee_addrbits == 8 && word >= 128)
5328 opc |= SPI_OPC_A8;
5329
5330 wm_eeprom_sendbits(sc, opc, 8);
5331 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5332
5333 for (i = 0; i < wordcnt; i++) {
5334 wm_eeprom_recvbits(sc, &val, 16);
5335 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5336 }
5337
5338 /* Raise CS and clear SK. */
5339 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5340 CSR_WRITE(sc, WMREG_EECD, reg);
5341 delay(2);
5342
5343 return 0;
5344 }
5345
5346 #define NVM_CHECKSUM 0xBABA
5347 #define EEPROM_SIZE 0x0040
5348 #define NVM_COMPAT 0x0003
5349 #define NVM_COMPAT_VALID_CHECKSUM 0x0001
5350 #define NVM_FUTURE_INIT_WORD1 0x0019
5351 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
5352
5353 /*
5354 * wm_validate_eeprom_checksum
5355 *
5356 * The checksum is defined as the sum of the first 64 (16 bit) words.
5357 */
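/*
 * That is, the 16-bit words at offsets 0x00-0x3f must sum, modulo
 * 2^16, to NVM_CHECKSUM (0xBABA).
 */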
5358 static int
5359 wm_validate_eeprom_checksum(struct wm_softc *sc)
5360 {
5361 uint16_t checksum, valid_checksum;
5362 uint16_t eeprom_data;
5363 uint16_t csum_wordaddr;
5364 int i;
5365
5366 checksum = 0;
5367
5368 /* Don't check for I211 */
5369 if (sc->sc_type == WM_T_I211)
5370 return 0;
5371
5372 if (sc->sc_type == WM_T_PCH_LPT) {
5373 csum_wordaddr = NVM_COMPAT;
5374 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5375 } else {
5376 csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5377 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5378 }
5379
5380 #ifdef WM_DEBUG
5381 /* Dump EEPROM image for debug */
5382 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5383 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5384 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5385 wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5386 if ((eeprom_data & valid_checksum) == 0) {
5387 DPRINTF(WM_DEBUG_NVM,
5388 ("%s: NVM need to be updated (%04x != %04x)\n",
5389 device_xname(sc->sc_dev), eeprom_data,
5390 valid_checksum));
5391 }
5392 }
5393
5394 if ((wm_debug & WM_DEBUG_NVM) != 0) {
5395 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5396 for (i = 0; i < EEPROM_SIZE; i++) {
5397 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5398 printf("XX ");
5399 else
5400 printf("%04x ", eeprom_data);
5401 if (i % 8 == 7)
5402 printf("\n");
5403 }
5404 }
5405
5406 #endif /* WM_DEBUG */
5407
5408 for (i = 0; i < EEPROM_SIZE; i++) {
5409 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5410 return 1;
5411 checksum += eeprom_data;
5412 }
5413
5414 if (checksum != (uint16_t) NVM_CHECKSUM) {
5415 #ifdef WM_DEBUG
5416 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5417 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5418 #endif
5419 }
5420
5421 return 0;
5422 }
5423
5424 /*
5425 * wm_read_eeprom:
5426 *
5427 * Read data from the serial EEPROM.
5428 */
5429 static int
5430 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5431 {
5432 int rv;
5433
5434 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5435 return 1;
5436
5437 if (wm_acquire_eeprom(sc))
5438 return 1;
5439
5440 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5441 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5442 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5443 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5444 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5445 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5446 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5447 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5448 else
5449 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5450
5451 wm_release_eeprom(sc);
5452 return rv;
5453 }
5454
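/*
 * wm_read_eeprom_eerd:
 *
 *	Read words from the EEPROM using the EERD register interface:
 *	write the word address together with EERD_START, poll for
 *	EERD_DONE, then read the data from the upper bits of EERD.
 */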
5455 static int
5456 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5457 uint16_t *data)
5458 {
5459 int i, eerd = 0;
5460 int error = 0;
5461
5462 for (i = 0; i < wordcnt; i++) {
5463 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5464
5465 CSR_WRITE(sc, WMREG_EERD, eerd);
5466 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5467 if (error != 0)
5468 break;
5469
5470 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5471 }
5472
5473 return error;
5474 }
5475
5476 static int
5477 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5478 {
5479 uint32_t attempts = 100000;
5480 uint32_t i, reg = 0;
5481 int32_t done = -1;
5482
5483 for (i = 0; i < attempts; i++) {
5484 reg = CSR_READ(sc, rw);
5485
5486 if (reg & EERD_DONE) {
5487 done = 0;
5488 break;
5489 }
5490 delay(5);
5491 }
5492
5493 return done;
5494 }
5495
5496 static int
5497 wm_check_alt_mac_addr(struct wm_softc *sc)
5498 {
5499 uint16_t myea[ETHER_ADDR_LEN / 2];
5500 uint16_t offset = EEPROM_OFF_MACADDR;
5501
5502 /* Try to read alternative MAC address pointer */
5503 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5504 return -1;
5505
5506 /* Check pointer */
5507 if (offset == 0xffff)
5508 return -1;
5509
5510 /*
5511 	 * Check whether the alternative MAC address is valid.
5512 	 * Some cards have a non-0xffff pointer but don't actually
5513 	 * use an alternative MAC address.
5514 	 *
5515 	 * A valid unicast address must have the multicast bit clear.
5516 */
5517 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5518 if (((myea[0] & 0xff) & 0x01) == 0)
5519 return 0; /* found! */
5520
5521 /* not found */
5522 return -1;
5523 }
5524
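/*
 * wm_read_mac_addr:
 *
 *	Read the station address from the EEPROM/NVM, selecting the
 *	per-function offset (or the alternative MAC address) where
 *	applicable.
 */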
5525 static int
5526 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5527 {
5528 uint16_t myea[ETHER_ADDR_LEN / 2];
5529 uint16_t offset = EEPROM_OFF_MACADDR;
5530 int do_invert = 0;
5531
5532 switch (sc->sc_type) {
5533 case WM_T_82580:
5534 case WM_T_82580ER:
5535 case WM_T_I350:
5536 switch (sc->sc_funcid) {
5537 case 0:
5538 /* default value (== EEPROM_OFF_MACADDR) */
5539 break;
5540 case 1:
5541 offset = EEPROM_OFF_LAN1;
5542 break;
5543 case 2:
5544 offset = EEPROM_OFF_LAN2;
5545 break;
5546 case 3:
5547 offset = EEPROM_OFF_LAN3;
5548 break;
5549 default:
5550 goto bad;
5551 /* NOTREACHED */
5552 break;
5553 }
5554 break;
5555 case WM_T_82571:
5556 case WM_T_82575:
5557 case WM_T_82576:
5558 case WM_T_80003:
5559 case WM_T_I210:
5560 case WM_T_I211:
5561 if (wm_check_alt_mac_addr(sc) != 0) {
5562 /* reset the offset to LAN0 */
5563 offset = EEPROM_OFF_MACADDR;
5564 if ((sc->sc_funcid & 0x01) == 1)
5565 do_invert = 1;
5566 goto do_read;
5567 }
5568 switch (sc->sc_funcid) {
5569 case 0:
5570 /*
5571 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5572 * itself.
5573 */
5574 break;
5575 case 1:
5576 offset += EEPROM_OFF_MACADDR_LAN1;
5577 break;
5578 case 2:
5579 offset += EEPROM_OFF_MACADDR_LAN2;
5580 break;
5581 case 3:
5582 offset += EEPROM_OFF_MACADDR_LAN3;
5583 break;
5584 default:
5585 goto bad;
5586 /* NOTREACHED */
5587 break;
5588 }
5589 break;
5590 default:
5591 if ((sc->sc_funcid & 0x01) == 1)
5592 do_invert = 1;
5593 break;
5594 }
5595
5596 do_read:
5597 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5598 myea) != 0) {
5599 goto bad;
5600 }
5601
5602 enaddr[0] = myea[0] & 0xff;
5603 enaddr[1] = myea[0] >> 8;
5604 enaddr[2] = myea[1] & 0xff;
5605 enaddr[3] = myea[1] >> 8;
5606 enaddr[4] = myea[2] & 0xff;
5607 enaddr[5] = myea[2] >> 8;
5608
5609 /*
5610 * Toggle the LSB of the MAC address on the second port
5611 * of some dual port cards.
5612 */
5613 if (do_invert != 0)
5614 enaddr[5] ^= 1;
5615
5616 return 0;
5617
5618 bad:
5619 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5620
5621 return -1;
5622 }
5623
5624 /*
5625 * wm_add_rxbuf:
5626 *
5627  *	Add a receive buffer to the indicated descriptor.
5628 */
5629 static int
5630 wm_add_rxbuf(struct wm_softc *sc, int idx)
5631 {
5632 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5633 struct mbuf *m;
5634 int error;
5635
5636 MGETHDR(m, M_DONTWAIT, MT_DATA);
5637 if (m == NULL)
5638 return ENOBUFS;
5639
5640 MCLGET(m, M_DONTWAIT);
5641 if ((m->m_flags & M_EXT) == 0) {
5642 m_freem(m);
5643 return ENOBUFS;
5644 }
5645
5646 if (rxs->rxs_mbuf != NULL)
5647 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5648
5649 rxs->rxs_mbuf = m;
5650
5651 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5652 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5653 BUS_DMA_READ|BUS_DMA_NOWAIT);
5654 if (error) {
5655 /* XXX XXX XXX */
5656 aprint_error_dev(sc->sc_dev,
5657 "unable to load rx DMA map %d, error = %d\n",
5658 idx, error);
5659 panic("wm_add_rxbuf");
5660 }
5661
5662 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5663 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5664
5665 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5666 if ((sc->sc_rctl & RCTL_EN) != 0)
5667 WM_INIT_RXDESC(sc, idx);
5668 } else
5669 WM_INIT_RXDESC(sc, idx);
5670
5671 return 0;
5672 }
5673
5674 /*
5675 * wm_set_ral:
5676 *
5677  *	Set an entry in the receive address list.
5678 */
5679 static void
5680 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5681 {
5682 uint32_t ral_lo, ral_hi;
5683
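	/*
	 * Pack the address into the low/high register pair, low-order
	 * bytes first: e.g. 00:11:22:33:44:55 becomes ral_lo 0x33221100
	 * and ral_hi 0x5544, plus RAL_AV to mark the entry valid.
	 */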
5684 if (enaddr != NULL) {
5685 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5686 (enaddr[3] << 24);
5687 ral_hi = enaddr[4] | (enaddr[5] << 8);
5688 ral_hi |= RAL_AV;
5689 } else {
5690 ral_lo = 0;
5691 ral_hi = 0;
5692 }
5693
5694 if (sc->sc_type >= WM_T_82544) {
5695 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5696 ral_lo);
5697 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5698 ral_hi);
5699 } else {
5700 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5701 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5702 }
5703 }
5704
5705 /*
5706 * wm_mchash:
5707 *
5708 * Compute the hash of the multicast address for the 4096-bit
5709 * multicast filter.
5710 */
5711 static uint32_t
5712 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5713 {
5714 static const int lo_shift[4] = { 4, 3, 2, 0 };
5715 static const int hi_shift[4] = { 4, 5, 6, 8 };
5716 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5717 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5718 uint32_t hash;
5719
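	/*
	 * The hash is built from the top bits of the last two address
	 * octets, e.g. with filter type 0: hash = (enaddr[4] >> 4) |
	 * (enaddr[5] << 4), giving a 12-bit index (10 bits on ICH/PCH).
	 */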
5720 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5721 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5722 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5723 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5724 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5725 return (hash & 0x3ff);
5726 }
5727 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5728 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5729
5730 return (hash & 0xfff);
5731 }
5732
5733 /*
5734 * wm_set_filter:
5735 *
5736 * Set up the receive filter.
5737 */
5738 static void
5739 wm_set_filter(struct wm_softc *sc)
5740 {
5741 struct ethercom *ec = &sc->sc_ethercom;
5742 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5743 struct ether_multi *enm;
5744 struct ether_multistep step;
5745 bus_addr_t mta_reg;
5746 uint32_t hash, reg, bit;
5747 int i, size;
5748
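	/*
	 * Strategy: put the station address in RAL slot 0, clear the
	 * remaining slots, then program the multicast hash table.
	 * Promiscuous mode and multicast ranges fall back to the UPE
	 * and MPE bits instead.
	 */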
5749 if (sc->sc_type >= WM_T_82544)
5750 mta_reg = WMREG_CORDOVA_MTA;
5751 else
5752 mta_reg = WMREG_MTA;
5753
5754 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5755
5756 if (ifp->if_flags & IFF_BROADCAST)
5757 sc->sc_rctl |= RCTL_BAM;
5758 if (ifp->if_flags & IFF_PROMISC) {
5759 sc->sc_rctl |= RCTL_UPE;
5760 goto allmulti;
5761 }
5762
5763 /*
5764 * Set the station address in the first RAL slot, and
5765 * clear the remaining slots.
5766 */
5767 if (sc->sc_type == WM_T_ICH8)
5768 		size = WM_RAL_TABSIZE_ICH8 - 1;
5769 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5770 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
5771 || (sc->sc_type == WM_T_PCH_LPT))
5772 size = WM_RAL_TABSIZE_ICH8;
5773 else if (sc->sc_type == WM_T_82575)
5774 size = WM_RAL_TABSIZE_82575;
5775 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5776 size = WM_RAL_TABSIZE_82576;
5777 else if (sc->sc_type == WM_T_I350)
5778 size = WM_RAL_TABSIZE_I350;
5779 else
5780 size = WM_RAL_TABSIZE;
5781 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5782 for (i = 1; i < size; i++)
5783 wm_set_ral(sc, NULL, i);
5784
5785 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5786 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5787 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5788 size = WM_ICH8_MC_TABSIZE;
5789 else
5790 size = WM_MC_TABSIZE;
5791 /* Clear out the multicast table. */
5792 for (i = 0; i < size; i++)
5793 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5794
5795 ETHER_FIRST_MULTI(step, ec, enm);
5796 while (enm != NULL) {
5797 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5798 /*
5799 * We must listen to a range of multicast addresses.
5800 * For now, just accept all multicasts, rather than
5801 * trying to set only those filter bits needed to match
5802 * the range. (At this time, the only use of address
5803 * ranges is for IP multicast routing, for which the
5804 * range is big enough to require all bits set.)
5805 */
5806 goto allmulti;
5807 }
5808
5809 hash = wm_mchash(sc, enm->enm_addrlo);
5810
5811 reg = (hash >> 5);
5812 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5813 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5814 || (sc->sc_type == WM_T_PCH2)
5815 || (sc->sc_type == WM_T_PCH_LPT))
5816 reg &= 0x1f;
5817 else
5818 reg &= 0x7f;
5819 bit = hash & 0x1f;
5820
5821 hash = CSR_READ(sc, mta_reg + (reg << 2));
5822 hash |= 1U << bit;
5823
5824 		/*
 		 * XXX Hardware bug?  On the 82544, writing some MTA
 		 * registers apparently clobbers the preceding one, so
 		 * save the neighbour and rewrite it afterwards.
 		 */
5825 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5826 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5827 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5828 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5829 } else
5830 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5831
5832 ETHER_NEXT_MULTI(step, enm);
5833 }
5834
5835 ifp->if_flags &= ~IFF_ALLMULTI;
5836 goto setit;
5837
5838 allmulti:
5839 ifp->if_flags |= IFF_ALLMULTI;
5840 sc->sc_rctl |= RCTL_MPE;
5841
5842 setit:
5843 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5844 }
5845
5846 /*
5847 * wm_tbi_mediainit:
5848 *
5849 * Initialize media for use on 1000BASE-X devices.
5850 */
5851 static void
5852 wm_tbi_mediainit(struct wm_softc *sc)
5853 {
5854 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5855 const char *sep = "";
5856
5857 if (sc->sc_type < WM_T_82543)
5858 sc->sc_tipg = TIPG_WM_DFLT;
5859 else
5860 sc->sc_tipg = TIPG_LG_DFLT;
5861
5862 sc->sc_tbi_anegticks = 5;
5863
5864 /* Initialize our media structures */
5865 sc->sc_mii.mii_ifp = ifp;
5866
5867 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5868 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5869 wm_tbi_mediastatus);
5870
5871 /*
5872 * SWD Pins:
5873 *
5874 * 0 = Link LED (output)
5875 * 1 = Loss Of Signal (input)
5876 */
5877 sc->sc_ctrl |= CTRL_SWDPIO(0);
5878 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5879
5880 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5881
5882 #define ADD(ss, mm, dd) \
5883 do { \
5884 aprint_normal("%s%s", sep, ss); \
5885 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5886 sep = ", "; \
5887 } while (/*CONSTCOND*/0)
5888
5889 aprint_normal_dev(sc->sc_dev, "");
5890 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5891 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5892 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5893 aprint_normal("\n");
5894
5895 #undef ADD
5896
5897 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5898 }
5899
5900 /*
5901 * wm_tbi_mediastatus: [ifmedia interface function]
5902 *
5903 * Get the current interface media status on a 1000BASE-X device.
5904 */
5905 static void
5906 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5907 {
5908 struct wm_softc *sc = ifp->if_softc;
5909 uint32_t ctrl, status;
5910
5911 ifmr->ifm_status = IFM_AVALID;
5912 ifmr->ifm_active = IFM_ETHER;
5913
5914 status = CSR_READ(sc, WMREG_STATUS);
5915 if ((status & STATUS_LU) == 0) {
5916 ifmr->ifm_active |= IFM_NONE;
5917 return;
5918 }
5919
5920 ifmr->ifm_status |= IFM_ACTIVE;
5921 ifmr->ifm_active |= IFM_1000_SX;
5922 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5923 ifmr->ifm_active |= IFM_FDX;
5924 ctrl = CSR_READ(sc, WMREG_CTRL);
5925 if (ctrl & CTRL_RFCE)
5926 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5927 if (ctrl & CTRL_TFCE)
5928 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5929 }
5930
5931 /*
5932 * wm_tbi_mediachange: [ifmedia interface function]
5933 *
5934 * Set hardware to newly-selected media on a 1000BASE-X device.
5935 */
5936 static int
5937 wm_tbi_mediachange(struct ifnet *ifp)
5938 {
5939 struct wm_softc *sc = ifp->if_softc;
5940 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5941 uint32_t status;
5942 int i;
5943
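	/*
	 * Build the transmit config word: advertise symmetric/asymmetric
	 * pause when autonegotiating or when flow control was requested,
	 * and set ANE only for IFM_AUTO; otherwise force link up in
	 * full-duplex through CTRL.
	 */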
5944 sc->sc_txcw = 0;
5945 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5946 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5947 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5948 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5949 sc->sc_txcw |= TXCW_ANE;
5950 } else {
5951 /*
5952 * If autonegotiation is turned off, force link up and turn on
5953 * full duplex
5954 */
5955 sc->sc_txcw &= ~TXCW_ANE;
5956 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5957 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5958 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5959 delay(1000);
5960 }
5961
5962 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5963 device_xname(sc->sc_dev),sc->sc_txcw));
5964 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5965 delay(10000);
5966
5967 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5968 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5969
5970 	/*
5971 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
5972 	 * optics detect a signal; on older chips a clear bit means signal.
5973 	 */
5974 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5975 /* Have signal; wait for the link to come up. */
5976
5977 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5978 /*
5979 * Reset the link, and let autonegotiation do its thing
5980 */
5981 sc->sc_ctrl |= CTRL_LRST;
5982 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5983 delay(1000);
5984 sc->sc_ctrl &= ~CTRL_LRST;
5985 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5986 delay(1000);
5987 }
5988
5989 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5990 delay(10000);
5991 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5992 break;
5993 }
5994
5995 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5996 device_xname(sc->sc_dev),i));
5997
5998 status = CSR_READ(sc, WMREG_STATUS);
5999 DPRINTF(WM_DEBUG_LINK,
6000 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
6001 device_xname(sc->sc_dev),status, STATUS_LU));
6002 if (status & STATUS_LU) {
6003 /* Link is up. */
6004 DPRINTF(WM_DEBUG_LINK,
6005 ("%s: LINK: set media -> link up %s\n",
6006 device_xname(sc->sc_dev),
6007 (status & STATUS_FD) ? "FDX" : "HDX"));
6008
6009 /*
6010 * NOTE: CTRL will update TFCE and RFCE automatically,
6011 * so we should update sc->sc_ctrl
6012 */
6013 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6014 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6015 sc->sc_fcrtl &= ~FCRTL_XONE;
6016 if (status & STATUS_FD)
6017 sc->sc_tctl |=
6018 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6019 else
6020 sc->sc_tctl |=
6021 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6022 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6023 sc->sc_fcrtl |= FCRTL_XONE;
6024 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6025 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6026 WMREG_OLD_FCRTL : WMREG_FCRTL,
6027 sc->sc_fcrtl);
6028 sc->sc_tbi_linkup = 1;
6029 } else {
6030 if (i == WM_LINKUP_TIMEOUT)
6031 wm_check_for_link(sc);
6032 /* Link is down. */
6033 DPRINTF(WM_DEBUG_LINK,
6034 ("%s: LINK: set media -> link down\n",
6035 device_xname(sc->sc_dev)));
6036 sc->sc_tbi_linkup = 0;
6037 }
6038 } else {
6039 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6040 device_xname(sc->sc_dev)));
6041 sc->sc_tbi_linkup = 0;
6042 }
6043
6044 wm_tbi_set_linkled(sc);
6045
6046 return 0;
6047 }
6048
6049 /*
6050 * wm_tbi_set_linkled:
6051 *
6052 * Update the link LED on 1000BASE-X devices.
6053 */
6054 static void
6055 wm_tbi_set_linkled(struct wm_softc *sc)
6056 {
6057
6058 if (sc->sc_tbi_linkup)
6059 sc->sc_ctrl |= CTRL_SWDPIN(0);
6060 else
6061 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6062
6063 /* 82540 or newer devices are active low */
6064 	/* On 82540 and newer devices, the LED output is active-low */
6065
6066 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6067 }
6068
6069 /*
6070 * wm_tbi_check_link:
6071 *
6072 * Check the link on 1000BASE-X devices.
6073 */
6074 static void
6075 wm_tbi_check_link(struct wm_softc *sc)
6076 {
6077 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6078 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6079 uint32_t rxcw, ctrl, status;
6080
6081 status = CSR_READ(sc, WMREG_STATUS);
6082
6083 rxcw = CSR_READ(sc, WMREG_RXCW);
6084 ctrl = CSR_READ(sc, WMREG_CTRL);
6085
6086 /* set link status */
6087 if ((status & STATUS_LU) == 0) {
6088 DPRINTF(WM_DEBUG_LINK,
6089 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6090 sc->sc_tbi_linkup = 0;
6091 } else if (sc->sc_tbi_linkup == 0) {
6092 DPRINTF(WM_DEBUG_LINK,
6093 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6094 (status & STATUS_FD) ? "FDX" : "HDX"));
6095 sc->sc_tbi_linkup = 1;
6096 }
6097
6098 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6099 && ((status & STATUS_LU) == 0)) {
6100 sc->sc_tbi_linkup = 0;
6101 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6102 /* RXCFG storm! */
6103 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6104 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6105 wm_init(ifp);
6106 ifp->if_start(ifp);
6107 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6108 /* If the timer expired, retry autonegotiation */
6109 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6110 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6111 sc->sc_tbi_ticks = 0;
6112 /*
6113 * Reset the link, and let autonegotiation do
6114 * its thing
6115 */
6116 sc->sc_ctrl |= CTRL_LRST;
6117 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6118 delay(1000);
6119 sc->sc_ctrl &= ~CTRL_LRST;
6120 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6121 delay(1000);
6122 CSR_WRITE(sc, WMREG_TXCW,
6123 sc->sc_txcw & ~TXCW_ANE);
6124 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6125 }
6126 }
6127 }
6128
6129 wm_tbi_set_linkled(sc);
6130 }
6131
6132 /*
6133 * wm_gmii_reset:
6134 *
6135 * Reset the PHY.
6136 */
6137 static void
6138 wm_gmii_reset(struct wm_softc *sc)
6139 {
6140 uint32_t reg;
6141 int rv;
6142
6143 /* get phy semaphore */
6144 switch (sc->sc_type) {
6145 case WM_T_82571:
6146 case WM_T_82572:
6147 case WM_T_82573:
6148 case WM_T_82574:
6149 case WM_T_82583:
6150 /* XXX should get sw semaphore, too */
6151 rv = wm_get_swsm_semaphore(sc);
6152 break;
6153 case WM_T_82575:
6154 case WM_T_82576:
6155 case WM_T_82580:
6156 case WM_T_82580ER:
6157 case WM_T_I350:
6158 case WM_T_I210:
6159 case WM_T_I211:
6160 case WM_T_80003:
6161 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6162 break;
6163 case WM_T_ICH8:
6164 case WM_T_ICH9:
6165 case WM_T_ICH10:
6166 case WM_T_PCH:
6167 case WM_T_PCH2:
6168 case WM_T_PCH_LPT:
6169 rv = wm_get_swfwhw_semaphore(sc);
6170 break;
6171 default:
6172 		/* nothing to do */
6173 rv = 0;
6174 break;
6175 }
6176 if (rv != 0) {
6177 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6178 __func__);
6179 return;
6180 }
6181
6182 switch (sc->sc_type) {
6183 case WM_T_82542_2_0:
6184 case WM_T_82542_2_1:
6185 /* null */
6186 break;
6187 case WM_T_82543:
6188 /*
6189 * With 82543, we need to force speed and duplex on the MAC
6190 * equal to what the PHY speed and duplex configuration is.
6191 * In addition, we need to perform a hardware reset on the PHY
6192 * to take it out of reset.
6193 */
6194 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6195 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6196
6197 /* The PHY reset pin is active-low. */
6198 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6199 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6200 CTRL_EXT_SWDPIN(4));
6201 reg |= CTRL_EXT_SWDPIO(4);
6202
6203 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6204 delay(10*1000);
6205
6206 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6207 delay(150);
6208 #if 0
6209 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6210 #endif
6211 delay(20*1000); /* XXX extra delay to get PHY ID? */
6212 break;
6213 case WM_T_82544: /* reset 10000us */
6214 case WM_T_82540:
6215 case WM_T_82545:
6216 case WM_T_82545_3:
6217 case WM_T_82546:
6218 case WM_T_82546_3:
6219 case WM_T_82541:
6220 case WM_T_82541_2:
6221 case WM_T_82547:
6222 case WM_T_82547_2:
6223 case WM_T_82571: /* reset 100us */
6224 case WM_T_82572:
6225 case WM_T_82573:
6226 case WM_T_82574:
6227 case WM_T_82575:
6228 case WM_T_82576:
6229 case WM_T_82580:
6230 case WM_T_82580ER:
6231 case WM_T_I350:
6232 case WM_T_I210:
6233 case WM_T_I211:
6234 case WM_T_82583:
6235 case WM_T_80003:
6236 /* generic reset */
6237 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6238 delay(20000);
6239 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6240 delay(20000);
6241
6242 if ((sc->sc_type == WM_T_82541)
6243 || (sc->sc_type == WM_T_82541_2)
6244 || (sc->sc_type == WM_T_82547)
6245 || (sc->sc_type == WM_T_82547_2)) {
6246 			/* Workarounds for igp are done in igp_reset() */
6247 /* XXX add code to set LED after phy reset */
6248 }
6249 break;
6250 case WM_T_ICH8:
6251 case WM_T_ICH9:
6252 case WM_T_ICH10:
6253 case WM_T_PCH:
6254 case WM_T_PCH2:
6255 case WM_T_PCH_LPT:
6256 /* generic reset */
6257 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6258 delay(100);
6259 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6260 delay(150);
6261 break;
6262 default:
6263 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6264 __func__);
6265 break;
6266 }
6267
6268 /* release PHY semaphore */
6269 switch (sc->sc_type) {
6270 case WM_T_82571:
6271 case WM_T_82572:
6272 case WM_T_82573:
6273 case WM_T_82574:
6274 case WM_T_82583:
6275 /* XXX should put sw semaphore, too */
6276 wm_put_swsm_semaphore(sc);
6277 break;
6278 case WM_T_82575:
6279 case WM_T_82576:
6280 case WM_T_82580:
6281 case WM_T_82580ER:
6282 case WM_T_I350:
6283 case WM_T_I210:
6284 case WM_T_I211:
6285 case WM_T_80003:
6286 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6287 break;
6288 case WM_T_ICH8:
6289 case WM_T_ICH9:
6290 case WM_T_ICH10:
6291 case WM_T_PCH:
6292 case WM_T_PCH2:
6293 case WM_T_PCH_LPT:
6294 wm_put_swfwhw_semaphore(sc);
6295 break;
6296 default:
6297 		/* nothing to do */
6299 break;
6300 }
6301
6302 /* get_cfg_done */
6303 wm_get_cfg_done(sc);
6304
6305 /* extra setup */
6306 switch (sc->sc_type) {
6307 case WM_T_82542_2_0:
6308 case WM_T_82542_2_1:
6309 case WM_T_82543:
6310 case WM_T_82544:
6311 case WM_T_82540:
6312 case WM_T_82545:
6313 case WM_T_82545_3:
6314 case WM_T_82546:
6315 case WM_T_82546_3:
6316 case WM_T_82541_2:
6317 case WM_T_82547_2:
6318 case WM_T_82571:
6319 case WM_T_82572:
6320 case WM_T_82573:
6321 case WM_T_82574:
6322 case WM_T_82575:
6323 case WM_T_82576:
6324 case WM_T_82580:
6325 case WM_T_82580ER:
6326 case WM_T_I350:
6327 case WM_T_I210:
6328 case WM_T_I211:
6329 case WM_T_82583:
6330 case WM_T_80003:
6331 /* null */
6332 break;
6333 case WM_T_82541:
6334 case WM_T_82547:
6335 /* XXX Configure actively LED after PHY reset */
6336 break;
6337 case WM_T_ICH8:
6338 case WM_T_ICH9:
6339 case WM_T_ICH10:
6340 case WM_T_PCH:
6341 case WM_T_PCH2:
6342 case WM_T_PCH_LPT:
6343 		/* Allow time for h/w to get to a quiescent state after reset */
6344 delay(10*1000);
6345
6346 if (sc->sc_type == WM_T_PCH)
6347 wm_hv_phy_workaround_ich8lan(sc);
6348
6349 if (sc->sc_type == WM_T_PCH2)
6350 wm_lv_phy_workaround_ich8lan(sc);
6351
6352 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6353 /*
6354 * dummy read to clear the phy wakeup bit after lcd
6355 * reset
6356 */
6357 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6358 }
6359
6360 /*
6361 		 * XXX Configure the LCD with the extended configuration
6362 		 * region in NVM
6363 */
6364
6365 /* Configure the LCD with the OEM bits in NVM */
6366 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6367 || (sc->sc_type == WM_T_PCH_LPT)) {
6368 /*
6369 * Disable LPLU.
6370 * XXX It seems that 82567 has LPLU, too.
6371 */
6372 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6373 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6374 reg |= HV_OEM_BITS_ANEGNOW;
6375 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6376 }
6377 break;
6378 default:
6379 panic("%s: unknown type\n", __func__);
6380 break;
6381 }
6382 }
6383
6384 /*
6385 * wm_gmii_mediainit:
6386 *
6387 * Initialize media for use on 1000BASE-T devices.
6388 */
6389 static void
6390 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6391 {
6392 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6393 struct mii_data *mii = &sc->sc_mii;
6394
6395 /* We have MII. */
6396 sc->sc_flags |= WM_F_HAS_MII;
6397
6398 if (sc->sc_type == WM_T_80003)
6399 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6400 else
6401 sc->sc_tipg = TIPG_1000T_DFLT;
6402
6403 /*
6404 * Let the chip set speed/duplex on its own based on
6405 * signals from the PHY.
6406 * XXXbouyer - I'm not sure this is right for the 80003,
6407 * the em driver only sets CTRL_SLU here - but it seems to work.
6408 */
6409 sc->sc_ctrl |= CTRL_SLU;
6410 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6411
6412 /* Initialize our media structures and probe the GMII. */
6413 mii->mii_ifp = ifp;
6414
6415 /*
6416 * Determine the PHY access method.
6417 *
6418 * For SGMII, use SGMII specific method.
6419 *
6420 * For some devices, we can determine the PHY access method
6421 * from sc_type.
6422 *
6423 	 * For ICH8 variants, it's difficult to determine the PHY access
6424 	 * method from sc_type alone, so use the PCI product ID for some
6425 	 * devices.  For other ICH8 variants, try igp's method first; if
6426 	 * no PHY is detected that way, fall back to bm's method.
6427 */
6428 switch (prodid) {
6429 case PCI_PRODUCT_INTEL_PCH_M_LM:
6430 case PCI_PRODUCT_INTEL_PCH_M_LC:
6431 /* 82577 */
6432 sc->sc_phytype = WMPHY_82577;
6433 mii->mii_readreg = wm_gmii_hv_readreg;
6434 mii->mii_writereg = wm_gmii_hv_writereg;
6435 break;
6436 case PCI_PRODUCT_INTEL_PCH_D_DM:
6437 case PCI_PRODUCT_INTEL_PCH_D_DC:
6438 /* 82578 */
6439 sc->sc_phytype = WMPHY_82578;
6440 mii->mii_readreg = wm_gmii_hv_readreg;
6441 mii->mii_writereg = wm_gmii_hv_writereg;
6442 break;
6443 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6444 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6445 /* 82579 */
6446 sc->sc_phytype = WMPHY_82579;
6447 mii->mii_readreg = wm_gmii_hv_readreg;
6448 mii->mii_writereg = wm_gmii_hv_writereg;
6449 break;
6450 case PCI_PRODUCT_INTEL_I217_LM:
6451 case PCI_PRODUCT_INTEL_I217_V:
6452 case PCI_PRODUCT_INTEL_I218_LM:
6453 case PCI_PRODUCT_INTEL_I218_V:
6454 /* I21[78] */
6455 mii->mii_readreg = wm_gmii_hv_readreg;
6456 mii->mii_writereg = wm_gmii_hv_writereg;
6457 break;
6458 case PCI_PRODUCT_INTEL_82801I_BM:
6459 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6460 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6461 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6462 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6463 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6464 /* 82567 */
6465 sc->sc_phytype = WMPHY_BM;
6466 mii->mii_readreg = wm_gmii_bm_readreg;
6467 mii->mii_writereg = wm_gmii_bm_writereg;
6468 break;
6469 default:
6470 if ((sc->sc_flags & WM_F_SGMII) != 0) {
6471 mii->mii_readreg = wm_sgmii_readreg;
6472 mii->mii_writereg = wm_sgmii_writereg;
6473 } else if (sc->sc_type >= WM_T_80003) {
6474 mii->mii_readreg = wm_gmii_i80003_readreg;
6475 mii->mii_writereg = wm_gmii_i80003_writereg;
6476 } else if (sc->sc_type >= WM_T_I210) {
6477 mii->mii_readreg = wm_gmii_i82544_readreg;
6478 mii->mii_writereg = wm_gmii_i82544_writereg;
6479 } else if (sc->sc_type >= WM_T_82580) {
6480 sc->sc_phytype = WMPHY_82580;
6481 mii->mii_readreg = wm_gmii_82580_readreg;
6482 mii->mii_writereg = wm_gmii_82580_writereg;
6483 } else if (sc->sc_type >= WM_T_82544) {
6484 mii->mii_readreg = wm_gmii_i82544_readreg;
6485 mii->mii_writereg = wm_gmii_i82544_writereg;
6486 } else {
6487 mii->mii_readreg = wm_gmii_i82543_readreg;
6488 mii->mii_writereg = wm_gmii_i82543_writereg;
6489 }
6490 break;
6491 }
6492 mii->mii_statchg = wm_gmii_statchg;
6493
6494 wm_gmii_reset(sc);
6495
6496 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6497 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6498 wm_gmii_mediastatus);
6499
6500 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6501 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6502 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6503 || (sc->sc_type == WM_T_I211)) {
6504 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6505 /* Attach only one port */
6506 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6507 MII_OFFSET_ANY, MIIF_DOPAUSE);
6508 } else {
6509 int i;
6510 uint32_t ctrl_ext;
6511
6512 /* Power on sgmii phy if it is disabled */
6513 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6514 CSR_WRITE(sc, WMREG_CTRL_EXT,
6515 			    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6516 CSR_WRITE_FLUSH(sc);
6517 delay(300*1000); /* XXX too long */
6518
6519 			/* Probe PHY addresses 1 through 7 */
6520 for (i = 1; i < 8; i++)
6521 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6522 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6523
6524 /* restore previous sfp cage power state */
6525 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6526 }
6527 } else {
6528 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6529 MII_OFFSET_ANY, MIIF_DOPAUSE);
6530 }
6531
6532 /*
6533 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6534 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6535 */
6536 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6537 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6538 wm_set_mdio_slow_mode_hv(sc);
6539 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6540 MII_OFFSET_ANY, MIIF_DOPAUSE);
6541 }
6542
6543 /*
6544 * (For ICH8 variants)
6545 * If PHY detection failed, use BM's r/w function and retry.
6546 */
6547 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6548 /* if failed, retry with *_bm_* */
6549 mii->mii_readreg = wm_gmii_bm_readreg;
6550 mii->mii_writereg = wm_gmii_bm_writereg;
6551
6552 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6553 MII_OFFSET_ANY, MIIF_DOPAUSE);
6554 }
6555
6556 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6557 		/* No PHY was found */
6558 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6559 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6560 sc->sc_phytype = WMPHY_NONE;
6561 } else {
6562 /*
6563 * PHY Found!
6564 * Check PHY type.
6565 */
6566 uint32_t model;
6567 struct mii_softc *child;
6568
6569 child = LIST_FIRST(&mii->mii_phys);
6570 if (device_is_a(child->mii_dev, "igphy")) {
6571 struct igphy_softc *isc = (struct igphy_softc *)child;
6572
6573 model = isc->sc_mii.mii_mpd_model;
6574 if (model == MII_MODEL_yyINTEL_I82566)
6575 sc->sc_phytype = WMPHY_IGP_3;
6576 }
6577
6578 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6579 }
6580 }
6581
6582 /*
6583 * wm_gmii_mediastatus: [ifmedia interface function]
6584 *
6585 * Get the current interface media status on a 1000BASE-T device.
6586 */
6587 static void
6588 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6589 {
6590 struct wm_softc *sc = ifp->if_softc;
6591
6592 ether_mediastatus(ifp, ifmr);
6593 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6594 | sc->sc_flowflags;
6595 }
6596
6597 /*
6598 * wm_gmii_mediachange: [ifmedia interface function]
6599 *
6600 * Set hardware to newly-selected media on a 1000BASE-T device.
6601 */
6602 static int
6603 wm_gmii_mediachange(struct ifnet *ifp)
6604 {
6605 struct wm_softc *sc = ifp->if_softc;
6606 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6607 int rc;
6608
6609 if ((ifp->if_flags & IFF_UP) == 0)
6610 return 0;
6611
6612 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6613 sc->sc_ctrl |= CTRL_SLU;
6614 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6615 || (sc->sc_type > WM_T_82543)) {
6616 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6617 } else {
6618 sc->sc_ctrl &= ~CTRL_ASDE;
6619 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6620 if (ife->ifm_media & IFM_FDX)
6621 sc->sc_ctrl |= CTRL_FD;
6622 switch (IFM_SUBTYPE(ife->ifm_media)) {
6623 case IFM_10_T:
6624 sc->sc_ctrl |= CTRL_SPEED_10;
6625 break;
6626 case IFM_100_TX:
6627 sc->sc_ctrl |= CTRL_SPEED_100;
6628 break;
6629 case IFM_1000_T:
6630 sc->sc_ctrl |= CTRL_SPEED_1000;
6631 break;
6632 default:
6633 panic("wm_gmii_mediachange: bad media 0x%x",
6634 ife->ifm_media);
6635 }
6636 }
6637 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6638 if (sc->sc_type <= WM_T_82543)
6639 wm_gmii_reset(sc);
6640
6641 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6642 return 0;
6643 return rc;
6644 }
6645
6646 #define MDI_IO CTRL_SWDPIN(2)
6647 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6648 #define MDI_CLK CTRL_SWDPIN(3)
6649
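/*
 * On the i82543 the MII management interface is bit-banged through
 * software-controlled pins: MDI_IO carries the data, MDI_DIR sets the
 * direction and MDI_CLK supplies the clock, with 10us delays around
 * each edge.  The frames follow the usual MDIO format: a 32-bit
 * preamble of ones, then start, opcode, PHY address, register address
 * and 16 data bits.
 */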
6650 static void
6651 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6652 {
6653 uint32_t i, v;
6654
6655 v = CSR_READ(sc, WMREG_CTRL);
6656 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6657 v |= MDI_DIR | CTRL_SWDPIO(3);
6658
6659 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6660 if (data & i)
6661 v |= MDI_IO;
6662 else
6663 v &= ~MDI_IO;
6664 CSR_WRITE(sc, WMREG_CTRL, v);
6665 delay(10);
6666 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6667 delay(10);
6668 CSR_WRITE(sc, WMREG_CTRL, v);
6669 delay(10);
6670 }
6671 }
6672
6673 static uint32_t
6674 i82543_mii_recvbits(struct wm_softc *sc)
6675 {
6676 uint32_t v, i, data = 0;
6677
6678 v = CSR_READ(sc, WMREG_CTRL);
6679 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6680 v |= CTRL_SWDPIO(3);
6681
6682 CSR_WRITE(sc, WMREG_CTRL, v);
6683 delay(10);
6684 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6685 delay(10);
6686 CSR_WRITE(sc, WMREG_CTRL, v);
6687 delay(10);
6688
6689 for (i = 0; i < 16; i++) {
6690 data <<= 1;
6691 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6692 delay(10);
6693 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6694 data |= 1;
6695 CSR_WRITE(sc, WMREG_CTRL, v);
6696 delay(10);
6697 }
6698
6699 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6700 delay(10);
6701 CSR_WRITE(sc, WMREG_CTRL, v);
6702 delay(10);
6703
6704 return data;
6705 }
6706
6707 #undef MDI_IO
6708 #undef MDI_DIR
6709 #undef MDI_CLK
6710
6711 /*
6712 * wm_gmii_i82543_readreg: [mii interface function]
6713 *
6714 * Read a PHY register on the GMII (i82543 version).
6715 */
6716 static int
6717 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6718 {
6719 struct wm_softc *sc = device_private(self);
6720 int rv;
6721
6722 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6723 i82543_mii_sendbits(sc, reg | (phy << 5) |
6724 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6725 rv = i82543_mii_recvbits(sc) & 0xffff;
6726
6727 DPRINTF(WM_DEBUG_GMII,
6728 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6729 device_xname(sc->sc_dev), phy, reg, rv));
6730
6731 return rv;
6732 }
6733
6734 /*
6735 * wm_gmii_i82543_writereg: [mii interface function]
6736 *
6737 * Write a PHY register on the GMII (i82543 version).
6738 */
6739 static void
6740 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6741 {
6742 struct wm_softc *sc = device_private(self);
6743
6744 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6745 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6746 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6747 (MII_COMMAND_START << 30), 32);
6748 }
6749
6750 /*
6751 * wm_gmii_i82544_readreg: [mii interface function]
6752 *
6753 * Read a PHY register on the GMII.
6754 */
6755 static int
6756 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6757 {
6758 struct wm_softc *sc = device_private(self);
6759 uint32_t mdic = 0;
6760 int i, rv;
6761
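	/*
	 * MDIC handshake: write the opcode, PHY address and register
	 * address in one shot, then poll for MDIC_READY.  MDIC_E
	 * reports an MDIO error (normal when no PHY answers).
	 */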
6762 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6763 MDIC_REGADD(reg));
6764
6765 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6766 mdic = CSR_READ(sc, WMREG_MDIC);
6767 if (mdic & MDIC_READY)
6768 break;
6769 delay(50);
6770 }
6771
6772 if ((mdic & MDIC_READY) == 0) {
6773 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6774 device_xname(sc->sc_dev), phy, reg);
6775 rv = 0;
6776 } else if (mdic & MDIC_E) {
6777 #if 0 /* This is normal if no PHY is present. */
6778 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6779 device_xname(sc->sc_dev), phy, reg);
6780 #endif
6781 rv = 0;
6782 } else {
6783 rv = MDIC_DATA(mdic);
6784 if (rv == 0xffff)
6785 rv = 0;
6786 }
6787
6788 return rv;
6789 }
6790
6791 /*
6792 * wm_gmii_i82544_writereg: [mii interface function]
6793 *
6794 * Write a PHY register on the GMII.
6795 */
6796 static void
6797 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6798 {
6799 struct wm_softc *sc = device_private(self);
6800 uint32_t mdic = 0;
6801 int i;
6802
6803 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6804 MDIC_REGADD(reg) | MDIC_DATA(val));
6805
6806 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6807 mdic = CSR_READ(sc, WMREG_MDIC);
6808 if (mdic & MDIC_READY)
6809 break;
6810 delay(50);
6811 }
6812
6813 if ((mdic & MDIC_READY) == 0)
6814 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6815 device_xname(sc->sc_dev), phy, reg);
6816 else if (mdic & MDIC_E)
6817 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6818 device_xname(sc->sc_dev), phy, reg);
6819 }
6820
6821 /*
6822 * wm_gmii_i80003_readreg: [mii interface function]
6823 *
6824  *	Read a PHY register on the Kumeran interface.
6825  * This could be handled by the PHY layer if we didn't have to lock the
6826  * resource ...
6827 */
6828 static int
6829 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6830 {
6831 struct wm_softc *sc = device_private(self);
6832 int sem;
6833 int rv;
6834
6835 if (phy != 1) /* only one PHY on kumeran bus */
6836 return 0;
6837
6838 sem = swfwphysem[sc->sc_funcid];
6839 if (wm_get_swfw_semaphore(sc, sem)) {
6840 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6841 __func__);
6842 return 0;
6843 }
6844
6845 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6846 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6847 reg >> GG82563_PAGE_SHIFT);
6848 } else {
6849 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6850 reg >> GG82563_PAGE_SHIFT);
6851 }
6852 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
6853 delay(200);
6854 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6855 delay(200);
6856
6857 wm_put_swfw_semaphore(sc, sem);
6858 return rv;
6859 }
6860
6861 /*
6862 * wm_gmii_i80003_writereg: [mii interface function]
6863 *
6864  *	Write a PHY register on the Kumeran interface.
6865  * This could be handled by the PHY layer if we didn't have to lock the
6866  * resource ...
6867 */
6868 static void
6869 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6870 {
6871 struct wm_softc *sc = device_private(self);
6872 int sem;
6873
6874 if (phy != 1) /* only one PHY on kumeran bus */
6875 return;
6876
6877 sem = swfwphysem[sc->sc_funcid];
6878 if (wm_get_swfw_semaphore(sc, sem)) {
6879 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6880 __func__);
6881 return;
6882 }
6883
6884 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6885 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6886 reg >> GG82563_PAGE_SHIFT);
6887 } else {
6888 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6889 reg >> GG82563_PAGE_SHIFT);
6890 }
6891 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
6892 delay(200);
6893 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6894 delay(200);
6895
6896 wm_put_swfw_semaphore(sc, sem);
6897 }
6898
6899 /*
6900 * wm_gmii_bm_readreg: [mii interface function]
6901 *
6902  *	Read a PHY register on the BM PHY (82567 and similar).
6903  * This could be handled by the PHY layer if we didn't have to lock the
6904  * resource ...
6905 */
6906 static int
6907 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6908 {
6909 struct wm_softc *sc = device_private(self);
6910 int sem;
6911 int rv;
6912
6913 sem = swfwphysem[sc->sc_funcid];
6914 if (wm_get_swfw_semaphore(sc, sem)) {
6915 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6916 __func__);
6917 return 0;
6918 }
6919
6920 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6921 if (phy == 1)
6922 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6923 reg);
6924 else
6925 wm_gmii_i82544_writereg(self, phy,
6926 GG82563_PHY_PAGE_SELECT,
6927 reg >> GG82563_PAGE_SHIFT);
6928 }
6929
6930 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6931 wm_put_swfw_semaphore(sc, sem);
6932 return rv;
6933 }
6934
6935 /*
6936 * wm_gmii_bm_writereg: [mii interface function]
6937 *
6938  *	Write a PHY register on the BM PHY (82567 and similar).
6939  * This could be handled by the PHY layer if we didn't have to lock the
6940  * resource ...
6941 */
6942 static void
6943 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6944 {
6945 struct wm_softc *sc = device_private(self);
6946 int sem;
6947
6948 sem = swfwphysem[sc->sc_funcid];
6949 if (wm_get_swfw_semaphore(sc, sem)) {
6950 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6951 __func__);
6952 return;
6953 }
6954
6955 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6956 if (phy == 1)
6957 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6958 reg);
6959 else
6960 wm_gmii_i82544_writereg(self, phy,
6961 GG82563_PHY_PAGE_SELECT,
6962 reg >> GG82563_PAGE_SHIFT);
6963 }
6964
6965 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6966 wm_put_swfw_semaphore(sc, sem);
6967 }
6968
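/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Access a BM PHY wakeup register (page 800): select page 769 and
 *	set the wakeup-register enable bit, select page 800, write the
 *	register offset to the address opcode, read or write the data
 *	opcode, then restore page 769's enable register.
 */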
6969 static void
6970 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6971 {
6972 struct wm_softc *sc = device_private(self);
6973 uint16_t regnum = BM_PHY_REG_NUM(offset);
6974 uint16_t wuce;
6975
6976 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6977 if (sc->sc_type == WM_T_PCH) {
6978 		/* XXX The e1000 driver does nothing here... why? */
6979 }
6980
6981 /* Set page 769 */
6982 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6983 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6984
6985 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6986
6987 wuce &= ~BM_WUC_HOST_WU_BIT;
6988 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6989 wuce | BM_WUC_ENABLE_BIT);
6990
6991 /* Select page 800 */
6992 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6993 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6994
6995 /* Write page 800 */
6996 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6997
6998 if (rd)
6999 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7000 else
7001 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7002
7003 /* Set page 769 */
7004 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7005 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7006
7007 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7008 }
7009
7010 /*
7011 * wm_gmii_hv_readreg: [mii interface function]
7012 *
7013  *	Read a PHY register on the HV (PCH-family) PHY.
7014  * This could be handled by the PHY layer if we didn't have to lock the
7015  * resource ...
7016 */
7017 static int
7018 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7019 {
7020 struct wm_softc *sc = device_private(self);
7021 uint16_t page = BM_PHY_REG_PAGE(reg);
7022 uint16_t regnum = BM_PHY_REG_NUM(reg);
7023 uint16_t val;
7024 int rv;
7025
7026 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
7027 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7028 __func__);
7029 return 0;
7030 }
7031
7032 /* XXX Workaround failure in MDIO access while cable is disconnected */
7033 if (sc->sc_phytype == WMPHY_82577) {
7034 /* XXX must write */
7035 }
7036
7037 /* Page 800 works differently than the rest so it has its own func */
7038 	if (page == BM_WUC_PAGE) {
7039 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
 		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7040 		return val;
7041 	}
7042
7043 	/*
7044 	 * Pages below 768 work differently than the rest and are not
7045 	 * handled here yet.
7046 	 */
7047 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7048 		printf("gmii_hv_readreg!!!\n");
 		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7049 		return 0;
7050 	}
7051
7052 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7053 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7054 page << BME1000_PAGE_SHIFT);
7055 }
7056
7057 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7058 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7059 return rv;
7060 }
7061
7062 /*
7063 * wm_gmii_hv_writereg: [mii interface function]
7064 *
7065  *	Write a PHY register on the HV (PCH-family) PHY.
7066  * This could be handled by the PHY layer if we didn't have to lock the
7067  * resource ...
7068 */
7069 static void
7070 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7071 {
7072 struct wm_softc *sc = device_private(self);
7073 uint16_t page = BM_PHY_REG_PAGE(reg);
7074 uint16_t regnum = BM_PHY_REG_NUM(reg);
7075
7076 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
7077 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7078 __func__);
7079 return;
7080 }
7081
7082 /* XXX Workaround failure in MDIO access while cable is disconnected */
7083
7084 /* Page 800 works differently than the rest so it has its own func */
7085 	if (page == BM_WUC_PAGE) {
7086 		uint16_t tmp;
7087
7088 		tmp = val;
7089 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
 		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7090 		return;
7091 	}
7092
7093 	/*
7094 	 * Pages below 768 work differently than the rest and are not
7095 	 * handled here yet.
7096 	 */
7097 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7098 		printf("gmii_hv_writereg!!!\n");
 		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7099 		return;
7100 	}
7101
7102 /*
7103 * XXX Workaround MDIO accesses being disabled after entering IEEE
7104 * Power Down (whenever bit 11 of the PHY control register is set)
7105 */
7106
7107 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7108 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7109 page << BME1000_PAGE_SHIFT);
7110 }
7111
7112 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7113 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7114 }
7115
7116 /*
7117 * wm_sgmii_readreg: [mii interface function]
7118 *
7119 * Read a PHY register on the SGMII
7120 * This could be handled by the PHY layer if we didn't have to lock the
7121  * resource ...
7122 */
7123 static int
7124 wm_sgmii_readreg(device_t self, int phy, int reg)
7125 {
7126 struct wm_softc *sc = device_private(self);
7127 uint32_t i2ccmd;
7128 int i, rv;
7129
7130 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7131 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7132 __func__);
7133 return 0;
7134 }
7135
7136 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7137 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7138 | I2CCMD_OPCODE_READ;
7139 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7140
7141 /* Poll the ready bit */
7142 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7143 delay(50);
7144 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7145 if (i2ccmd & I2CCMD_READY)
7146 break;
7147 }
7148 if ((i2ccmd & I2CCMD_READY) == 0)
7149 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7150 if ((i2ccmd & I2CCMD_ERROR) != 0)
7151 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7152
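	/* The I2CCMD data field is byte-swapped; swap to host order. */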
7153 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7154
7155 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7156 return rv;
7157 }
7158
7159 /*
7160 * wm_sgmii_writereg: [mii interface function]
7161 *
7162 * Write a PHY register on the SGMII.
7163 * This could be handled by the PHY layer if we didn't have to lock the
7164  * resource ...
7165 */
7166 static void
7167 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7168 {
7169 struct wm_softc *sc = device_private(self);
7170 uint32_t i2ccmd;
7171 int i;
7172
7173 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7174 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7175 __func__);
7176 return;
7177 }
7178
7179 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7180 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7181 | I2CCMD_OPCODE_WRITE;
7182 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7183
7184 /* Poll the ready bit */
7185 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7186 delay(50);
7187 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7188 if (i2ccmd & I2CCMD_READY)
7189 break;
7190 }
7191 if ((i2ccmd & I2CCMD_READY) == 0)
7192 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7193 if ((i2ccmd & I2CCMD_ERROR) != 0)
7194 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7195
7196 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7197 }
7198
7199 /*
7200 * wm_gmii_82580_readreg: [mii interface function]
7201 *
7202 * Read a PHY register on the 82580 and I350.
7203 * This could be handled by the PHY layer if we didn't have to lock the
7204  * resource ...
7205 */
7206 static int
7207 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7208 {
7209 struct wm_softc *sc = device_private(self);
7210 int sem;
7211 int rv;
7212
7213 sem = swfwphysem[sc->sc_funcid];
7214 if (wm_get_swfw_semaphore(sc, sem)) {
7215 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7216 __func__);
7217 return 0;
7218 }
7219
7220 rv = wm_gmii_i82544_readreg(self, phy, reg);
7221
7222 wm_put_swfw_semaphore(sc, sem);
7223 return rv;
7224 }
7225
7226 /*
7227 * wm_gmii_82580_writereg: [mii interface function]
7228 *
7229 * Write a PHY register on the 82580 and I350.
7230 * This could be handled by the PHY layer if we didn't have to lock the
7231  * resource ...
7232 */
7233 static void
7234 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7235 {
7236 struct wm_softc *sc = device_private(self);
7237 int sem;
7238
7239 sem = swfwphysem[sc->sc_funcid];
7240 if (wm_get_swfw_semaphore(sc, sem)) {
7241 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7242 __func__);
7243 return;
7244 }
7245
7246 wm_gmii_i82544_writereg(self, phy, reg, val);
7247
7248 wm_put_swfw_semaphore(sc, sem);
7249 }
7250
7251 /*
7252 * wm_gmii_statchg: [mii interface function]
7253 *
7254 * Callback from MII layer when media changes.
7255 */
7256 static void
7257 wm_gmii_statchg(struct ifnet *ifp)
7258 {
7259 struct wm_softc *sc = ifp->if_softc;
7260 struct mii_data *mii = &sc->sc_mii;
7261
7262 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7263 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7264 sc->sc_fcrtl &= ~FCRTL_XONE;
7265
7266 /*
7267 * Get flow control negotiation result.
7268 */
7269 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7270 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7271 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7272 mii->mii_media_active &= ~IFM_ETH_FMASK;
7273 }
7274
7275 if (sc->sc_flowflags & IFM_FLOW) {
7276 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7277 sc->sc_ctrl |= CTRL_TFCE;
7278 sc->sc_fcrtl |= FCRTL_XONE;
7279 }
7280 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7281 sc->sc_ctrl |= CTRL_RFCE;
7282 }
7283
7284 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7285 DPRINTF(WM_DEBUG_LINK,
7286 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7287 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7288 } else {
7289 DPRINTF(WM_DEBUG_LINK,
7290 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7291 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7292 }
7293
7294 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7295 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7296 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7297 : WMREG_FCRTL, sc->sc_fcrtl);
7298 if (sc->sc_type == WM_T_80003) {
7299 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7300 case IFM_1000_T:
7301 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7302 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7303 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7304 break;
7305 default:
7306 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7307 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7308 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7309 break;
7310 }
7311 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7312 }
7313 }
7314
7315 /*
7316 * wm_kmrn_readreg:
7317 *
7318 * Read a kumeran register
7319 */
7320 static int
7321 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7322 {
7323 int rv;
7324
7325 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
7326 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7327 aprint_error_dev(sc->sc_dev,
7328 "%s: failed to get semaphore\n", __func__);
7329 return 0;
7330 }
7331 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
7332 if (wm_get_swfwhw_semaphore(sc)) {
7333 aprint_error_dev(sc->sc_dev,
7334 "%s: failed to get semaphore\n", __func__);
7335 return 0;
7336 }
7337 }
7338
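	/*
	 * A Kumeran read is started by writing the register offset along
	 * with the REN (read enable) bit; the result then appears in the
	 * low half of KUMCTRLSTA.
	 */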
7339 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7340 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7341 KUMCTRLSTA_REN);
7342 delay(2);
7343
7344 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7345
7346 	if (sc->sc_flags & WM_F_SWFW_SYNC)
7347 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7348 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
7349 wm_put_swfwhw_semaphore(sc);
7350
7351 return rv;
7352 }
7353
7354 /*
7355 * wm_kmrn_writereg:
7356 *
7357 * Write a kumeran register
7358 */
7359 static void
7360 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7361 {
7362
7363 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
7364 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7365 aprint_error_dev(sc->sc_dev,
7366 "%s: failed to get semaphore\n", __func__);
7367 return;
7368 }
7369 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
7370 if (wm_get_swfwhw_semaphore(sc)) {
7371 aprint_error_dev(sc->sc_dev,
7372 "%s: failed to get semaphore\n", __func__);
7373 return;
7374 }
7375 }
7376
7377 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7378 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7379 (val & KUMCTRLSTA_MASK));
7380
7381 	if (sc->sc_flags & WM_F_SWFW_SYNC)
7382 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7383 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
7384 wm_put_swfwhw_semaphore(sc);
7385 }
7386
7387 static int
7388 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7389 {
7390 uint32_t eecd = 0;
7391
7392 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7393 || sc->sc_type == WM_T_82583) {
7394 eecd = CSR_READ(sc, WMREG_EECD);
7395
7396 /* Isolate bits 15 & 16 */
7397 eecd = ((eecd >> 15) & 0x03);
7398
7399 /* If both bits are set, device is Flash type */
7400 if (eecd == 0x03)
7401 return 0;
7402 }
7403 return 1;
7404 }
7405
7406 static int
7407 wm_get_swsm_semaphore(struct wm_softc *sc)
7408 {
7409 int32_t timeout;
7410 uint32_t swsm;
7411
7412 /* Get the FW semaphore. */
7413 timeout = 1000 + 1; /* XXX */
7414 while (timeout) {
7415 swsm = CSR_READ(sc, WMREG_SWSM);
7416 swsm |= SWSM_SWESMBI;
7417 CSR_WRITE(sc, WMREG_SWSM, swsm);
7418 /* if we managed to set the bit we got the semaphore. */
7419 swsm = CSR_READ(sc, WMREG_SWSM);
7420 if (swsm & SWSM_SWESMBI)
7421 break;
7422
7423 delay(50);
7424 timeout--;
7425 }
7426
7427 if (timeout == 0) {
7428 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7429 /* Release semaphores */
7430 wm_put_swsm_semaphore(sc);
7431 return 1;
7432 }
7433 return 0;
7434 }
7435
7436 static void
7437 wm_put_swsm_semaphore(struct wm_softc *sc)
7438 {
7439 uint32_t swsm;
7440
7441 swsm = CSR_READ(sc, WMREG_SWSM);
7442 swsm &= ~(SWSM_SWESMBI);
7443 CSR_WRITE(sc, WMREG_SWSM, swsm);
7444 }
7445
7446 static int
7447 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7448 {
7449 uint32_t swfw_sync;
7450 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7451 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7452 	int timeout;
7453
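	/*
	 * Two-level locking: the SWSM hardware semaphore serializes
	 * access to the SW_FW_SYNC register itself, whose per-resource
	 * software/firmware bits then arbitrate ownership of the shared
	 * resource (PHY, NVM, ...) between the driver and the firmware.
	 */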
7454 for (timeout = 0; timeout < 200; timeout++) {
7455 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7456 if (wm_get_swsm_semaphore(sc)) {
7457 aprint_error_dev(sc->sc_dev,
7458 "%s: failed to get semaphore\n",
7459 __func__);
7460 return 1;
7461 }
7462 }
7463 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7464 if ((swfw_sync & (swmask | fwmask)) == 0) {
7465 swfw_sync |= swmask;
7466 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7467 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7468 wm_put_swsm_semaphore(sc);
7469 return 0;
7470 }
7471 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7472 wm_put_swsm_semaphore(sc);
7473 delay(5000);
7474 }
7475 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7476 device_xname(sc->sc_dev), mask, swfw_sync);
7477 return 1;
7478 }
7479
7480 static void
7481 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7482 {
7483 uint32_t swfw_sync;
7484
7485 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7486 while (wm_get_swsm_semaphore(sc) != 0)
7487 continue;
7488 }
7489 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7490 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7491 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7492 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7493 wm_put_swsm_semaphore(sc);
7494 }
7495
7496 static int
7497 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7498 {
7499 uint32_t ext_ctrl;
7500 	int timeout;
7501
7502 for (timeout = 0; timeout < 200; timeout++) {
7503 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7504 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7505 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7506
7507 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7508 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7509 return 0;
7510 delay(5000);
7511 }
7512 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7513 device_xname(sc->sc_dev), ext_ctrl);
7514 return 1;
7515 }
7516
7517 static void
7518 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7519 {
7520 uint32_t ext_ctrl;
7521 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7522 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7523 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7524 }
7525
7526 static int
7527 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7528 {
7529 uint32_t eecd;
7530 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7531 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7532 uint8_t sig_byte = 0;
7533
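	/*
	 * The ICH flash holds two NVM banks; the valid one is identified
	 * either by the EECD SEC1VAL bit (ICH8/9, when that field is
	 * valid) or by checking which bank carries the valid signature
	 * byte.
	 */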
7534 switch (sc->sc_type) {
7535 case WM_T_ICH8:
7536 case WM_T_ICH9:
7537 eecd = CSR_READ(sc, WMREG_EECD);
7538 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7539 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7540 return 0;
7541 }
7542 /* FALLTHROUGH */
7543 default:
7544 /* Default to 0 */
7545 *bank = 0;
7546
7547 /* Check bank 0 */
7548 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7549 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7550 *bank = 0;
7551 return 0;
7552 }
7553
7554 /* Check bank 1 */
7555 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7556 &sig_byte);
7557 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7558 *bank = 1;
7559 return 0;
7560 }
7561 }
7562
7563 aprint_error_dev(sc->sc_dev, "EEPROM not present\n");
7564 return -1;
7565 }
7566
7567 /******************************************************************************
7568 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7569 * register.
7570 *
7571 * sc - Struct containing variables accessed by shared code
7572 * offset - offset of word in the EEPROM to read
7573 * data - word read from the EEPROM
7574 * words - number of words to read
7575 *****************************************************************************/
7576 static int
7577 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7578 {
7579 int32_t error = 0;
7580 uint32_t flash_bank = 0;
7581 uint32_t act_offset = 0;
7582 uint32_t bank_offset = 0;
7583 uint16_t word = 0;
7584 uint16_t i = 0;
7585
7586 	/*
7587 	 * We need to know which flash bank is valid.  Since we don't
7588 	 * maintain an eeprom_shadow_ram, flash_bank can't be cached and
7589 	 * must be re-detected on every read.
7590 	 */
7591 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7592 if (error) {
7593 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7594 __func__);
7595 return error;
7596 }
7597
7598 /*
7599 * Adjust offset appropriately if we're on bank 1 - adjust for word
7600 * size
7601 */
7602 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7603
7604 error = wm_get_swfwhw_semaphore(sc);
7605 if (error) {
7606 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7607 __func__);
7608 return error;
7609 }
7610
7611 for (i = 0; i < words; i++) {
7612 /* The NVM part needs a byte offset, hence * 2 */
7613 act_offset = bank_offset + ((offset + i) * 2);
7614 error = wm_read_ich8_word(sc, act_offset, &word);
7615 if (error) {
7616 aprint_error_dev(sc->sc_dev,
7617 "%s: failed to read NVM\n", __func__);
7618 break;
7619 }
7620 data[i] = word;
7621 }
7622
7623 wm_put_swfwhw_semaphore(sc);
7624 return error;
7625 }
7626
7627 /******************************************************************************
7628 * This function does initial flash setup so that a new read/write/erase cycle
7629 * can be started.
7630 *
7631 * sc - The pointer to the hw structure
7632 ****************************************************************************/
7633 static int32_t
7634 wm_ich8_cycle_init(struct wm_softc *sc)
7635 {
7636 uint16_t hsfsts;
7637 int32_t error = 1;
7638 int32_t i = 0;
7639
7640 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7641
7642 /* Check that the Flash Descriptor Valid bit is set in HW status */
7643 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7644 return error;
7645 }
7646
7647 /* Clear FCERR and DAEL in HW status by writing 1s */
7649 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7650
7651 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7652
7653 /*
7654 * Either we should have a hardware SPI cycle-in-progress bit to check
7655 * against in order to start a new cycle, or the FDONE bit should be
7656 * changed in the hardware so that it reads 1 after a hardware reset,
7657 * which could then be used to tell whether a cycle is in progress or
7658 * has completed.  We should also have a software semaphore mechanism
7659 * guarding FDONE or the cycle-in-progress bit so that two threads'
7660 * accesses to those bits are serialized, or some way to keep two
7661 * threads from starting a cycle at the same time.
7662 */
7663
7664 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7665 /*
7666 * There is no cycle running at present, so we can start a
7667 * cycle
7668 */
7669
7670 /* Begin by setting Flash Cycle Done. */
7671 hsfsts |= HSFSTS_DONE;
7672 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7673 error = 0;
7674 } else {
7675 /*
7676 * otherwise poll for some time so the current cycle has a
7677 * chance to end before giving up.
7678 */
7679 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7680 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7681 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7682 error = 0;
7683 break;
7684 }
7685 delay(1);
7686 }
7687 if (error == 0) {
7688 /*
7689 * Successfully waited for the previous cycle to finish;
7690 * now set the Flash Cycle Done.
7691 */
7692 hsfsts |= HSFSTS_DONE;
7693 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7694 }
7695 }
7696 return error;
7697 }
7698
7699 /******************************************************************************
7700 * This function starts a flash cycle and waits for its completion
7701 *
7702 * sc - The pointer to the hw structure
7703 ****************************************************************************/
7704 static int32_t
7705 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7706 {
7707 uint16_t hsflctl;
7708 uint16_t hsfsts;
7709 int32_t error = 1;
7710 uint32_t i = 0;
7711
7712 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7713 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7714 hsflctl |= HSFCTL_GO;
7715 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7716
7717 /* wait till FDONE bit is set to 1 */
7718 do {
7719 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7720 if (hsfsts & HSFSTS_DONE)
7721 break;
7722 delay(1);
7723 i++;
7724 } while (i < timeout);
7725 if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7726 error = 0;
7727
7728 return error;
7729 }
7730
7731 /******************************************************************************
7732 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7733 *
7734 * sc - The pointer to the hw structure
7735 * index - The index of the byte or word to read.
7736 * size - Size of data to read, 1=byte 2=word
7737 * data - Pointer to the word to store the value read.
7738 *****************************************************************************/
7739 static int32_t
7740 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7741 uint32_t size, uint16_t* data)
7742 {
7743 uint16_t hsfsts;
7744 uint16_t hsflctl;
7745 uint32_t flash_linear_address;
7746 uint32_t flash_data = 0;
7747 int32_t error = 1;
7748 int32_t count = 0;
7749
7750 if (size < 1 || size > 2 || data == NULL ||
7751 index > ICH_FLASH_LINEAR_ADDR_MASK)
7752 return error;
7753
7754 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7755 sc->sc_ich8_flash_base;
7756
7757 do {
7758 delay(1);
7759 /* Steps */
7760 error = wm_ich8_cycle_init(sc);
7761 if (error)
7762 break;
7763
7764 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7765 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7766 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7767 & HSFCTL_BCOUNT_MASK;
7768 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7769 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7770
7771 /*
7772 * Write the last 24 bits of index into Flash Linear address
7773 * field in Flash Address
7774 */
7775 /* TODO: maybe check the index against the size of the flash */
7776
7777 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7778
7779 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7780
7781 /*
7782 * If FCERR is set, clear it and retry the whole sequence a few
7783 * more times; otherwise read in (shift in) Flash Data0,
7784 * least significant byte first.
7785 */
7787 if (error == 0) {
7788 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7789 if (size == 1)
7790 *data = (uint8_t)(flash_data & 0x000000FF);
7791 else if (size == 2)
7792 *data = (uint16_t)(flash_data & 0x0000FFFF);
7793 break;
7794 } else {
7795 /*
7796 * If we've gotten here, then things are probably
7797 * completely hosed, but if the error condition is
7798 * detected, it won't hurt to give it another try...
7799 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7800 */
7801 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7802 if (hsfsts & HSFSTS_ERR) {
7803 /* Repeat for some time before giving up. */
7804 continue;
7805 } else if ((hsfsts & HSFSTS_DONE) == 0)
7806 break;
7807 }
7808 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7809
7810 return error;
7811 }
7812
7813 /******************************************************************************
7814 * Reads a single byte from the NVM using the ICH8 flash access registers.
7815 *
7816 * sc - pointer to wm_hw structure
7817 * index - The index of the byte to read.
7818 * data - Pointer to a byte to store the value read.
7819 *****************************************************************************/
7820 static int32_t
7821 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7822 {
7823 int32_t status;
7824 uint16_t word = 0;
7825
7826 status = wm_read_ich8_data(sc, index, 1, &word);
7827 if (status == 0)
7828 *data = (uint8_t)word;
7829 else
7830 *data = 0;
7831
7832 return status;
7833 }
7834
7835 /******************************************************************************
7836 * Reads a word from the NVM using the ICH8 flash access registers.
7837 *
7838 * sc - pointer to wm_hw structure
7839 * index - The starting byte index of the word to read.
7840 * data - Pointer to a word to store the value read.
7841 *****************************************************************************/
7842 static int32_t
7843 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7844 {
7845 int32_t status;
7846
7847 status = wm_read_ich8_data(sc, index, 2, data);
7848 return status;
7849 }
7850
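/*
 * Check whether the manageability firmware mode is enabled; how to
 * detect it differs per chip family, so dispatch on sc_type.
 */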
7851 static int
7852 wm_check_mng_mode(struct wm_softc *sc)
7853 {
7854 int rv;
7855
7856 switch (sc->sc_type) {
7857 case WM_T_ICH8:
7858 case WM_T_ICH9:
7859 case WM_T_ICH10:
7860 case WM_T_PCH:
7861 case WM_T_PCH2:
7862 case WM_T_PCH_LPT:
7863 rv = wm_check_mng_mode_ich8lan(sc);
7864 break;
7865 case WM_T_82574:
7866 case WM_T_82583:
7867 rv = wm_check_mng_mode_82574(sc);
7868 break;
7869 case WM_T_82571:
7870 case WM_T_82572:
7871 case WM_T_82573:
7872 case WM_T_80003:
7873 rv = wm_check_mng_mode_generic(sc);
7874 break;
7875 default:
7876 /* nothing to do */
7877 rv = 0;
7878 break;
7879 }
7880
7881 return rv;
7882 }
7883
7884 static int
7885 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7886 {
7887 uint32_t fwsm;
7888
7889 fwsm = CSR_READ(sc, WMREG_FWSM);
7890
7891 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7892 return 1;
7893
7894 return 0;
7895 }
7896
7897 static int
7898 wm_check_mng_mode_82574(struct wm_softc *sc)
7899 {
7900 uint16_t data;
7901
7902 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7903
7904 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7905 return 1;
7906
7907 return 0;
7908 }
7909
7910 static int
7911 wm_check_mng_mode_generic(struct wm_softc *sc)
7912 {
7913 uint32_t fwsm;
7914
7915 fwsm = CSR_READ(sc, WMREG_FWSM);
7916
7917 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7918 return 1;
7919
7920 return 0;
7921 }
7922
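/*
 * Return 1 if management pass-through of packets between the firmware
 * and the host should be enabled, 0 otherwise.
 */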
7923 static int
7924 wm_enable_mng_pass_thru(struct wm_softc *sc)
7925 {
7926 uint32_t manc, fwsm, factps;
7927
7928 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7929 return 0;
7930
7931 manc = CSR_READ(sc, WMREG_MANC);
7932
7933 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7934 device_xname(sc->sc_dev), manc));
7935 if (((manc & MANC_RECV_TCO_EN) == 0)
7936 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7937 return 0;
7938
7939 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7940 fwsm = CSR_READ(sc, WMREG_FWSM);
7941 factps = CSR_READ(sc, WMREG_FACTPS);
7942 if (((factps & FACTPS_MNGCG) == 0)
7943 && ((fwsm & FWSM_MODE_MASK)
7944 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7945 return 1;
7946 } else if (((manc & MANC_SMBUS_EN) != 0)
7947 && ((manc & MANC_ASF_EN) == 0))
7948 return 1;
7949
7950 return 0;
7951 }
7952
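/*
 * Check whether the firmware blocks PHY resets: return 0 if a reset
 * is allowed, -1 if it is blocked.
 */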
7953 static int
7954 wm_check_reset_block(struct wm_softc *sc)
7955 {
7956 uint32_t reg;
7957
7958 switch (sc->sc_type) {
7959 case WM_T_ICH8:
7960 case WM_T_ICH9:
7961 case WM_T_ICH10:
7962 case WM_T_PCH:
7963 case WM_T_PCH2:
7964 case WM_T_PCH_LPT:
7965 reg = CSR_READ(sc, WMREG_FWSM);
7966 if ((reg & FWSM_RSPCIPHY) != 0)
7967 return 0;
7968 else
7969 return -1;
7970 break;
7971 case WM_T_82571:
7972 case WM_T_82572:
7973 case WM_T_82573:
7974 case WM_T_82574:
7975 case WM_T_82583:
7976 case WM_T_80003:
7977 reg = CSR_READ(sc, WMREG_MANC);
7978 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7979 return -1;
7980 else
7981 return 0;
7982 break;
7983 default:
7984 /* no problem */
7985 break;
7986 }
7987
7988 return 0;
7989 }
7990
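/*
 * Set the DRV_LOAD bit so that the firmware knows the host driver
 * has taken control of the hardware.
 */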
7991 static void
7992 wm_get_hw_control(struct wm_softc *sc)
7993 {
7994 uint32_t reg;
7995
7996 switch (sc->sc_type) {
7997 case WM_T_82573:
7998 reg = CSR_READ(sc, WMREG_SWSM);
7999 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8000 break;
8001 case WM_T_82571:
8002 case WM_T_82572:
8003 case WM_T_82574:
8004 case WM_T_82583:
8005 case WM_T_80003:
8006 case WM_T_ICH8:
8007 case WM_T_ICH9:
8008 case WM_T_ICH10:
8009 case WM_T_PCH:
8010 case WM_T_PCH2:
8011 case WM_T_PCH_LPT:
8012 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8013 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8014 break;
8015 default:
8016 break;
8017 }
8018 }
8019
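/*
 * Clear the DRV_LOAD bit when the driver is unloading, returning
 * control of the hardware to the firmware.
 */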
8020 static void
8021 wm_release_hw_control(struct wm_softc *sc)
8022 {
8023 uint32_t reg;
8024
8025 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8026 return;
8027
8028 if (sc->sc_type == WM_T_82573) {
8029 reg = CSR_READ(sc, WMREG_SWSM);
8030 reg &= ~SWSM_DRV_LOAD;
8031 CSR_WRITE(sc, WMREG_SWSM, reg);
8032 } else {
8033 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8034 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8035 }
8036 }
8037
8038 /* XXX Currently TBI only */
8039 static int
8040 wm_check_for_link(struct wm_softc *sc)
8041 {
8042 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8043 uint32_t rxcw;
8044 uint32_t ctrl;
8045 uint32_t status;
8046 uint32_t sig;
8047
8048 rxcw = CSR_READ(sc, WMREG_RXCW);
8049 ctrl = CSR_READ(sc, WMREG_CTRL);
8050 status = CSR_READ(sc, WMREG_STATUS);
8051
8052 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8053
8054 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8055 device_xname(sc->sc_dev), __func__,
8056 ((ctrl & CTRL_SWDPIN(1)) == sig),
8057 ((status & STATUS_LU) != 0),
8058 ((rxcw & RXCW_C) != 0)
8059 ));
8060
8061 /*
8062 * SWDPIN LU RXCW
8063 * 0 0 0
8064 * 0 0 1 (should not happen)
8065 * 0 1 0 (should not happen)
8066 * 0 1 1 (should not happen)
8067 * 1 0 0 Disable autonego and force linkup
8068 * 1 0 1 got /C/ but not linkup yet
8069 * 1 1 0 (linkup)
8070 * 1 1 1 If IFM_AUTO, back to autonego
8071 *
8072 */
8073 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8074 && ((status & STATUS_LU) == 0)
8075 && ((rxcw & RXCW_C) == 0)) {
8076 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8077 __func__));
8078 sc->sc_tbi_linkup = 0;
8079 /* Disable auto-negotiation in the TXCW register */
8080 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8081
8082 /*
8083 * Force link-up and also force full-duplex.
8084 *
8085 * NOTE: the hardware updates TFCE and RFCE in CTRL
8086 * automatically, so we should update sc->sc_ctrl as well.
8087 */
8088 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8089 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8090 } else if (((status & STATUS_LU) != 0)
8091 && ((rxcw & RXCW_C) != 0)
8092 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8093 sc->sc_tbi_linkup = 1;
8094 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8095 __func__));
8096 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8097 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8098 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8099 && ((rxcw & RXCW_C) != 0)) {
8100 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8101 } else {
8102 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8103 status));
8104 }
8105
8106 return 0;
8107 }
8108
8109 /* Work-around for 82566 Kumeran PCS lock loss */
8110 static void
8111 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8112 {
8113 int miistatus, active, i;
8114 int reg;
8115
8116 miistatus = sc->sc_mii.mii_media_status;
8117
8118 /* If the link is not up, do nothing */
8119 if ((miistatus & IFM_ACTIVE) == 0)
8120 return;
8121
8122 active = sc->sc_mii.mii_media_active;
8123
8124 /* Nothing to do if the link speed is not 1Gbps */
8125 if (IFM_SUBTYPE(active) != IFM_1000_T)
8126 return;
8127
8128 for (i = 0; i < 10; i++) {
8129 /* read twice */
8130 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8131 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8132 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
8133 goto out; /* GOOD! */
8134
8135 /* Reset the PHY */
8136 wm_gmii_reset(sc);
8137 delay(5*1000);
8138 }
8139
8140 /* Disable GigE link negotiation */
8141 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8142 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8143 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8144
8145 /*
8146 * Call gig speed drop workaround on Gig disable before accessing
8147 * any PHY registers.
8148 */
8149 wm_gig_downshift_workaround_ich8lan(sc);
8150
8151 out:
8152 return;
8153 }
8154
8155 /* WOL from S5 stops working */
8156 static void
8157 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8158 {
8159 uint16_t kmrn_reg;
8160
8161 /* Only for igp3 */
8162 if (sc->sc_phytype == WMPHY_IGP_3) {
8163 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8164 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8165 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8166 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8167 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8168 }
8169 }
8170
8171 #ifdef WM_WOL
8172 /* Power down workaround on D3 */
8173 static void
8174 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8175 {
8176 uint32_t reg;
8177 int i;
8178
8179 for (i = 0; i < 2; i++) {
8180 /* Disable link */
8181 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8182 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8183 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8184
8185 /*
8186 * Call gig speed drop workaround on Gig disable before
8187 * accessing any PHY registers
8188 */
8189 if (sc->sc_type == WM_T_ICH8)
8190 wm_gig_downshift_workaround_ich8lan(sc);
8191
8192 /* Write VR power-down enable */
8193 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8194 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8195 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8196 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8197
8198 /* Read it back and test */
8199 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8200 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8201 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8202 break;
8203
8204 /* Issue PHY reset and repeat at most one more time */
8205 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8206 }
8207 }
8208 #endif /* WM_WOL */
8209
8210 /*
8211 * Workaround for pch's PHYs
8212 * XXX should be moved to new PHY driver?
8213 */
8214 static void
8215 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8216 {
8217 if (sc->sc_phytype == WMPHY_82577)
8218 wm_set_mdio_slow_mode_hv(sc);
8219
8220 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8221
8222 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8223
8224 /* 82578 */
8225 if (sc->sc_phytype == WMPHY_82578) {
8226 /* PCH rev. < 3 */
8227 if (sc->sc_rev < 3) {
8228 /* XXX 6 bit shift? Why? Is it page2? */
8229 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8230 0x66c0);
8231 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8232 0xffff);
8233 }
8234
8235 /* XXX phy rev. < 2 */
8236 }
8237
8238 /* Select page 0 */
8239
8240 /* XXX acquire semaphore */
8241 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8242 /* XXX release semaphore */
8243
8244 /*
8245 * Configure the K1 Si workaround during PHY reset, assuming there
8246 * is link, so that K1 is disabled if the link is at 1Gbps.
8247 */
8248 wm_k1_gig_workaround_hv(sc, 1);
8249 }
8250
8251 static void
8252 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8253 {
8254
8255 wm_set_mdio_slow_mode_hv(sc);
8256 }
8257
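/*
 * K1 workaround: K1 (a Kumeran power-saving state) must not stay
 * enabled while the link is up at 1Gbps, so decide here whether to
 * keep the NVM's K1 setting or force it off.
 */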
8258 static void
8259 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8260 {
8261 int k1_enable = sc->sc_nvm_k1_enabled;
8262
8263 /* XXX acquire semaphore */
8264
8265 if (link) {
8266 k1_enable = 0;
8267
8268 /* Link stall fix for link up */
8269 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8270 } else {
8271 /* Link stall fix for link down */
8272 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8273 }
8274
8275 wm_configure_k1_ich8lan(sc, k1_enable);
8276
8277 /* XXX release semaphore */
8278 }
8279
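/* Put the PHY's MDIO interface into slow mode */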
8280 static void
8281 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8282 {
8283 uint32_t reg;
8284
8285 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8286 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8287 reg | HV_KMRN_MDIO_SLOW);
8288 }
8289
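/*
 * Enable or disable K1 via the Kumeran K1_CONFIG register, briefly
 * forcing the MAC speed so that the new setting takes effect.
 */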
8290 static void
8291 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8292 {
8293 uint32_t ctrl, ctrl_ext, tmp;
8294 uint16_t kmrn_reg;
8295
8296 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8297
8298 if (k1_enable)
8299 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8300 else
8301 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8302
8303 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8304
8305 delay(20);
8306
8307 ctrl = CSR_READ(sc, WMREG_CTRL);
8308 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8309
8310 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8311 tmp |= CTRL_FRCSPD;
8312
8313 CSR_WRITE(sc, WMREG_CTRL, tmp);
8314 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8315 delay(20);
8316
8317 CSR_WRITE(sc, WMREG_CTRL, ctrl);
8318 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8319 delay(20);
8320 }
8321
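/*
 * Switch the PHY's interface from SMBus back to PCIe by toggling the
 * LANPHYPC value; only done when no valid firmware is present.
 */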
8322 static void
8323 wm_smbustopci(struct wm_softc *sc)
8324 {
8325 uint32_t fwsm;
8326
8327 fwsm = CSR_READ(sc, WMREG_FWSM);
8328 if (((fwsm & FWSM_FW_VALID) == 0)
8329 && ((wm_check_reset_block(sc) == 0))) {
8330 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8331 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8332 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8333 delay(10);
8334 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8335 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8336 delay(50*1000);
8337
8338 /*
8339 * Gate automatic PHY configuration by hardware on non-managed
8340 * 82579
8341 */
8342 if (sc->sc_type == WM_T_PCH2)
8343 wm_gate_hw_phy_config_ich8lan(sc, 1);
8344 }
8345 }
8346
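/*
 * Bump the PCIe completion timeout (10ms via GCR on older parts,
 * 16ms via DCSR2 otherwise) when it is still at its default, and
 * disable completion timeout resend.
 */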
8347 static void
8348 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8349 {
8350 uint32_t gcr;
8351 pcireg_t ctrl2;
8352
8353 gcr = CSR_READ(sc, WMREG_GCR);
8354
8355 /* Only take action if timeout value is defaulted to 0 */
8356 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8357 goto out;
8358
8359 if ((gcr & GCR_CAP_VER2) == 0) {
8360 gcr |= GCR_CMPL_TMOUT_10MS;
8361 goto out;
8362 }
8363
8364 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8365 sc->sc_pcixe_capoff + PCIE_DCSR2);
8366 ctrl2 |= WM_PCIE_DCSR2_16MS;
8367 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8368 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8369
8370 out:
8371 /* Disable completion timeout resend */
8372 gcr &= ~GCR_CMPL_TMOUT_RESEND;
8373
8374 CSR_WRITE(sc, WMREG_GCR, gcr);
8375 }
8376
8377 /* special case - for 82575 - need to do manual init ... */
8378 static void
8379 wm_reset_init_script_82575(struct wm_softc *sc)
8380 {
8381 /*
8382 * remark: this is untested code - we have no board without EEPROM
8383 * same setup as mentioned in the FreeBSD driver for the i82575
8384 */
8385
8386 /* SerDes configuration via SERDESCTRL */
8387 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8388 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8389 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8390 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8391
8392 /* CCM configuration via CCMCTL register */
8393 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8394 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8395
8396 /* PCIe lanes configuration */
8397 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8398 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8399 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8400 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8401
8402 /* PCIe PLL Configuration */
8403 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8404 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8405 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8406 }
8407
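/*
 * Set up manageability: keep ARP handled by the host and allow
 * management packets (ports 623/624) to be passed to the host.
 */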
8408 static void
8409 wm_init_manageability(struct wm_softc *sc)
8410 {
8411
8412 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8413 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8414 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8415
8416 /* disable hardware interception of ARP */
8417 manc &= ~MANC_ARP_EN;
8418
8419 /* enable receiving management packets to the host */
8420 if (sc->sc_type >= WM_T_82571) {
8421 manc |= MANC_EN_MNG2HOST;
8422 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8423 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8425 }
8426
8427 CSR_WRITE(sc, WMREG_MANC, manc);
8428 }
8429 }
8430
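/*
 * Undo wm_init_manageability(): stop diverting management packets
 * to the host.
 */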
8431 static void
8432 wm_release_manageability(struct wm_softc *sc)
8433 {
8434
8435 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8436 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8437
8438 if (sc->sc_type >= WM_T_82571)
8439 manc &= ~MANC_EN_MNG2HOST;
8440
8441 CSR_WRITE(sc, WMREG_MANC, manc);
8442 }
8443 }
8444
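/*
 * Record the wakeup and manageability capabilities of this chip in
 * sc_flags (AMT, ARC subsystem, ASF firmware, manageability).
 */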
8445 static void
8446 wm_get_wakeup(struct wm_softc *sc)
8447 {
8448
8449 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8450 switch (sc->sc_type) {
8451 case WM_T_82573:
8452 case WM_T_82583:
8453 sc->sc_flags |= WM_F_HAS_AMT;
8454 /* FALLTHROUGH */
8455 case WM_T_80003:
8456 case WM_T_82541:
8457 case WM_T_82547:
8458 case WM_T_82571:
8459 case WM_T_82572:
8460 case WM_T_82574:
8461 case WM_T_82575:
8462 case WM_T_82576:
8463 #if 0 /* XXX */
8464 case WM_T_82580:
8465 case WM_T_82580ER:
8466 case WM_T_I350:
8467 #endif
8468 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8469 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8470 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8471 break;
8472 case WM_T_ICH8:
8473 case WM_T_ICH9:
8474 case WM_T_ICH10:
8475 case WM_T_PCH:
8476 case WM_T_PCH2:
8477 case WM_T_PCH_LPT:
8478 sc->sc_flags |= WM_F_HAS_AMT;
8479 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8480 break;
8481 default:
8482 break;
8483 }
8484
8485 /* 1: HAS_MANAGE */
8486 if (wm_enable_mng_pass_thru(sc) != 0)
8487 sc->sc_flags |= WM_F_HAS_MANAGE;
8488
8489 #ifdef WM_DEBUG
8490 printf("\n");
8491 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8492 printf("HAS_AMT,");
8493 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8494 printf("ARC_SUBSYS_VALID,");
8495 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8496 printf("ASF_FIRMWARE_PRES,");
8497 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8498 printf("HAS_MANAGE,");
8499 printf("\n");
8500 #endif
8501 /*
8502 * Note that the WOL flags are set after the EEPROM-related
8503 * reset code has run.
8504 */
8505 }
8506
8507 #ifdef WM_WOL
8508 /* WOL in the newer chipset interfaces (pchlan) */
8509 static void
8510 wm_enable_phy_wakeup(struct wm_softc *sc)
8511 {
8512 #if 0
8513 uint16_t preg;
8514
8515 /* Copy MAC RARs to PHY RARs */
8516
8517 /* Copy MAC MTA to PHY MTA */
8518
8519 /* Configure PHY Rx Control register */
8520
8521 /* Enable PHY wakeup in MAC register */
8522
8523 /* Configure and enable PHY wakeup in PHY registers */
8524
8525 /* Activate PHY wakeup */
8526
8527 /* XXX */
8528 #endif
8529 }
8530
8531 static void
8532 wm_enable_wakeup(struct wm_softc *sc)
8533 {
8534 uint32_t reg, pmreg;
8535 pcireg_t pmode;
8536
8537 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8538 &pmreg, NULL) == 0)
8539 return;
8540
8541 /* Advertise the wakeup capability */
8542 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8543 | CTRL_SWDPIN(3));
8544 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8545
8546 /* ICH workaround */
8547 switch (sc->sc_type) {
8548 case WM_T_ICH8:
8549 case WM_T_ICH9:
8550 case WM_T_ICH10:
8551 case WM_T_PCH:
8552 case WM_T_PCH2:
8553 case WM_T_PCH_LPT:
8554 /* Disable gig during WOL */
8555 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8556 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8557 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8558 if (sc->sc_type == WM_T_PCH)
8559 wm_gmii_reset(sc);
8560
8561 /* Power down workaround */
8562 if (sc->sc_phytype == WMPHY_82577) {
8563 struct mii_softc *child;
8564
8565 /* Assume that the PHY is copper */
8566 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8567 if (child->mii_mpd_rev <= 2)
8568 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8569 (768 << 5) | 25, 0x0444); /* magic num */
8570 }
8571 break;
8572 default:
8573 break;
8574 }
8575
8576 /* Keep the laser running on fiber adapters */
8577 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8578 || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
8579 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8580 reg |= CTRL_EXT_SWDPIN(3);
8581 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8582 }
8583
8584 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8585 #if 0 /* for the multicast packet */
8586 reg |= WUFC_MC;
8587 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8588 #endif
8589
8590 if (sc->sc_type == WM_T_PCH) {
8591 wm_enable_phy_wakeup(sc);
8592 } else {
8593 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8594 CSR_WRITE(sc, WMREG_WUFC, reg);
8595 }
8596
8597 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8598 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8599 || (sc->sc_type == WM_T_PCH2))
8600 && (sc->sc_phytype == WMPHY_IGP_3))
8601 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8602
8603 /* Request PME */
8604 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8605 #if 0
8606 /* Disable WOL */
8607 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8608 #else
8609 /* For WOL */
8610 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8611 #endif
8612 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8613 }
8614 #endif /* WM_WOL */
8615
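/*
 * pmf(9) suspend hook: hand the hardware back to the firmware and,
 * if configured, arm wake-on-LAN.
 */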
8616 static bool
8617 wm_suspend(device_t self, const pmf_qual_t *qual)
8618 {
8619 struct wm_softc *sc = device_private(self);
8620
8621 wm_release_manageability(sc);
8622 wm_release_hw_control(sc);
8623 #ifdef WM_WOL
8624 wm_enable_wakeup(sc);
8625 #endif
8626
8627 return true;
8628 }
8629
8630 static bool
8631 wm_resume(device_t self, const pmf_qual_t *qual)
8632 {
8633 struct wm_softc *sc = device_private(self);
8634
8635 wm_init_manageability(sc);
8636
8637 return true;
8638 }
8639
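/*
 * Enable or disable Energy Efficient Ethernet (EEE) on the I350
 * according to the WM_F_EEE flag.
 */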
8640 static void
8641 wm_set_eee_i350(struct wm_softc *sc)
8642 {
8643 uint32_t ipcnfg, eeer;
8644
8645 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8646 eeer = CSR_READ(sc, WMREG_EEER);
8647
8648 if ((sc->sc_flags & WM_F_EEE) != 0) {
8649 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8650 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8651 | EEER_LPI_FC);
8652 } else {
8653 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8654 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8655 | EEER_LPI_FC);
8656 }
8657
8658 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8659 CSR_WRITE(sc, WMREG_EEER, eeer);
8660 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8661 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8662 }
8663