/*	$NetBSD: if_wm.c,v 1.250 2013/06/02 17:23:33 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.250 2013/06/02 17:23:33 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
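
/*
 * Illustrative sketch (not part of the driver): a call site guards its
 * output with one of the WM_DEBUG_* bits above, and passes the printf
 * arguments as a single parenthesized argument, e.g.
 *
 *	DPRINTF(WM_DEBUG_TX,
 *	    ("%s: TX: ...\n", device_xname(sc->sc_dev)));
 *
 * With WM_DEBUG undefined, DPRINTF() expands to nothing, so the debug
 * calls cost nothing in production kernels.
 */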

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
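
/*
 * Example of the mask-based ring arithmetic above (this is why the
 * ring sizes must be powers of two): with WM_NTXDESC(sc) == 4096,
 * WM_NTXDESC_MASK(sc) == 0xfff, so
 *
 *	WM_NEXTTX(sc, 4095) == (4096 & 0xfff) == 0
 *
 * and the index wraps back to the start of the ring without a
 * division or comparison.
 */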

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
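
/*
 * Worked example of the sizing above: a full-sized jumbo frame
 * (roughly 9K) spread over 2K (MCLBYTES) buffers occupies about
 * 9K / 2K = 5 receive buffers, so 256 descriptors at 5 buffers
 * per packet leaves room for about 50 in-flight jumbo packets.
 */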

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
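
/*
 * A sketch of what these macros compute: WM_CDRXOFF(0) is 0, because
 * wcd_rxdescs is the first member of struct wm_control_data_82544,
 * while WM_CDTXOFF(0) is the byte offset of the first Tx descriptor,
 * i.e. just past the WM_NRXDESC Rx descriptors (modulo any padding).
 * These offsets feed the WM_CDTXADDR()/WM_CDRXADDR() DMA-address
 * macros further below.
 */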

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
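
/*
 * A minimal sketch of the tail-pointer idiom used above: sc_rxtailp
 * always points at the m_next slot where the next mbuf will be hung,
 * so appending is O(1) with no walking of the chain:
 *
 *	WM_RXCHAIN_RESET(sc);		// sc_rxtailp = &sc_rxhead
 *	WM_RXCHAIN_LINK(sc, m1);	// sc_rxhead = m1
 *	WM_RXCHAIN_LINK(sc, m2);	// m1->m_next = m2
 */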

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
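
/*
 * Example of the high/low split above (assuming an 8-byte bus_addr_t):
 * for a descriptor at DMA address 0x123456000, WM_CDTXADDR_LO() yields
 * 0x23456000 and WM_CDTXADDR_HI() yields 0x1.  With a 4-byte
 * bus_addr_t the high word is always 0, and the sizeof() test lets the
 * compiler discard the 64-bit shift entirely.
 */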

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
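
/*
 * Sketch of the wrap-around case handled above: with WM_NTXDESC(sc)
 * == 4096, WM_CDTXSYNC(sc, 4094, 4, ops) issues two bus_dmamap_sync()
 * calls -- one for descriptors 4094-4095 and one for descriptors 0-1
 * -- because the two runs are not contiguous within the control-data
 * block and a single sync can only cover one contiguous byte range.
 */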

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
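
/*
 * Worked example of the align_tweak logic above: with sc_align_tweak
 * == 2, a 14-byte Ethernet header starting at buffer offset 2 puts
 * the IP header at offset 16, a 4-byte boundary, which
 * strict-alignment CPUs require.  The cost is that only (2048 - 2)
 * bytes of the 2K buffer remain usable, hence the restriction on
 * "scooting" oversized packets described in the comment.
 */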

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
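
/*
 * The pair above implements the chip's I/O-mapped indirect register
 * window: offset 0 in I/O space holds the target register address and
 * offset 4 holds the data.  A hypothetical read-modify-write through
 * the window would look like
 *
 *	wm_io_write(sc, reg, wm_io_read(sc, reg) | bit);
 *
 * (illustrative only; wm_io_read() is compiled out above).
 */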
1086
1087 static inline void
1088 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1089 uint32_t data)
1090 {
1091 uint32_t regval;
1092 int i;
1093
1094 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1095
1096 CSR_WRITE(sc, reg, regval);
1097
1098 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1099 delay(5);
1100 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1101 break;
1102 }
1103 if (i == SCTL_CTL_POLL_TIMEOUT) {
1104 aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
1105 device_xname(sc->sc_dev), reg);
1106 }
1107 }
1108
1109 static inline void
1110 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1111 {
1112 wa->wa_low = htole32(v & 0xffffffffU);
1113 if (sizeof(bus_addr_t) == 8)
1114 wa->wa_high = htole32((uint64_t) v >> 32);
1115 else
1116 wa->wa_high = 0;
1117 }
1118
1119 static void
1120 wm_set_spiaddrbits(struct wm_softc *sc)
1121 {
1122 uint32_t reg;
1123
1124 sc->sc_flags |= WM_F_EEPROM_SPI;
1125 reg = CSR_READ(sc, WMREG_EECD);
1126 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1127 }
1128
1129 static const struct wm_product *
1130 wm_lookup(const struct pci_attach_args *pa)
1131 {
1132 const struct wm_product *wmp;
1133
1134 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1135 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1136 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1137 return wmp;
1138 }
1139 return NULL;
1140 }
1141
1142 static int
1143 wm_match(device_t parent, cfdata_t cf, void *aux)
1144 {
1145 struct pci_attach_args *pa = aux;
1146
1147 if (wm_lookup(pa) != NULL)
1148 return 1;
1149
1150 return 0;
1151 }
1152
1153 static void
1154 wm_attach(device_t parent, device_t self, void *aux)
1155 {
1156 struct wm_softc *sc = device_private(self);
1157 struct pci_attach_args *pa = aux;
1158 prop_dictionary_t dict;
1159 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1160 pci_chipset_tag_t pc = pa->pa_pc;
1161 pci_intr_handle_t ih;
1162 const char *intrstr = NULL;
1163 const char *eetype, *xname;
1164 bus_space_tag_t memt;
1165 bus_space_handle_t memh;
1166 bus_size_t memsize;
1167 int memh_valid;
1168 int i, error;
1169 const struct wm_product *wmp;
1170 prop_data_t ea;
1171 prop_number_t pn;
1172 uint8_t enaddr[ETHER_ADDR_LEN];
1173 uint16_t cfg1, cfg2, swdpin, io3;
1174 pcireg_t preg, memtype;
1175 uint16_t eeprom_data, apme_mask;
1176 uint32_t reg;
1177
1178 sc->sc_dev = self;
1179 callout_init(&sc->sc_tick_ch, 0);
1180
1181 sc->sc_wmp = wmp = wm_lookup(pa);
1182 if (wmp == NULL) {
1183 printf("\n");
1184 panic("wm_attach: impossible");
1185 }
1186
1187 sc->sc_pc = pa->pa_pc;
1188 sc->sc_pcitag = pa->pa_tag;
1189
1190 if (pci_dma64_available(pa))
1191 sc->sc_dmat = pa->pa_dmat64;
1192 else
1193 sc->sc_dmat = pa->pa_dmat;
1194
1195 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1196 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1197
1198 sc->sc_type = wmp->wmp_type;
1199 if (sc->sc_type < WM_T_82543) {
1200 if (sc->sc_rev < 2) {
1201 aprint_error_dev(sc->sc_dev,
1202 "i82542 must be at least rev. 2\n");
1203 return;
1204 }
1205 if (sc->sc_rev < 3)
1206 sc->sc_type = WM_T_82542_2_0;
1207 }
1208
1209 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1210 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
1211 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
1212 || (sc->sc_type == WM_T_I211))
1213 sc->sc_flags |= WM_F_NEWQUEUE;
1214
1215 /* Set device properties (mactype) */
1216 dict = device_properties(sc->sc_dev);
1217 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1218
1219 /*
1220 * Map the device. All devices support memory-mapped acccess,
1221 * and it is really required for normal operation.
1222 */
1223 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1224 switch (memtype) {
1225 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1226 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1227 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1228 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1229 break;
1230 default:
1231 memh_valid = 0;
1232 break;
1233 }
1234
1235 if (memh_valid) {
1236 sc->sc_st = memt;
1237 sc->sc_sh = memh;
1238 sc->sc_ss = memsize;
1239 } else {
1240 aprint_error_dev(sc->sc_dev,
1241 "unable to map device registers\n");
1242 return;
1243 }
1244
1245 wm_get_wakeup(sc);
1246
1247 /*
1248 * In addition, i82544 and later support I/O mapped indirect
1249 * register access. It is not desirable (nor supported in
1250 * this driver) to use it for normal operation, though it is
1251 * required to work around bugs in some chip versions.
1252 */
1253 if (sc->sc_type >= WM_T_82544) {
1254 /* First we have to find the I/O BAR. */
1255 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1256 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1257 if (memtype == PCI_MAPREG_TYPE_IO)
1258 break;
1259 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1260 PCI_MAPREG_MEM_TYPE_64BIT)
1261 i += 4; /* skip high bits, too */
1262 }
1263 if (i < PCI_MAPREG_END) {
1264 /*
1265 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1266 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1267 * It's no problem because newer chips has no this
1268 * bug.
1269 *
1270 * The i8254x doesn't apparently respond when the
1271 * I/O BAR is 0, which looks somewhat like it's not
1272 * been configured.
1273 */
1274 preg = pci_conf_read(pc, pa->pa_tag, i);
1275 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1276 aprint_error_dev(sc->sc_dev,
1277 "WARNING: I/O BAR at zero.\n");
1278 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1279 0, &sc->sc_iot, &sc->sc_ioh,
1280 NULL, &sc->sc_ios) == 0) {
1281 sc->sc_flags |= WM_F_IOH_VALID;
1282 } else {
1283 aprint_error_dev(sc->sc_dev,
1284 "WARNING: unable to map I/O space\n");
1285 }
1286 }
1287
1288 }
1289
1290 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1291 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1292 preg |= PCI_COMMAND_MASTER_ENABLE;
1293 if (sc->sc_type < WM_T_82542_2_1)
1294 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1295 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1296
1297 /* power up chip */
1298 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1299 NULL)) && error != EOPNOTSUPP) {
1300 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1301 return;
1302 }
1303
1304 /*
1305 * Map and establish our interrupt.
1306 */
1307 if (pci_intr_map(pa, &ih)) {
1308 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1309 return;
1310 }
1311 intrstr = pci_intr_string(pc, ih);
1312 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1313 if (sc->sc_ih == NULL) {
1314 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1315 if (intrstr != NULL)
1316 aprint_error(" at %s", intrstr);
1317 aprint_error("\n");
1318 return;
1319 }
1320 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1321
1322 /*
1323 * Check the function ID (unit number of the chip).
1324 */
1325 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1326 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1327 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1328 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
1329 || (sc->sc_type == WM_T_I350))
1330 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1331 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1332 else
1333 sc->sc_funcid = 0;
1334
1335 /*
1336 * Determine a few things about the bus we're connected to.
1337 */
1338 if (sc->sc_type < WM_T_82543) {
1339 /* We don't really know the bus characteristics here. */
1340 sc->sc_bus_speed = 33;
1341 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1342 /*
1343 * CSA (Communication Streaming Architecture) is about as fast
1344 * a 32-bit 66MHz PCI Bus.
1345 */
1346 sc->sc_flags |= WM_F_CSA;
1347 sc->sc_bus_speed = 66;
1348 aprint_verbose_dev(sc->sc_dev,
1349 "Communication Streaming Architecture\n");
1350 if (sc->sc_type == WM_T_82547) {
1351 callout_init(&sc->sc_txfifo_ch, 0);
1352 callout_setfunc(&sc->sc_txfifo_ch,
1353 wm_82547_txfifo_stall, sc);
1354 aprint_verbose_dev(sc->sc_dev,
1355 "using 82547 Tx FIFO stall work-around\n");
1356 }
1357 } else if (sc->sc_type >= WM_T_82571) {
1358 sc->sc_flags |= WM_F_PCIE;
1359 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1360 && (sc->sc_type != WM_T_ICH10)
1361 && (sc->sc_type != WM_T_PCH)
1362 && (sc->sc_type != WM_T_PCH2)
1363 && (sc->sc_type != WM_T_PCH_LPT)) {
1364 sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1365 /* ICH* and PCH* have no PCIe capability registers */
1366 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1367 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1368 NULL) == 0)
1369 aprint_error_dev(sc->sc_dev,
1370 "unable to find PCIe capability\n");
1371 }
1372 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1373 } else {
1374 reg = CSR_READ(sc, WMREG_STATUS);
1375 if (reg & STATUS_BUS64)
1376 sc->sc_flags |= WM_F_BUS64;
1377 if ((reg & STATUS_PCIX_MODE) != 0) {
1378 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1379
1380 sc->sc_flags |= WM_F_PCIX;
1381 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1382 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1383 aprint_error_dev(sc->sc_dev,
1384 "unable to find PCIX capability\n");
1385 else if (sc->sc_type != WM_T_82545_3 &&
1386 sc->sc_type != WM_T_82546_3) {
1387 /*
1388 * Work around a problem caused by the BIOS
1389 * setting the max memory read byte count
1390 * incorrectly.
1391 */
1392 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1393 sc->sc_pcixe_capoff + PCIX_CMD);
1394 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1395 sc->sc_pcixe_capoff + PCIX_STATUS);
1396
1397 bytecnt =
1398 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1399 PCIX_CMD_BYTECNT_SHIFT;
1400 maxb =
1401 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1402 PCIX_STATUS_MAXB_SHIFT;
1403 if (bytecnt > maxb) {
1404 aprint_verbose_dev(sc->sc_dev,
1405 "resetting PCI-X MMRBC: %d -> %d\n",
1406 512 << bytecnt, 512 << maxb);
1407 pcix_cmd = (pcix_cmd &
1408 ~PCIX_CMD_BYTECNT_MASK) |
1409 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1410 pci_conf_write(pa->pa_pc, pa->pa_tag,
1411 sc->sc_pcixe_capoff + PCIX_CMD,
1412 pcix_cmd);
1413 }
1414 }
1415 }
1416 /*
1417 * The quad port adapter is special; it has a PCIX-PCIX
1418 * bridge on the board, and can run the secondary bus at
1419 * a higher speed.
1420 */
1421 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1422 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1423 : 66;
1424 } else if (sc->sc_flags & WM_F_PCIX) {
1425 switch (reg & STATUS_PCIXSPD_MASK) {
1426 case STATUS_PCIXSPD_50_66:
1427 sc->sc_bus_speed = 66;
1428 break;
1429 case STATUS_PCIXSPD_66_100:
1430 sc->sc_bus_speed = 100;
1431 break;
1432 case STATUS_PCIXSPD_100_133:
1433 sc->sc_bus_speed = 133;
1434 break;
1435 default:
1436 aprint_error_dev(sc->sc_dev,
1437 "unknown PCIXSPD %d; assuming 66MHz\n",
1438 reg & STATUS_PCIXSPD_MASK);
1439 sc->sc_bus_speed = 66;
1440 break;
1441 }
1442 } else
1443 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1444 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1445 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1446 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1447 }
1448
1449 /*
1450 * Allocate the control data structures, and create and load the
1451 * DMA map for it.
1452 *
1453 * NOTE: All Tx descriptors must be in the same 4G segment of
1454 * memory. So must Rx descriptors. We simplify by allocating
1455 * both sets within the same 4G segment.
1456 */
1457 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1458 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1459 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1460 sizeof(struct wm_control_data_82542) :
1461 sizeof(struct wm_control_data_82544);
1462 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1463 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1464 &sc->sc_cd_rseg, 0)) != 0) {
1465 aprint_error_dev(sc->sc_dev,
1466 "unable to allocate control data, error = %d\n",
1467 error);
1468 goto fail_0;
1469 }
1470
1471 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1472 sc->sc_cd_rseg, sc->sc_cd_size,
1473 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1474 aprint_error_dev(sc->sc_dev,
1475 "unable to map control data, error = %d\n", error);
1476 goto fail_1;
1477 }
1478
1479 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1480 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1481 aprint_error_dev(sc->sc_dev,
1482 "unable to create control data DMA map, error = %d\n",
1483 error);
1484 goto fail_2;
1485 }
1486
1487 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1488 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1489 aprint_error_dev(sc->sc_dev,
1490 "unable to load control data DMA map, error = %d\n",
1491 error);
1492 goto fail_3;
1493 }
1494
1495 /*
1496 * Create the transmit buffer DMA maps.
1497 */
1498 WM_TXQUEUELEN(sc) =
1499 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1500 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1501 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1502 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1503 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1504 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1505 aprint_error_dev(sc->sc_dev,
1506 "unable to create Tx DMA map %d, error = %d\n",
1507 i, error);
1508 goto fail_4;
1509 }
1510 }
1511
1512 /*
1513 * Create the receive buffer DMA maps.
1514 */
1515 for (i = 0; i < WM_NRXDESC; i++) {
1516 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1517 MCLBYTES, 0, 0,
1518 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1519 aprint_error_dev(sc->sc_dev,
1520 "unable to create Rx DMA map %d error = %d\n",
1521 i, error);
1522 goto fail_5;
1523 }
1524 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1525 }
1526
1527 /* clear interesting stat counters */
1528 CSR_READ(sc, WMREG_COLC);
1529 CSR_READ(sc, WMREG_RXERRC);
1530
1531 /* get PHY control from SMBus to PCIe */
1532 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1533 || (sc->sc_type == WM_T_PCH_LPT))
1534 wm_smbustopci(sc);
1535
1536 /*
1537 * Reset the chip to a known state.
1538 */
1539 wm_reset(sc);
1540
1541 switch (sc->sc_type) {
1542 case WM_T_82571:
1543 case WM_T_82572:
1544 case WM_T_82573:
1545 case WM_T_82574:
1546 case WM_T_82583:
1547 case WM_T_80003:
1548 case WM_T_ICH8:
1549 case WM_T_ICH9:
1550 case WM_T_ICH10:
1551 case WM_T_PCH:
1552 case WM_T_PCH2:
1553 case WM_T_PCH_LPT:
1554 if (wm_check_mng_mode(sc) != 0)
1555 wm_get_hw_control(sc);
1556 break;
1557 default:
1558 break;
1559 }
1560
1561 /*
1562 * Get some information about the EEPROM.
1563 */
1564 switch (sc->sc_type) {
1565 case WM_T_82542_2_0:
1566 case WM_T_82542_2_1:
1567 case WM_T_82543:
1568 case WM_T_82544:
1569 /* Microwire */
1570 sc->sc_ee_addrbits = 6;
1571 break;
1572 case WM_T_82540:
1573 case WM_T_82545:
1574 case WM_T_82545_3:
1575 case WM_T_82546:
1576 case WM_T_82546_3:
1577 /* Microwire */
1578 reg = CSR_READ(sc, WMREG_EECD);
1579 if (reg & EECD_EE_SIZE)
1580 sc->sc_ee_addrbits = 8;
1581 else
1582 sc->sc_ee_addrbits = 6;
1583 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1584 break;
1585 case WM_T_82541:
1586 case WM_T_82541_2:
1587 case WM_T_82547:
1588 case WM_T_82547_2:
1589 reg = CSR_READ(sc, WMREG_EECD);
1590 if (reg & EECD_EE_TYPE) {
1591 /* SPI */
1592 wm_set_spiaddrbits(sc);
1593 } else
1594 /* Microwire */
1595 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1596 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1597 break;
1598 case WM_T_82571:
1599 case WM_T_82572:
1600 /* SPI */
1601 wm_set_spiaddrbits(sc);
1602 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1603 break;
1604 case WM_T_82573:
1605 case WM_T_82574:
1606 case WM_T_82583:
1607 if (wm_is_onboard_nvm_eeprom(sc) == 0)
1608 sc->sc_flags |= WM_F_EEPROM_FLASH;
1609 else {
1610 /* SPI */
1611 wm_set_spiaddrbits(sc);
1612 }
1613 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1614 break;
1615 case WM_T_82575:
1616 case WM_T_82576:
1617 case WM_T_82580:
1618 case WM_T_82580ER:
1619 case WM_T_I350:
1620 case WM_T_80003:
1621 /* SPI */
1622 wm_set_spiaddrbits(sc);
1623 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1624 break;
1625 case WM_T_ICH8:
1626 case WM_T_ICH9:
1627 case WM_T_ICH10:
1628 case WM_T_PCH:
1629 case WM_T_PCH2:
1630 case WM_T_PCH_LPT:
1631 /* FLASH */
1632 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1633 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1634 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1635 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1636 aprint_error_dev(sc->sc_dev,
1637 "can't map FLASH registers\n");
1638 return;
1639 }
1640 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1641 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1642 ICH_FLASH_SECTOR_SIZE;
1643 sc->sc_ich8_flash_bank_size =
1644 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1645 sc->sc_ich8_flash_bank_size -=
1646 (reg & ICH_GFPREG_BASE_MASK);
1647 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1648 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
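		/*
		 * Worked example for the decode above (illustrative
		 * numbers): if GFPREG reads 0x001f0010, the base field
		 * is 0x0010 sectors and the limit field is 0x001f, so
		 * the region spans 0x001f + 1 - 0x0010 = 0x10 sectors.
		 * With a 4 KiB sector size (an assumption here) that is
		 * 65536 bytes; dividing by 2 * sizeof(uint16_t) gives
		 * 16384 16-bit words per bank, since the region holds
		 * two NVM banks.
		 */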
1649 break;
1650 case WM_T_I210:
1651 case WM_T_I211:
1652 #if 1
1653 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1654 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1655 #endif
1656 break;
1657 default:
1658 break;
1659 }
1660
1661 /*
1662 	 * Defer printing the EEPROM type until after verifying the checksum.
1663 * This allows the EEPROM type to be printed correctly in the case
1664 * that no EEPROM is attached.
1665 */
1666 /*
1667 * Validate the EEPROM checksum. If the checksum fails, flag
1668 * this for later, so we can fail future reads from the EEPROM.
1669 */
1670 if (wm_validate_eeprom_checksum(sc)) {
1671 /*
1672 		 * Try the read once more, because some PCI-e parts fail
1673 		 * the first check due to the link being in a sleep state.
1674 */
1675 if (wm_validate_eeprom_checksum(sc))
1676 sc->sc_flags |= WM_F_EEPROM_INVALID;
1677 }
1678
1679 /* Set device properties (macflags) */
1680 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1681
1682 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1683 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1684 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1685 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1686 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1687 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1688 } else {
1689 if (sc->sc_flags & WM_F_EEPROM_SPI)
1690 eetype = "SPI";
1691 else
1692 eetype = "MicroWire";
1693 aprint_verbose_dev(sc->sc_dev,
1694 "%u word (%d address bits) %s EEPROM\n",
1695 1U << sc->sc_ee_addrbits,
1696 sc->sc_ee_addrbits, eetype);
1697 }
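	/*
	 * For example, a Microwire part with sc_ee_addrbits == 6 is
	 * reported as "64 word (6 address bits) MicroWire EEPROM",
	 * since 1U << 6 == 64; with 8 address bits it is 256 words.
	 */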
1698
1699 /*
1700 	 * Read the Ethernet address from the EEPROM unless it was
1701 	 * already provided via device properties.
1702 */
1703 ea = prop_dictionary_get(dict, "mac-address");
1704 if (ea != NULL) {
1705 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1706 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1707 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1708 } else {
1709 if (wm_read_mac_addr(sc, enaddr) != 0) {
1710 aprint_error_dev(sc->sc_dev,
1711 "unable to read Ethernet address\n");
1712 return;
1713 }
1714 }
1715
1716 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1717 ether_sprintf(enaddr));
1718
1719 /*
1720 * Read the config info from the EEPROM, and set up various
1721 * bits in the control registers based on their contents.
1722 */
1723 pn = prop_dictionary_get(dict, "i82543-cfg1");
1724 if (pn != NULL) {
1725 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1726 cfg1 = (uint16_t) prop_number_integer_value(pn);
1727 } else {
1728 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1729 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1730 return;
1731 }
1732 }
1733
1734 pn = prop_dictionary_get(dict, "i82543-cfg2");
1735 if (pn != NULL) {
1736 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1737 cfg2 = (uint16_t) prop_number_integer_value(pn);
1738 } else {
1739 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1740 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1741 return;
1742 }
1743 }
1744
1745 /* check for WM_F_WOL */
1746 switch (sc->sc_type) {
1747 case WM_T_82542_2_0:
1748 case WM_T_82542_2_1:
1749 case WM_T_82543:
1750 /* dummy? */
1751 eeprom_data = 0;
1752 apme_mask = EEPROM_CFG3_APME;
1753 break;
1754 case WM_T_82544:
1755 apme_mask = EEPROM_CFG2_82544_APM_EN;
1756 eeprom_data = cfg2;
1757 break;
1758 case WM_T_82546:
1759 case WM_T_82546_3:
1760 case WM_T_82571:
1761 case WM_T_82572:
1762 case WM_T_82573:
1763 case WM_T_82574:
1764 case WM_T_82583:
1765 case WM_T_80003:
1766 default:
1767 apme_mask = EEPROM_CFG3_APME;
1768 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1769 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1770 break;
1771 case WM_T_82575:
1772 case WM_T_82576:
1773 case WM_T_82580:
1774 case WM_T_82580ER:
1775 case WM_T_I350:
1776 case WM_T_ICH8:
1777 case WM_T_ICH9:
1778 case WM_T_ICH10:
1779 case WM_T_PCH:
1780 case WM_T_PCH2:
1781 case WM_T_PCH_LPT:
1782 /* XXX The funcid should be checked on some devices */
1783 apme_mask = WUC_APME;
1784 eeprom_data = CSR_READ(sc, WMREG_WUC);
1785 break;
1786 }
1787
1788 	/* Set the WM_F_WOL flag now that the EEPROM/register bits are known */
1789 if ((eeprom_data & apme_mask) != 0)
1790 sc->sc_flags |= WM_F_WOL;
1791 #ifdef WM_DEBUG
1792 if ((sc->sc_flags & WM_F_WOL) != 0)
1793 printf("WOL\n");
1794 #endif
1795
1796 /*
1797 	 * XXX need special handling for some multiple-port cards
1798 	 * to disable a particular port.
1799 */
1800
1801 if (sc->sc_type >= WM_T_82544) {
1802 pn = prop_dictionary_get(dict, "i82543-swdpin");
1803 if (pn != NULL) {
1804 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1805 swdpin = (uint16_t) prop_number_integer_value(pn);
1806 } else {
1807 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1808 aprint_error_dev(sc->sc_dev,
1809 "unable to read SWDPIN\n");
1810 return;
1811 }
1812 }
1813 }
1814
1815 if (cfg1 & EEPROM_CFG1_ILOS)
1816 sc->sc_ctrl |= CTRL_ILOS;
1817 if (sc->sc_type >= WM_T_82544) {
1818 sc->sc_ctrl |=
1819 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1820 CTRL_SWDPIO_SHIFT;
1821 sc->sc_ctrl |=
1822 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1823 CTRL_SWDPINS_SHIFT;
1824 } else {
1825 sc->sc_ctrl |=
1826 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1827 CTRL_SWDPIO_SHIFT;
1828 }
1829
1830 #if 0
1831 if (sc->sc_type >= WM_T_82544) {
1832 if (cfg1 & EEPROM_CFG1_IPS0)
1833 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1834 if (cfg1 & EEPROM_CFG1_IPS1)
1835 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1836 sc->sc_ctrl_ext |=
1837 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1838 CTRL_EXT_SWDPIO_SHIFT;
1839 sc->sc_ctrl_ext |=
1840 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1841 CTRL_EXT_SWDPINS_SHIFT;
1842 } else {
1843 sc->sc_ctrl_ext |=
1844 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1845 CTRL_EXT_SWDPIO_SHIFT;
1846 }
1847 #endif
1848
1849 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1850 #if 0
1851 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1852 #endif
1853
1854 /*
1855 * Set up some register offsets that are different between
1856 * the i82542 and the i82543 and later chips.
1857 */
1858 if (sc->sc_type < WM_T_82543) {
1859 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1860 sc->sc_tdt_reg = WMREG_OLD_TDT;
1861 } else {
1862 sc->sc_rdt_reg = WMREG_RDT;
1863 sc->sc_tdt_reg = WMREG_TDT;
1864 }
1865
1866 if (sc->sc_type == WM_T_PCH) {
1867 uint16_t val;
1868
1869 /* Save the NVM K1 bit setting */
1870 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1871
1872 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1873 sc->sc_nvm_k1_enabled = 1;
1874 else
1875 sc->sc_nvm_k1_enabled = 0;
1876 }
1877
1878 /*
1879 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
1880 * media structures accordingly.
1881 */
1882 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1883 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1884 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
1885 || sc->sc_type == WM_T_82573
1886 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1887 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1888 wm_gmii_mediainit(sc, wmp->wmp_product);
1889 } else if (sc->sc_type < WM_T_82543 ||
1890 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1891 if (wmp->wmp_flags & WMP_F_1000T)
1892 aprint_error_dev(sc->sc_dev,
1893 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1894 wm_tbi_mediainit(sc);
1895 } else {
1896 switch (sc->sc_type) {
1897 case WM_T_82575:
1898 case WM_T_82576:
1899 case WM_T_82580:
1900 case WM_T_82580ER:
1901 case WM_T_I350:
1902 case WM_T_I210:
1903 case WM_T_I211:
1904 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1905 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1906 case CTRL_EXT_LINK_MODE_SGMII:
1907 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1908 sc->sc_flags |= WM_F_SGMII;
1909 CSR_WRITE(sc, WMREG_CTRL_EXT,
1910 reg | CTRL_EXT_I2C_ENA);
1911 wm_gmii_mediainit(sc, wmp->wmp_product);
1912 break;
1913 case CTRL_EXT_LINK_MODE_1000KX:
1914 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1915 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1916 CSR_WRITE(sc, WMREG_CTRL_EXT,
1917 reg | CTRL_EXT_I2C_ENA);
1918 panic("not supported yet\n");
1919 break;
1920 case CTRL_EXT_LINK_MODE_GMII:
1921 default:
1922 CSR_WRITE(sc, WMREG_CTRL_EXT,
1923 reg & ~CTRL_EXT_I2C_ENA);
1924 wm_gmii_mediainit(sc, wmp->wmp_product);
1925 break;
1926 }
1927 break;
1928 default:
1929 if (wmp->wmp_flags & WMP_F_1000X)
1930 aprint_error_dev(sc->sc_dev,
1931 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1932 wm_gmii_mediainit(sc, wmp->wmp_product);
1933 }
1934 }
1935
1936 ifp = &sc->sc_ethercom.ec_if;
1937 xname = device_xname(sc->sc_dev);
1938 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1939 ifp->if_softc = sc;
1940 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1941 ifp->if_ioctl = wm_ioctl;
1942 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1943 ifp->if_start = wm_nq_start;
1944 else
1945 ifp->if_start = wm_start;
1946 ifp->if_watchdog = wm_watchdog;
1947 ifp->if_init = wm_init;
1948 ifp->if_stop = wm_stop;
1949 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1950 IFQ_SET_READY(&ifp->if_snd);
1951
1952 /* Check for jumbo frame */
1953 switch (sc->sc_type) {
1954 case WM_T_82573:
1955 /* XXX limited to 9234 if ASPM is disabled */
1956 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1957 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1958 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1959 break;
1960 case WM_T_82571:
1961 case WM_T_82572:
1962 case WM_T_82574:
1963 case WM_T_82575:
1964 case WM_T_82576:
1965 case WM_T_82580:
1966 case WM_T_82580ER:
1967 case WM_T_I350:
1968 case WM_T_I210:
1969 case WM_T_I211:
1970 case WM_T_80003:
1971 case WM_T_ICH9:
1972 case WM_T_ICH10:
1973 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1974 case WM_T_PCH_LPT:
1975 /* XXX limited to 9234 */
1976 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1977 break;
1978 case WM_T_PCH:
1979 /* XXX limited to 4096 */
1980 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1981 break;
1982 case WM_T_82542_2_0:
1983 case WM_T_82542_2_1:
1984 case WM_T_82583:
1985 case WM_T_ICH8:
1986 /* No support for jumbo frame */
1987 break;
1988 default:
1989 /* ETHER_MAX_LEN_JUMBO */
1990 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1991 break;
1992 }
1993
1994 /*
1995 	 * If we're an i82543 or greater, we can support VLANs.
1996 */
1997 if (sc->sc_type >= WM_T_82543)
1998 sc->sc_ethercom.ec_capabilities |=
1999 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2000
2001 /*
2002 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but
2003 	 * only on i82543 and later.
2004 */
2005 if (sc->sc_type >= WM_T_82543) {
2006 ifp->if_capabilities |=
2007 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2008 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2009 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2010 IFCAP_CSUM_TCPv6_Tx |
2011 IFCAP_CSUM_UDPv6_Tx;
2012 }
2013
2014 /*
2015 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2016 *
2017 * 82541GI (8086:1076) ... no
2018 * 82572EI (8086:10b9) ... yes
2019 */
2020 if (sc->sc_type >= WM_T_82571) {
2021 ifp->if_capabilities |=
2022 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2023 }
2024
2025 /*
2026 	 * If we're an i82544 or greater (except i82547), we can do
2027 * TCP segmentation offload.
2028 */
2029 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2030 ifp->if_capabilities |= IFCAP_TSOv4;
2031 }
2032
2033 if (sc->sc_type >= WM_T_82571) {
2034 ifp->if_capabilities |= IFCAP_TSOv6;
2035 }
2036
2037 /*
2038 * Attach the interface.
2039 */
2040 if_attach(ifp);
2041 ether_ifattach(ifp, enaddr);
2042 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2043 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2044
2045 #ifdef WM_EVENT_COUNTERS
2046 /* Attach event counters. */
2047 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2048 NULL, xname, "txsstall");
2049 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2050 NULL, xname, "txdstall");
2051 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2052 NULL, xname, "txfifo_stall");
2053 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2054 NULL, xname, "txdw");
2055 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2056 NULL, xname, "txqe");
2057 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2058 NULL, xname, "rxintr");
2059 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2060 NULL, xname, "linkintr");
2061
2062 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2063 NULL, xname, "rxipsum");
2064 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2065 NULL, xname, "rxtusum");
2066 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2067 NULL, xname, "txipsum");
2068 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2069 NULL, xname, "txtusum");
2070 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2071 NULL, xname, "txtusum6");
2072
2073 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2074 NULL, xname, "txtso");
2075 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2076 NULL, xname, "txtso6");
2077 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2078 NULL, xname, "txtsopain");
2079
2080 for (i = 0; i < WM_NTXSEGS; i++) {
2081 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2082 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2083 NULL, xname, wm_txseg_evcnt_names[i]);
2084 }
2085
2086 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2087 NULL, xname, "txdrop");
2088
2089 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2090 NULL, xname, "tu");
2091
2092 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2093 NULL, xname, "tx_xoff");
2094 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2095 NULL, xname, "tx_xon");
2096 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2097 NULL, xname, "rx_xoff");
2098 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2099 NULL, xname, "rx_xon");
2100 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2101 NULL, xname, "rx_macctl");
2102 #endif /* WM_EVENT_COUNTERS */
2103
2104 if (pmf_device_register(self, wm_suspend, wm_resume))
2105 pmf_class_network_register(self, ifp);
2106 else
2107 aprint_error_dev(self, "couldn't establish power handler\n");
2108
2109 return;
2110
2111 /*
2112 * Free any resources we've allocated during the failed attach
2113 * attempt. Do this in reverse order and fall through.
2114 */
2115 fail_5:
2116 for (i = 0; i < WM_NRXDESC; i++) {
2117 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2118 bus_dmamap_destroy(sc->sc_dmat,
2119 sc->sc_rxsoft[i].rxs_dmamap);
2120 }
2121 fail_4:
2122 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2123 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2124 bus_dmamap_destroy(sc->sc_dmat,
2125 sc->sc_txsoft[i].txs_dmamap);
2126 }
2127 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2128 fail_3:
2129 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2130 fail_2:
2131 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2132 sc->sc_cd_size);
2133 fail_1:
2134 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2135 fail_0:
2136 return;
2137 }
2138
2139 static int
2140 wm_detach(device_t self, int flags __unused)
2141 {
2142 struct wm_softc *sc = device_private(self);
2143 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2144 int i, s;
2145
2146 s = splnet();
2147 /* Stop the interface. Callouts are stopped in it. */
2148 wm_stop(ifp, 1);
2149 splx(s);
2150
2151 pmf_device_deregister(self);
2152
2153 /* Tell the firmware about the release */
2154 wm_release_manageability(sc);
2155 wm_release_hw_control(sc);
2156
2157 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2158
2159 /* Delete all remaining media. */
2160 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2161
2162 ether_ifdetach(ifp);
2163 if_detach(ifp);
2164
2165
2166 /* Unload RX dmamaps and free mbufs */
2167 wm_rxdrain(sc);
2168
2169 	/* Free DMA resources; this mirrors the unwind at the end of wm_attach() */
2170 for (i = 0; i < WM_NRXDESC; i++) {
2171 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2172 bus_dmamap_destroy(sc->sc_dmat,
2173 sc->sc_rxsoft[i].rxs_dmamap);
2174 }
2175 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2176 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2177 bus_dmamap_destroy(sc->sc_dmat,
2178 sc->sc_txsoft[i].txs_dmamap);
2179 }
2180 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2181 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2182 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2183 sc->sc_cd_size);
2184 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2185
2186 /* Disestablish the interrupt handler */
2187 if (sc->sc_ih != NULL) {
2188 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2189 sc->sc_ih = NULL;
2190 }
2191
2192 /* Unmap the registers */
2193 if (sc->sc_ss) {
2194 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2195 sc->sc_ss = 0;
2196 }
2197
2198 if (sc->sc_ios) {
2199 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2200 sc->sc_ios = 0;
2201 }
2202
2203 return 0;
2204 }
2205
2206 /*
2207 * wm_tx_offload:
2208 *
2209 * Set up TCP/IP checksumming parameters for the
2210 * specified packet.
2211 */
2212 static int
2213 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2214 uint8_t *fieldsp)
2215 {
2216 struct mbuf *m0 = txs->txs_mbuf;
2217 struct livengood_tcpip_ctxdesc *t;
2218 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2219 uint32_t ipcse;
2220 struct ether_header *eh;
2221 int offset, iphl;
2222 uint8_t fields;
2223
2224 /*
2225 * XXX It would be nice if the mbuf pkthdr had offset
2226 * fields for the protocol headers.
2227 */
2228
2229 eh = mtod(m0, struct ether_header *);
2230 switch (htons(eh->ether_type)) {
2231 case ETHERTYPE_IP:
2232 case ETHERTYPE_IPV6:
2233 offset = ETHER_HDR_LEN;
2234 break;
2235
2236 case ETHERTYPE_VLAN:
2237 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2238 break;
2239
2240 default:
2241 /*
2242 * Don't support this protocol or encapsulation.
2243 */
2244 *fieldsp = 0;
2245 *cmdp = 0;
2246 return 0;
2247 }
2248
2249 if ((m0->m_pkthdr.csum_flags &
2250 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2251 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2252 } else {
2253 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2254 }
2255 ipcse = offset + iphl - 1;
2256
2257 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2258 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2259 seg = 0;
2260 fields = 0;
2261
2262 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2263 int hlen = offset + iphl;
2264 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2265
2266 if (__predict_false(m0->m_len <
2267 (hlen + sizeof(struct tcphdr)))) {
2268 /*
2269 * TCP/IP headers are not in the first mbuf; we need
2270 * to do this the slow and painful way. Let's just
2271 * hope this doesn't happen very often.
2272 */
2273 struct tcphdr th;
2274
2275 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2276
2277 m_copydata(m0, hlen, sizeof(th), &th);
2278 if (v4) {
2279 struct ip ip;
2280
2281 m_copydata(m0, offset, sizeof(ip), &ip);
2282 ip.ip_len = 0;
2283 m_copyback(m0,
2284 offset + offsetof(struct ip, ip_len),
2285 sizeof(ip.ip_len), &ip.ip_len);
2286 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2287 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2288 } else {
2289 struct ip6_hdr ip6;
2290
2291 m_copydata(m0, offset, sizeof(ip6), &ip6);
2292 ip6.ip6_plen = 0;
2293 m_copyback(m0,
2294 offset + offsetof(struct ip6_hdr, ip6_plen),
2295 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2296 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2297 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2298 }
2299 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2300 sizeof(th.th_sum), &th.th_sum);
2301
2302 hlen += th.th_off << 2;
2303 } else {
2304 /*
2305 * TCP/IP headers are in the first mbuf; we can do
2306 * this the easy way.
2307 */
2308 struct tcphdr *th;
2309
2310 if (v4) {
2311 struct ip *ip =
2312 (void *)(mtod(m0, char *) + offset);
2313 th = (void *)(mtod(m0, char *) + hlen);
2314
2315 ip->ip_len = 0;
2316 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2317 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2318 } else {
2319 struct ip6_hdr *ip6 =
2320 (void *)(mtod(m0, char *) + offset);
2321 th = (void *)(mtod(m0, char *) + hlen);
2322
2323 ip6->ip6_plen = 0;
2324 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2325 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2326 }
2327 hlen += th->th_off << 2;
2328 }
2329
2330 if (v4) {
2331 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2332 cmdlen |= WTX_TCPIP_CMD_IP;
2333 } else {
2334 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2335 ipcse = 0;
2336 }
2337 cmd |= WTX_TCPIP_CMD_TSE;
2338 cmdlen |= WTX_TCPIP_CMD_TSE |
2339 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2340 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2341 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2342 }
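	/*
	 * A note on the TSO setup above: ip_len/ip6_plen are zeroed and
	 * th_sum is seeded with a pseudo-header checksum computed over
	 * the addresses and protocol only (zero length), because the
	 * hardware rewrites the length fields and folds them into the
	 * checksum of every segment it emits.
	 */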
2343
2344 /*
2345 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2346 * offload feature, if we load the context descriptor, we
2347 * MUST provide valid values for IPCSS and TUCSS fields.
2348 */
2349
2350 ipcs = WTX_TCPIP_IPCSS(offset) |
2351 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2352 WTX_TCPIP_IPCSE(ipcse);
2353 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2354 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2355 fields |= WTX_IXSM;
2356 }
2357
2358 offset += iphl;
2359
2360 if (m0->m_pkthdr.csum_flags &
2361 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2362 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2363 fields |= WTX_TXSM;
2364 tucs = WTX_TCPIP_TUCSS(offset) |
2365 WTX_TCPIP_TUCSO(offset +
2366 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2367 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2368 } else if ((m0->m_pkthdr.csum_flags &
2369 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2370 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2371 fields |= WTX_TXSM;
2372 tucs = WTX_TCPIP_TUCSS(offset) |
2373 WTX_TCPIP_TUCSO(offset +
2374 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2375 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2376 } else {
2377 /* Just initialize it to a valid TCP context. */
2378 tucs = WTX_TCPIP_TUCSS(offset) |
2379 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2380 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2381 }
2382
2383 /* Fill in the context descriptor. */
2384 t = (struct livengood_tcpip_ctxdesc *)
2385 &sc->sc_txdescs[sc->sc_txnext];
2386 t->tcpip_ipcs = htole32(ipcs);
2387 t->tcpip_tucs = htole32(tucs);
2388 t->tcpip_cmdlen = htole32(cmdlen);
2389 t->tcpip_seg = htole32(seg);
2390 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2391
2392 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2393 txs->txs_ndesc++;
2394
2395 *cmdp = cmd;
2396 *fieldsp = fields;
2397
2398 return 0;
2399 }
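/*
 * Worked example for wm_tx_offload() (illustrative): for an untagged
 * IPv4/TCP packet with a 20-byte IP header, offset = ETHER_HDR_LEN = 14
 * and iphl = 20, so ipcse = 14 + 20 - 1 = 33 and IPCSO points at
 * ip_sum (14 + 10 = 24). After "offset += iphl", TUCSS = 34 and
 * TUCSO = 34 + 16 = 50, the byte offset of th_sum; TUCSE = 0 means
 * "checksum to the end of the packet".
 */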
2400
2401 static void
2402 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2403 {
2404 struct mbuf *m;
2405 int i;
2406
2407 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2408 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2409 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2410 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2411 m->m_data, m->m_len, m->m_flags);
2412 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2413 i, i == 1 ? "" : "s");
2414 }
2415
2416 /*
2417 * wm_82547_txfifo_stall:
2418 *
2419 * Callout used to wait for the 82547 Tx FIFO to drain,
2420 * reset the FIFO pointers, and restart packet transmission.
2421 */
2422 static void
2423 wm_82547_txfifo_stall(void *arg)
2424 {
2425 struct wm_softc *sc = arg;
2426 int s;
2427
2428 s = splnet();
2429
2430 if (sc->sc_txfifo_stall) {
2431 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2432 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2433 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2434 /*
2435 * Packets have drained. Stop transmitter, reset
2436 * FIFO pointers, restart transmitter, and kick
2437 * the packet queue.
2438 */
2439 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2440 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2441 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2442 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2443 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2444 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2445 CSR_WRITE(sc, WMREG_TCTL, tctl);
2446 CSR_WRITE_FLUSH(sc);
2447
2448 sc->sc_txfifo_head = 0;
2449 sc->sc_txfifo_stall = 0;
2450 wm_start(&sc->sc_ethercom.ec_if);
2451 } else {
2452 /*
2453 * Still waiting for packets to drain; try again in
2454 * another tick.
2455 */
2456 callout_schedule(&sc->sc_txfifo_ch, 1);
2457 }
2458 }
2459
2460 splx(s);
2461 }
2462
2463 static void
2464 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2465 {
2466 uint32_t reg;
2467
2468 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2469
2470 if (on != 0)
2471 reg |= EXTCNFCTR_GATE_PHY_CFG;
2472 else
2473 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2474
2475 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2476 }
2477
2478 /*
2479 * wm_82547_txfifo_bugchk:
2480 *
2481 * Check for bug condition in the 82547 Tx FIFO. We need to
2482 * prevent enqueueing a packet that would wrap around the end
2483  * of the Tx FIFO ring buffer; otherwise the chip will croak.
2484 *
2485 * We do this by checking the amount of space before the end
2486 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2487 * the Tx FIFO, wait for all remaining packets to drain, reset
2488 * the internal FIFO pointers to the beginning, and restart
2489 * transmission on the interface.
2490 */
2491 #define WM_FIFO_HDR 0x10
2492 #define WM_82547_PAD_LEN 0x3e0
2493 static int
2494 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2495 {
2496 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2497 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2498
2499 /* Just return if already stalled. */
2500 if (sc->sc_txfifo_stall)
2501 return 1;
2502
2503 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2504 /* Stall only occurs in half-duplex mode. */
2505 goto send_packet;
2506 }
2507
2508 if (len >= WM_82547_PAD_LEN + space) {
2509 sc->sc_txfifo_stall = 1;
2510 callout_schedule(&sc->sc_txfifo_ch, 1);
2511 return 1;
2512 }
2513
2514 send_packet:
2515 sc->sc_txfifo_head += len;
2516 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2517 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2518
2519 return 0;
2520 }
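/*
 * Worked example for the accounting above (illustrative): a 1514-byte
 * frame is charged roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536
 * bytes of FIFO space. In half-duplex mode it is queued only while
 * 1536 < WM_82547_PAD_LEN + (sc_txfifo_size - sc_txfifo_head);
 * otherwise the driver stalls and arms the callout to drain and
 * rewind the FIFO first.
 */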
2521
2522 /*
2523 * wm_start: [ifnet interface function]
2524 *
2525 * Start packet transmission on the interface.
2526 */
2527 static void
2528 wm_start(struct ifnet *ifp)
2529 {
2530 struct wm_softc *sc = ifp->if_softc;
2531 struct mbuf *m0;
2532 struct m_tag *mtag;
2533 struct wm_txsoft *txs;
2534 bus_dmamap_t dmamap;
2535 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2536 bus_addr_t curaddr;
2537 bus_size_t seglen, curlen;
2538 uint32_t cksumcmd;
2539 uint8_t cksumfields;
2540
2541 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2542 return;
2543
2544 /*
2545 * Remember the previous number of free descriptors.
2546 */
2547 ofree = sc->sc_txfree;
2548
2549 /*
2550 * Loop through the send queue, setting up transmit descriptors
2551 * until we drain the queue, or use up all available transmit
2552 * descriptors.
2553 */
2554 for (;;) {
2555 /* Grab a packet off the queue. */
2556 IFQ_POLL(&ifp->if_snd, m0);
2557 if (m0 == NULL)
2558 break;
2559
2560 DPRINTF(WM_DEBUG_TX,
2561 ("%s: TX: have packet to transmit: %p\n",
2562 device_xname(sc->sc_dev), m0));
2563
2564 /* Get a work queue entry. */
2565 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2566 wm_txintr(sc);
2567 if (sc->sc_txsfree == 0) {
2568 DPRINTF(WM_DEBUG_TX,
2569 ("%s: TX: no free job descriptors\n",
2570 device_xname(sc->sc_dev)));
2571 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2572 break;
2573 }
2574 }
2575
2576 txs = &sc->sc_txsoft[sc->sc_txsnext];
2577 dmamap = txs->txs_dmamap;
2578
2579 use_tso = (m0->m_pkthdr.csum_flags &
2580 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2581
2582 /*
2583 * So says the Linux driver:
2584 * The controller does a simple calculation to make sure
2585 * there is enough room in the FIFO before initiating the
2586 * DMA for each buffer. The calc is:
2587 * 4 = ceil(buffer len / MSS)
2588 * To make sure we don't overrun the FIFO, adjust the max
2589 * buffer len if the MSS drops.
2590 */
2591 dmamap->dm_maxsegsz =
2592 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2593 ? m0->m_pkthdr.segsz << 2
2594 : WTX_MAX_LEN;
2595
2596 /*
2597 * Load the DMA map. If this fails, the packet either
2598 * didn't fit in the allotted number of segments, or we
2599 * were short on resources. For the too-many-segments
2600 * case, we simply report an error and drop the packet,
2601 * since we can't sanely copy a jumbo packet to a single
2602 * buffer.
2603 */
2604 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2605 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2606 if (error) {
2607 if (error == EFBIG) {
2608 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2609 log(LOG_ERR, "%s: Tx packet consumes too many "
2610 "DMA segments, dropping...\n",
2611 device_xname(sc->sc_dev));
2612 IFQ_DEQUEUE(&ifp->if_snd, m0);
2613 wm_dump_mbuf_chain(sc, m0);
2614 m_freem(m0);
2615 continue;
2616 }
2617 /*
2618 * Short on resources, just stop for now.
2619 */
2620 DPRINTF(WM_DEBUG_TX,
2621 ("%s: TX: dmamap load failed: %d\n",
2622 device_xname(sc->sc_dev), error));
2623 break;
2624 }
2625
2626 segs_needed = dmamap->dm_nsegs;
2627 if (use_tso) {
2628 /* For sentinel descriptor; see below. */
2629 segs_needed++;
2630 }
2631
2632 /*
2633 * Ensure we have enough descriptors free to describe
2634 * the packet. Note, we always reserve one descriptor
2635 * at the end of the ring due to the semantics of the
2636 * TDT register, plus one more in the event we need
2637 * to load offload context.
2638 */
2639 if (segs_needed > sc->sc_txfree - 2) {
2640 /*
2641 * Not enough free descriptors to transmit this
2642 * packet. We haven't committed anything yet,
2643 * so just unload the DMA map, put the packet
2644 			 * back on the queue, and punt. Notify the upper
2645 * layer that there are no more slots left.
2646 */
2647 DPRINTF(WM_DEBUG_TX,
2648 ("%s: TX: need %d (%d) descriptors, have %d\n",
2649 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2650 segs_needed, sc->sc_txfree - 1));
2651 ifp->if_flags |= IFF_OACTIVE;
2652 bus_dmamap_unload(sc->sc_dmat, dmamap);
2653 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2654 break;
2655 }
2656
2657 /*
2658 * Check for 82547 Tx FIFO bug. We need to do this
2659 * once we know we can transmit the packet, since we
2660 * do some internal FIFO space accounting here.
2661 */
2662 if (sc->sc_type == WM_T_82547 &&
2663 wm_82547_txfifo_bugchk(sc, m0)) {
2664 DPRINTF(WM_DEBUG_TX,
2665 ("%s: TX: 82547 Tx FIFO bug detected\n",
2666 device_xname(sc->sc_dev)));
2667 ifp->if_flags |= IFF_OACTIVE;
2668 bus_dmamap_unload(sc->sc_dmat, dmamap);
2669 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2670 break;
2671 }
2672
2673 IFQ_DEQUEUE(&ifp->if_snd, m0);
2674
2675 /*
2676 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2677 */
2678
2679 DPRINTF(WM_DEBUG_TX,
2680 ("%s: TX: packet has %d (%d) DMA segments\n",
2681 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2682
2683 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2684
2685 /*
2686 * Store a pointer to the packet so that we can free it
2687 * later.
2688 *
2689 * Initially, we consider the number of descriptors the
2690 		 * packet uses to be the number of DMA segments. This may be
2691 * incremented by 1 if we do checksum offload (a descriptor
2692 * is used to set the checksum context).
2693 */
2694 txs->txs_mbuf = m0;
2695 txs->txs_firstdesc = sc->sc_txnext;
2696 txs->txs_ndesc = segs_needed;
2697
2698 /* Set up offload parameters for this packet. */
2699 if (m0->m_pkthdr.csum_flags &
2700 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2701 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2702 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2703 if (wm_tx_offload(sc, txs, &cksumcmd,
2704 &cksumfields) != 0) {
2705 /* Error message already displayed. */
2706 bus_dmamap_unload(sc->sc_dmat, dmamap);
2707 continue;
2708 }
2709 } else {
2710 cksumcmd = 0;
2711 cksumfields = 0;
2712 }
2713
2714 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2715
2716 /* Sync the DMA map. */
2717 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2718 BUS_DMASYNC_PREWRITE);
2719
2720 /*
2721 * Initialize the transmit descriptor.
2722 */
2723 for (nexttx = sc->sc_txnext, seg = 0;
2724 seg < dmamap->dm_nsegs; seg++) {
2725 for (seglen = dmamap->dm_segs[seg].ds_len,
2726 curaddr = dmamap->dm_segs[seg].ds_addr;
2727 seglen != 0;
2728 curaddr += curlen, seglen -= curlen,
2729 nexttx = WM_NEXTTX(sc, nexttx)) {
2730 curlen = seglen;
2731
2732 /*
2733 * So says the Linux driver:
2734 * Work around for premature descriptor
2735 * write-backs in TSO mode. Append a
2736 * 4-byte sentinel descriptor.
2737 */
2738 if (use_tso &&
2739 seg == dmamap->dm_nsegs - 1 &&
2740 curlen > 8)
2741 curlen -= 4;
2742
2743 wm_set_dma_addr(
2744 &sc->sc_txdescs[nexttx].wtx_addr,
2745 curaddr);
2746 sc->sc_txdescs[nexttx].wtx_cmdlen =
2747 htole32(cksumcmd | curlen);
2748 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2749 0;
2750 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2751 cksumfields;
2752 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2753 lasttx = nexttx;
2754
2755 DPRINTF(WM_DEBUG_TX,
2756 ("%s: TX: desc %d: low %#" PRIx64 ", "
2757 "len %#04zx\n",
2758 device_xname(sc->sc_dev), nexttx,
2759 (uint64_t)curaddr, curlen));
2760 }
2761 }
2762
2763 KASSERT(lasttx != -1);
2764
2765 /*
2766 * Set up the command byte on the last descriptor of
2767 * the packet. If we're in the interrupt delay window,
2768 * delay the interrupt.
2769 */
2770 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2771 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2772
2773 /*
2774 * If VLANs are enabled and the packet has a VLAN tag, set
2775 * up the descriptor to encapsulate the packet for us.
2776 *
2777 * This is only valid on the last descriptor of the packet.
2778 */
2779 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2780 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2781 htole32(WTX_CMD_VLE);
2782 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2783 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2784 }
2785
2786 txs->txs_lastdesc = lasttx;
2787
2788 DPRINTF(WM_DEBUG_TX,
2789 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2790 device_xname(sc->sc_dev),
2791 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2792
2793 /* Sync the descriptors we're using. */
2794 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2795 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2796
2797 /* Give the packet to the chip. */
2798 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2799
2800 DPRINTF(WM_DEBUG_TX,
2801 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2802
2803 DPRINTF(WM_DEBUG_TX,
2804 ("%s: TX: finished transmitting packet, job %d\n",
2805 device_xname(sc->sc_dev), sc->sc_txsnext));
2806
2807 /* Advance the tx pointer. */
2808 sc->sc_txfree -= txs->txs_ndesc;
2809 sc->sc_txnext = nexttx;
2810
2811 sc->sc_txsfree--;
2812 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2813
2814 /* Pass the packet to any BPF listeners. */
2815 bpf_mtap(ifp, m0);
2816 }
2817
2818 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2819 /* No more slots; notify upper layer. */
2820 ifp->if_flags |= IFF_OACTIVE;
2821 }
2822
2823 if (sc->sc_txfree != ofree) {
2824 /* Set a watchdog timer in case the chip flakes out. */
2825 ifp->if_timer = 5;
2826 }
2827 }
2828
2829 /*
2830 * wm_nq_tx_offload:
2831 *
2832 * Set up TCP/IP checksumming parameters for the
2833 	 * specified packet, for NEWQUEUE devices.
2834 */
2835 static int
2836 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2837 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2838 {
2839 struct mbuf *m0 = txs->txs_mbuf;
2840 struct m_tag *mtag;
2841 uint32_t vl_len, mssidx, cmdc;
2842 struct ether_header *eh;
2843 int offset, iphl;
2844
2845 /*
2846 * XXX It would be nice if the mbuf pkthdr had offset
2847 * fields for the protocol headers.
2848 */
2849 *cmdlenp = 0;
2850 *fieldsp = 0;
2851
2852 eh = mtod(m0, struct ether_header *);
2853 switch (htons(eh->ether_type)) {
2854 case ETHERTYPE_IP:
2855 case ETHERTYPE_IPV6:
2856 offset = ETHER_HDR_LEN;
2857 break;
2858
2859 case ETHERTYPE_VLAN:
2860 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2861 break;
2862
2863 default:
2864 /*
2865 * Don't support this protocol or encapsulation.
2866 */
2867 *do_csum = false;
2868 return 0;
2869 }
2870 *do_csum = true;
2871 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2872 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2873
2874 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2875 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2876
2877 if ((m0->m_pkthdr.csum_flags &
2878 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2879 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2880 } else {
2881 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2882 }
2883 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2884 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
2885
2886 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2887 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2888 << NQTXC_VLLEN_VLAN_SHIFT);
2889 *cmdlenp |= NQTX_CMD_VLE;
2890 }
2891
2892 mssidx = 0;
2893
2894 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2895 int hlen = offset + iphl;
2896 int tcp_hlen;
2897 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2898
2899 if (__predict_false(m0->m_len <
2900 (hlen + sizeof(struct tcphdr)))) {
2901 /*
2902 * TCP/IP headers are not in the first mbuf; we need
2903 * to do this the slow and painful way. Let's just
2904 * hope this doesn't happen very often.
2905 */
2906 struct tcphdr th;
2907
2908 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2909
2910 m_copydata(m0, hlen, sizeof(th), &th);
2911 if (v4) {
2912 struct ip ip;
2913
2914 m_copydata(m0, offset, sizeof(ip), &ip);
2915 ip.ip_len = 0;
2916 m_copyback(m0,
2917 offset + offsetof(struct ip, ip_len),
2918 sizeof(ip.ip_len), &ip.ip_len);
2919 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2920 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2921 } else {
2922 struct ip6_hdr ip6;
2923
2924 m_copydata(m0, offset, sizeof(ip6), &ip6);
2925 ip6.ip6_plen = 0;
2926 m_copyback(m0,
2927 offset + offsetof(struct ip6_hdr, ip6_plen),
2928 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2929 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2930 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2931 }
2932 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2933 sizeof(th.th_sum), &th.th_sum);
2934
2935 tcp_hlen = th.th_off << 2;
2936 } else {
2937 /*
2938 * TCP/IP headers are in the first mbuf; we can do
2939 * this the easy way.
2940 */
2941 struct tcphdr *th;
2942
2943 if (v4) {
2944 struct ip *ip =
2945 (void *)(mtod(m0, char *) + offset);
2946 th = (void *)(mtod(m0, char *) + hlen);
2947
2948 ip->ip_len = 0;
2949 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2950 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2951 } else {
2952 struct ip6_hdr *ip6 =
2953 (void *)(mtod(m0, char *) + offset);
2954 th = (void *)(mtod(m0, char *) + hlen);
2955
2956 ip6->ip6_plen = 0;
2957 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2958 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2959 }
2960 tcp_hlen = th->th_off << 2;
2961 }
2962 hlen += tcp_hlen;
2963 *cmdlenp |= NQTX_CMD_TSE;
2964
2965 if (v4) {
2966 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2967 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2968 } else {
2969 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2970 *fieldsp |= NQTXD_FIELDS_TUXSM;
2971 }
2972 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2973 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2974 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2975 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2976 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2977 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2978 } else {
2979 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2980 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2981 }
2982
2983 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2984 *fieldsp |= NQTXD_FIELDS_IXSM;
2985 cmdc |= NQTXC_CMD_IP4;
2986 }
2987
2988 if (m0->m_pkthdr.csum_flags &
2989 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2990 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2991 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2992 cmdc |= NQTXC_CMD_TCP;
2993 } else {
2994 cmdc |= NQTXC_CMD_UDP;
2995 }
2996 cmdc |= NQTXC_CMD_IP4;
2997 *fieldsp |= NQTXD_FIELDS_TUXSM;
2998 }
2999 if (m0->m_pkthdr.csum_flags &
3000 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3001 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
3002 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3003 cmdc |= NQTXC_CMD_TCP;
3004 } else {
3005 cmdc |= NQTXC_CMD_UDP;
3006 }
3007 cmdc |= NQTXC_CMD_IP6;
3008 *fieldsp |= NQTXD_FIELDS_TUXSM;
3009 }
3010
3011 /* Fill in the context descriptor. */
3012 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3013 htole32(vl_len);
3014 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3015 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3016 htole32(cmdc);
3017 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3018 htole32(mssidx);
3019 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3020 DPRINTF(WM_DEBUG_TX,
3021 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3022 sc->sc_txnext, 0, vl_len));
3023 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3024 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3025 txs->txs_ndesc++;
3026 return 0;
3027 }
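/*
 * Worked example for wm_nq_tx_offload() (illustrative): for an
 * untagged IPv4/TCP packet, vl_len packs MACLEN = 14 and IPLEN = 20
 * into their bit fields; for TSO with MSS 1460 and a 20-byte TCP
 * header, mssidx packs MSS = 1460 and L4LEN = 20. The KASSERTs above
 * verify that each value fits the mask of its field.
 */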
3028
3029 /*
3030 * wm_nq_start: [ifnet interface function]
3031 *
3032 	 * Start packet transmission on the interface, for NEWQUEUE devices.
3033 */
3034 static void
3035 wm_nq_start(struct ifnet *ifp)
3036 {
3037 struct wm_softc *sc = ifp->if_softc;
3038 struct mbuf *m0;
3039 struct m_tag *mtag;
3040 struct wm_txsoft *txs;
3041 bus_dmamap_t dmamap;
3042 int error, nexttx, lasttx = -1, seg, segs_needed;
3043 bool do_csum, sent;
3044
3045 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3046 return;
3047
3048 sent = false;
3049
3050 /*
3051 * Loop through the send queue, setting up transmit descriptors
3052 * until we drain the queue, or use up all available transmit
3053 * descriptors.
3054 */
3055 for (;;) {
3056 /* Grab a packet off the queue. */
3057 IFQ_POLL(&ifp->if_snd, m0);
3058 if (m0 == NULL)
3059 break;
3060
3061 DPRINTF(WM_DEBUG_TX,
3062 ("%s: TX: have packet to transmit: %p\n",
3063 device_xname(sc->sc_dev), m0));
3064
3065 /* Get a work queue entry. */
3066 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3067 wm_txintr(sc);
3068 if (sc->sc_txsfree == 0) {
3069 DPRINTF(WM_DEBUG_TX,
3070 ("%s: TX: no free job descriptors\n",
3071 device_xname(sc->sc_dev)));
3072 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3073 break;
3074 }
3075 }
3076
3077 txs = &sc->sc_txsoft[sc->sc_txsnext];
3078 dmamap = txs->txs_dmamap;
3079
3080 /*
3081 * Load the DMA map. If this fails, the packet either
3082 * didn't fit in the allotted number of segments, or we
3083 * were short on resources. For the too-many-segments
3084 * case, we simply report an error and drop the packet,
3085 * since we can't sanely copy a jumbo packet to a single
3086 * buffer.
3087 */
3088 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3089 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3090 if (error) {
3091 if (error == EFBIG) {
3092 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3093 log(LOG_ERR, "%s: Tx packet consumes too many "
3094 "DMA segments, dropping...\n",
3095 device_xname(sc->sc_dev));
3096 IFQ_DEQUEUE(&ifp->if_snd, m0);
3097 wm_dump_mbuf_chain(sc, m0);
3098 m_freem(m0);
3099 continue;
3100 }
3101 /*
3102 * Short on resources, just stop for now.
3103 */
3104 DPRINTF(WM_DEBUG_TX,
3105 ("%s: TX: dmamap load failed: %d\n",
3106 device_xname(sc->sc_dev), error));
3107 break;
3108 }
3109
3110 segs_needed = dmamap->dm_nsegs;
3111
3112 /*
3113 * Ensure we have enough descriptors free to describe
3114 * the packet. Note, we always reserve one descriptor
3115 * at the end of the ring due to the semantics of the
3116 * TDT register, plus one more in the event we need
3117 * to load offload context.
3118 */
3119 if (segs_needed > sc->sc_txfree - 2) {
3120 /*
3121 * Not enough free descriptors to transmit this
3122 * packet. We haven't committed anything yet,
3123 * so just unload the DMA map, put the packet
3124 			 * back on the queue, and punt. Notify the upper
3125 * layer that there are no more slots left.
3126 */
3127 DPRINTF(WM_DEBUG_TX,
3128 ("%s: TX: need %d (%d) descriptors, have %d\n",
3129 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3130 segs_needed, sc->sc_txfree - 1));
3131 ifp->if_flags |= IFF_OACTIVE;
3132 bus_dmamap_unload(sc->sc_dmat, dmamap);
3133 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3134 break;
3135 }
3136
3137 IFQ_DEQUEUE(&ifp->if_snd, m0);
3138
3139 /*
3140 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3141 */
3142
3143 DPRINTF(WM_DEBUG_TX,
3144 ("%s: TX: packet has %d (%d) DMA segments\n",
3145 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3146
3147 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3148
3149 /*
3150 * Store a pointer to the packet so that we can free it
3151 * later.
3152 *
3153 * Initially, we consider the number of descriptors the
3154 		 * packet uses to be the number of DMA segments. This may be
3155 * incremented by 1 if we do checksum offload (a descriptor
3156 * is used to set the checksum context).
3157 */
3158 txs->txs_mbuf = m0;
3159 txs->txs_firstdesc = sc->sc_txnext;
3160 txs->txs_ndesc = segs_needed;
3161
3162 /* Set up offload parameters for this packet. */
3163 uint32_t cmdlen, fields, dcmdlen;
3164 if (m0->m_pkthdr.csum_flags &
3165 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3166 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3167 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3168 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3169 &do_csum) != 0) {
3170 /* Error message already displayed. */
3171 bus_dmamap_unload(sc->sc_dmat, dmamap);
3172 continue;
3173 }
3174 } else {
3175 do_csum = false;
3176 cmdlen = 0;
3177 fields = 0;
3178 }
3179
3180 /* Sync the DMA map. */
3181 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3182 BUS_DMASYNC_PREWRITE);
3183
3184 /*
3185 * Initialize the first transmit descriptor.
3186 */
3187 nexttx = sc->sc_txnext;
3188 if (!do_csum) {
3189 /* setup a legacy descriptor */
3190 wm_set_dma_addr(
3191 &sc->sc_txdescs[nexttx].wtx_addr,
3192 dmamap->dm_segs[0].ds_addr);
3193 sc->sc_txdescs[nexttx].wtx_cmdlen =
3194 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3195 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3196 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3197 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3198 NULL) {
3199 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3200 htole32(WTX_CMD_VLE);
3201 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3202 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3203 } else {
3204 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3205 }
3206 dcmdlen = 0;
3207 } else {
3208 /* setup an advanced data descriptor */
3209 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3210 htole64(dmamap->dm_segs[0].ds_addr);
3211 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3212 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3213 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
3214 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3215 htole32(fields);
3216 DPRINTF(WM_DEBUG_TX,
3217 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3218 device_xname(sc->sc_dev), nexttx,
3219 (uint64_t)dmamap->dm_segs[0].ds_addr));
3220 DPRINTF(WM_DEBUG_TX,
3221 ("\t 0x%08x%08x\n", fields,
3222 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3223 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3224 }
3225
3226 lasttx = nexttx;
3227 nexttx = WM_NEXTTX(sc, nexttx);
3228 /*
3229 		 * Fill in the remaining descriptors; the legacy and advanced
3230 		 * formats are the same from here on.
3231 */
3232 for (seg = 1; seg < dmamap->dm_nsegs;
3233 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3234 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3235 htole64(dmamap->dm_segs[seg].ds_addr);
3236 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3237 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3238 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3239 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3240 lasttx = nexttx;
3241
3242 DPRINTF(WM_DEBUG_TX,
3243 ("%s: TX: desc %d: %#" PRIx64 ", "
3244 "len %#04zx\n",
3245 device_xname(sc->sc_dev), nexttx,
3246 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3247 dmamap->dm_segs[seg].ds_len));
3248 }
3249
3250 KASSERT(lasttx != -1);
3251
3252 /*
3253 * Set up the command byte on the last descriptor of
3254 * the packet. If we're in the interrupt delay window,
3255 * delay the interrupt.
3256 */
3257 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3258 (NQTX_CMD_EOP | NQTX_CMD_RS));
3259 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3260 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3261
3262 txs->txs_lastdesc = lasttx;
3263
3264 DPRINTF(WM_DEBUG_TX,
3265 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3266 device_xname(sc->sc_dev),
3267 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3268
3269 /* Sync the descriptors we're using. */
3270 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3271 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3272
3273 /* Give the packet to the chip. */
3274 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3275 sent = true;
3276
3277 DPRINTF(WM_DEBUG_TX,
3278 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3279
3280 DPRINTF(WM_DEBUG_TX,
3281 ("%s: TX: finished transmitting packet, job %d\n",
3282 device_xname(sc->sc_dev), sc->sc_txsnext));
3283
3284 /* Advance the tx pointer. */
3285 sc->sc_txfree -= txs->txs_ndesc;
3286 sc->sc_txnext = nexttx;
3287
3288 sc->sc_txsfree--;
3289 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3290
3291 /* Pass the packet to any BPF listeners. */
3292 bpf_mtap(ifp, m0);
3293 }
3294
3295 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3296 /* No more slots; notify upper layer. */
3297 ifp->if_flags |= IFF_OACTIVE;
3298 }
3299
3300 if (sent) {
3301 /* Set a watchdog timer in case the chip flakes out. */
3302 ifp->if_timer = 5;
3303 }
3304 }
3305
3306 /*
3307 * wm_watchdog: [ifnet interface function]
3308 *
3309 * Watchdog timer handler.
3310 */
3311 static void
3312 wm_watchdog(struct ifnet *ifp)
3313 {
3314 struct wm_softc *sc = ifp->if_softc;
3315
3316 /*
3317 * Since we're using delayed interrupts, sweep up
3318 * before we report an error.
3319 */
3320 wm_txintr(sc);
3321
3322 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3323 #ifdef WM_DEBUG
3324 int i, j;
3325 struct wm_txsoft *txs;
3326 #endif
3327 log(LOG_ERR,
3328 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3329 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3330 sc->sc_txnext);
3331 ifp->if_oerrors++;
3332 #ifdef WM_DEBUG
3333 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3334 i = WM_NEXTTXS(sc, i)) {
3335 txs = &sc->sc_txsoft[i];
3336 printf("txs %d tx %d -> %d\n",
3337 i, txs->txs_firstdesc, txs->txs_lastdesc);
3338 for (j = txs->txs_firstdesc; ;
3339 j = WM_NEXTTX(sc, j)) {
3340 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3341 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3342 printf("\t %#08x%08x\n",
3343 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3344 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3345 if (j == txs->txs_lastdesc)
3346 break;
3347 }
3348 }
3349 #endif
3350 /* Reset the interface. */
3351 (void) wm_init(ifp);
3352 }
3353
3354 /* Try to get more packets going. */
3355 ifp->if_start(ifp);
3356 }
3357
3358 static int
3359 wm_ifflags_cb(struct ethercom *ec)
3360 {
3361 struct ifnet *ifp = &ec->ec_if;
3362 struct wm_softc *sc = ifp->if_softc;
3363 int change = ifp->if_flags ^ sc->sc_if_flags;
3364
3365 if (change != 0)
3366 sc->sc_if_flags = ifp->if_flags;
3367
3368 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3369 return ENETRESET;
3370
3371 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3372 wm_set_filter(sc);
3373
3374 wm_set_vlan(sc);
3375
3376 return 0;
3377 }
3378
3379 /*
3380 * wm_ioctl: [ifnet interface function]
3381 *
3382 * Handle control requests from the operator.
3383 */
3384 static int
3385 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3386 {
3387 struct wm_softc *sc = ifp->if_softc;
3388 struct ifreq *ifr = (struct ifreq *) data;
3389 struct ifaddr *ifa = (struct ifaddr *)data;
3390 struct sockaddr_dl *sdl;
3391 int s, error;
3392
3393 s = splnet();
3394
3395 switch (cmd) {
3396 case SIOCSIFMEDIA:
3397 case SIOCGIFMEDIA:
3398 /* Flow control requires full-duplex mode. */
3399 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3400 (ifr->ifr_media & IFM_FDX) == 0)
3401 ifr->ifr_media &= ~IFM_ETH_FMASK;
3402 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3403 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3404 /* We can do both TXPAUSE and RXPAUSE. */
3405 ifr->ifr_media |=
3406 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3407 }
3408 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3409 }
3410 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3411 break;
3412 case SIOCINITIFADDR:
3413 if (ifa->ifa_addr->sa_family == AF_LINK) {
3414 sdl = satosdl(ifp->if_dl->ifa_addr);
3415 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3416 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3417 /* unicast address is first multicast entry */
3418 wm_set_filter(sc);
3419 error = 0;
3420 break;
3421 }
3422 /*FALLTHROUGH*/
3423 default:
3424 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3425 break;
3426
3427 error = 0;
3428
3429 if (cmd == SIOCSIFCAP)
3430 error = (*ifp->if_init)(ifp);
3431 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3432 ;
3433 else if (ifp->if_flags & IFF_RUNNING) {
3434 /*
3435 * Multicast list has changed; set the hardware filter
3436 * accordingly.
3437 */
3438 wm_set_filter(sc);
3439 }
3440 break;
3441 }
3442
3443 /* Try to get more packets going. */
3444 ifp->if_start(ifp);
3445
3446 splx(s);
3447 return error;
3448 }
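/*
 * Usage note for the SIOCSIFMEDIA handling above (assuming NetBSD's
 * usual ifconfig syntax): a command such as
 *
 *	ifconfig wm0 media 1000baseT mediaopt full-duplex,flowcontrol
 *
 * sets IFM_FLOW, which the code expands to both IFM_ETH_TXPAUSE and
 * IFM_ETH_RXPAUSE; with autonegotiated or half-duplex media the
 * flow-control bits are stripped instead.
 */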
3449
3450 /*
3451 * wm_intr:
3452 *
3453 * Interrupt service routine.
3454 */
3455 static int
3456 wm_intr(void *arg)
3457 {
3458 struct wm_softc *sc = arg;
3459 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3460 uint32_t icr;
3461 int handled = 0;
3462
3463 while (1 /* CONSTCOND */) {
3464 icr = CSR_READ(sc, WMREG_ICR);
3465 if ((icr & sc->sc_icr) == 0)
3466 break;
3467 rnd_add_uint32(&sc->rnd_source, icr);
3468
3469 handled = 1;
3470
3471 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3472 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3473 DPRINTF(WM_DEBUG_RX,
3474 ("%s: RX: got Rx intr 0x%08x\n",
3475 device_xname(sc->sc_dev),
3476 icr & (ICR_RXDMT0|ICR_RXT0)));
3477 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3478 }
3479 #endif
3480 wm_rxintr(sc);
3481
3482 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3483 if (icr & ICR_TXDW) {
3484 DPRINTF(WM_DEBUG_TX,
3485 ("%s: TX: got TXDW interrupt\n",
3486 device_xname(sc->sc_dev)));
3487 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3488 }
3489 #endif
3490 wm_txintr(sc);
3491
3492 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3493 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3494 wm_linkintr(sc, icr);
3495 }
3496
3497 if (icr & ICR_RXO) {
3498 #if defined(WM_DEBUG)
3499 log(LOG_WARNING, "%s: Receive overrun\n",
3500 device_xname(sc->sc_dev));
3501 #endif /* defined(WM_DEBUG) */
3502 }
3503 }
3504
3505 if (handled) {
3506 /* Try to get more packets going. */
3507 ifp->if_start(ifp);
3508 }
3509
3510 return handled;
3511 }
3512
3513 /*
3514 * wm_txintr:
3515 *
3516 * Helper; handle transmit interrupts.
3517 */
3518 static void
3519 wm_txintr(struct wm_softc *sc)
3520 {
3521 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3522 struct wm_txsoft *txs;
3523 uint8_t status;
3524 int i;
3525
3526 ifp->if_flags &= ~IFF_OACTIVE;
3527
3528 /*
3529 * Go through the Tx list and free mbufs for those
3530 * frames which have been transmitted.
3531 */
3532 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3533 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3534 txs = &sc->sc_txsoft[i];
3535
3536 DPRINTF(WM_DEBUG_TX,
3537 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3538
3539 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3540 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3541
3542 status =
3543 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3544 if ((status & WTX_ST_DD) == 0) {
3545 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3546 BUS_DMASYNC_PREREAD);
3547 break;
3548 }
3549
3550 DPRINTF(WM_DEBUG_TX,
3551 ("%s: TX: job %d done: descs %d..%d\n",
3552 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3553 txs->txs_lastdesc));
3554
3555 /*
3556 * XXX We should probably be using the statistics
3557 * XXX registers, but I don't know if they exist
3558 * XXX on chips before the i82544.
3559 */
3560
3561 #ifdef WM_EVENT_COUNTERS
3562 if (status & WTX_ST_TU)
3563 WM_EVCNT_INCR(&sc->sc_ev_tu);
3564 #endif /* WM_EVENT_COUNTERS */
3565
3566 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3567 ifp->if_oerrors++;
3568 if (status & WTX_ST_LC)
3569 log(LOG_WARNING, "%s: late collision\n",
3570 device_xname(sc->sc_dev));
3571 else if (status & WTX_ST_EC) {
3572 ifp->if_collisions += 16;
3573 log(LOG_WARNING, "%s: excessive collisions\n",
3574 device_xname(sc->sc_dev));
3575 }
3576 } else
3577 ifp->if_opackets++;
3578
3579 sc->sc_txfree += txs->txs_ndesc;
3580 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3581 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3582 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3583 m_freem(txs->txs_mbuf);
3584 txs->txs_mbuf = NULL;
3585 }
3586
3587 /* Update the dirty transmit buffer pointer. */
3588 sc->sc_txsdirty = i;
3589 DPRINTF(WM_DEBUG_TX,
3590 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3591
3592 /*
3593 * If there are no more pending transmissions, cancel the watchdog
3594 * timer.
3595 */
3596 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3597 ifp->if_timer = 0;
3598 }
3599
3600 /*
3601 * wm_rxintr:
3602 *
3603 * Helper; handle receive interrupts.
3604 */
3605 static void
3606 wm_rxintr(struct wm_softc *sc)
3607 {
3608 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3609 struct wm_rxsoft *rxs;
3610 struct mbuf *m;
3611 int i, len;
3612 uint8_t status, errors;
3613 uint16_t vlantag;
3614
3615 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3616 rxs = &sc->sc_rxsoft[i];
3617
3618 DPRINTF(WM_DEBUG_RX,
3619 ("%s: RX: checking descriptor %d\n",
3620 device_xname(sc->sc_dev), i));
3621
3622 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3623
3624 status = sc->sc_rxdescs[i].wrx_status;
3625 errors = sc->sc_rxdescs[i].wrx_errors;
3626 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3627 vlantag = sc->sc_rxdescs[i].wrx_special;
3628
3629 if ((status & WRX_ST_DD) == 0) {
3630 /*
3631 * We have processed all of the receive descriptors.
3632 */
3633 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3634 break;
3635 }
3636
3637 if (__predict_false(sc->sc_rxdiscard)) {
3638 DPRINTF(WM_DEBUG_RX,
3639 ("%s: RX: discarding contents of descriptor %d\n",
3640 device_xname(sc->sc_dev), i));
3641 WM_INIT_RXDESC(sc, i);
3642 if (status & WRX_ST_EOP) {
3643 /* Reset our state. */
3644 DPRINTF(WM_DEBUG_RX,
3645 ("%s: RX: resetting rxdiscard -> 0\n",
3646 device_xname(sc->sc_dev)));
3647 sc->sc_rxdiscard = 0;
3648 }
3649 continue;
3650 }
3651
3652 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3653 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3654
3655 m = rxs->rxs_mbuf;
3656
3657 /*
3658 * Add a new receive buffer to the ring, unless of
3659 * course the length is zero. Treat the latter as a
3660 * failed mapping.
3661 */
3662 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3663 /*
3664 * Failed, throw away what we've done so
3665 * far, and discard the rest of the packet.
3666 */
3667 ifp->if_ierrors++;
3668 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3669 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3670 WM_INIT_RXDESC(sc, i);
3671 if ((status & WRX_ST_EOP) == 0)
3672 sc->sc_rxdiscard = 1;
3673 if (sc->sc_rxhead != NULL)
3674 m_freem(sc->sc_rxhead);
3675 WM_RXCHAIN_RESET(sc);
3676 DPRINTF(WM_DEBUG_RX,
3677 ("%s: RX: Rx buffer allocation failed, "
3678 "dropping packet%s\n", device_xname(sc->sc_dev),
3679 sc->sc_rxdiscard ? " (discard)" : ""));
3680 continue;
3681 }
3682
3683 m->m_len = len;
3684 sc->sc_rxlen += len;
3685 DPRINTF(WM_DEBUG_RX,
3686 ("%s: RX: buffer at %p len %d\n",
3687 device_xname(sc->sc_dev), m->m_data, len));
3688
3689 /*
3690 * If this is not the end of the packet, keep
3691 * looking.
3692 */
3693 if ((status & WRX_ST_EOP) == 0) {
3694 WM_RXCHAIN_LINK(sc, m);
3695 DPRINTF(WM_DEBUG_RX,
3696 ("%s: RX: not yet EOP, rxlen -> %d\n",
3697 device_xname(sc->sc_dev), sc->sc_rxlen));
3698 continue;
3699 }
3700
3701 /*
3702 * Okay, we have the entire packet now. The chip is
3703 * configured to include the FCS except on the I350 and
3704 * I21[01] (not all chips can be configured to strip it),
3705 * so we need to trim it. We may need to adjust the length
3706 * of the previous mbuf in the chain if the current mbuf
3707 * is too short. Due to an erratum, the RCTL_SECRC bit in
3708 * the RCTL register is always set on the I350, so we
3709 * don't trim the FCS there.
3710 */
3711 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I210)
3712 && (sc->sc_type != WM_T_I211)) {
3713 if (m->m_len < ETHER_CRC_LEN) {
3714 sc->sc_rxtail->m_len
3715 -= (ETHER_CRC_LEN - m->m_len);
3716 m->m_len = 0;
3717 } else
3718 m->m_len -= ETHER_CRC_LEN;
3719 len = sc->sc_rxlen - ETHER_CRC_LEN;
3720 } else
3721 len = sc->sc_rxlen;
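		/*
		 * Worked example of the trim above: if the 4-byte FCS
		 * straddles two buffers (say 2 bytes in each), the
		 * previous mbuf is shortened by the 2 missing bytes and
		 * the final mbuf ends up empty; either way the chain
		 * shrinks by exactly ETHER_CRC_LEN bytes.
		 */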
3722
3723 WM_RXCHAIN_LINK(sc, m);
3724
3725 *sc->sc_rxtailp = NULL;
3726 m = sc->sc_rxhead;
3727
3728 WM_RXCHAIN_RESET(sc);
3729
3730 DPRINTF(WM_DEBUG_RX,
3731 ("%s: RX: have entire packet, len -> %d\n",
3732 device_xname(sc->sc_dev), len));
3733
3734 /*
3735 * If an error occurred, update stats and drop the packet.
3736 */
3737 if (errors &
3738 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3739 if (errors & WRX_ER_SE)
3740 log(LOG_WARNING, "%s: symbol error\n",
3741 device_xname(sc->sc_dev));
3742 else if (errors & WRX_ER_SEQ)
3743 log(LOG_WARNING, "%s: receive sequence error\n",
3744 device_xname(sc->sc_dev));
3745 else if (errors & WRX_ER_CE)
3746 log(LOG_WARNING, "%s: CRC error\n",
3747 device_xname(sc->sc_dev));
3748 m_freem(m);
3749 continue;
3750 }
3751
3752 /*
3753 * No errors. Receive the packet.
3754 */
3755 m->m_pkthdr.rcvif = ifp;
3756 m->m_pkthdr.len = len;
3757
3758 /*
3759 * If VLANs are enabled, VLAN packets have been unwrapped
3760 * for us. Associate the tag with the packet.
3761 */
3762 if ((status & WRX_ST_VP) != 0) {
3763 VLAN_INPUT_TAG(ifp, m,
3764 le16toh(vlantag),
3765 continue);
3766 }
3767
3768 /*
3769 * Set up checksum info for this packet.
3770 */
3771 if ((status & WRX_ST_IXSM) == 0) {
3772 if (status & WRX_ST_IPCS) {
3773 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3774 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3775 if (errors & WRX_ER_IPE)
3776 m->m_pkthdr.csum_flags |=
3777 M_CSUM_IPv4_BAD;
3778 }
3779 if (status & WRX_ST_TCPCS) {
3780 /*
3781 * Note: we don't know if this was TCP or UDP,
3782 * so we just set both bits, and expect the
3783 * upper layers to deal.
3784 */
3785 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3786 m->m_pkthdr.csum_flags |=
3787 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3788 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3789 if (errors & WRX_ER_TCPE)
3790 m->m_pkthdr.csum_flags |=
3791 M_CSUM_TCP_UDP_BAD;
3792 }
3793 }
3794
3795 ifp->if_ipackets++;
3796
3797 /* Pass this up to any BPF listeners. */
3798 bpf_mtap(ifp, m);
3799
3800 /* Pass it on. */
3801 (*ifp->if_input)(ifp, m);
3802 }
3803
3804 /* Update the receive pointer. */
3805 sc->sc_rxptr = i;
3806
3807 DPRINTF(WM_DEBUG_RX,
3808 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3809 }
3810
3811 /*
3812 * wm_linkintr_gmii:
3813 *
3814 * Helper; handle link interrupts for GMII.
3815 */
3816 static void
3817 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3818 {
3819
3820 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3821 __func__));
3822
3823 if (icr & ICR_LSC) {
3824 DPRINTF(WM_DEBUG_LINK,
3825 ("%s: LINK: LSC -> mii_tick\n",
3826 device_xname(sc->sc_dev)));
3827 mii_tick(&sc->sc_mii);
3828 if (sc->sc_type == WM_T_82543) {
3829 int miistatus, active;
3830
3831 /*
3832 * With the 82543, we need to force the MAC's speed
3833 * and duplex settings to match the PHY's speed and
3834 * duplex configuration.
3835 */
3836 miistatus = sc->sc_mii.mii_media_status;
3837
3838 if (miistatus & IFM_ACTIVE) {
3839 active = sc->sc_mii.mii_media_active;
3840 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3841 switch (IFM_SUBTYPE(active)) {
3842 case IFM_10_T:
3843 sc->sc_ctrl |= CTRL_SPEED_10;
3844 break;
3845 case IFM_100_TX:
3846 sc->sc_ctrl |= CTRL_SPEED_100;
3847 break;
3848 case IFM_1000_T:
3849 sc->sc_ctrl |= CTRL_SPEED_1000;
3850 break;
3851 default:
3852 /*
3853 * Fiber?
3854 * Should not enter here.
3855 */
3856 printf("unknown media (%x)\n",
3857 active);
3858 break;
3859 }
3860 if (active & IFM_FDX)
3861 sc->sc_ctrl |= CTRL_FD;
3862 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3863 }
3864 } else if ((sc->sc_type == WM_T_ICH8)
3865 && (sc->sc_phytype == WMPHY_IGP_3)) {
3866 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3867 } else if (sc->sc_type == WM_T_PCH) {
3868 wm_k1_gig_workaround_hv(sc,
3869 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3870 }
3871
3872 if ((sc->sc_phytype == WMPHY_82578)
3873 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3874 == IFM_1000_T)) {
3875
3876 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3877 delay(200*1000); /* XXX too big */
3878
3879 /* Link stall fix for link up */
3880 wm_gmii_hv_writereg(sc->sc_dev, 1,
3881 HV_MUX_DATA_CTRL,
3882 HV_MUX_DATA_CTRL_GEN_TO_MAC
3883 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3884 wm_gmii_hv_writereg(sc->sc_dev, 1,
3885 HV_MUX_DATA_CTRL,
3886 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3887 }
3888 }
3889 } else if (icr & ICR_RXSEQ) {
3890 DPRINTF(WM_DEBUG_LINK,
3891 ("%s: LINK Receive sequence error\n",
3892 device_xname(sc->sc_dev)));
3893 }
3894 }
3895
3896 /*
3897 * wm_linkintr_tbi:
3898 *
3899 * Helper; handle link interrupts for TBI mode.
3900 */
3901 static void
3902 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3903 {
3904 uint32_t status;
3905
3906 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3907 __func__));
3908
3909 status = CSR_READ(sc, WMREG_STATUS);
3910 if (icr & ICR_LSC) {
3911 if (status & STATUS_LU) {
3912 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3913 device_xname(sc->sc_dev),
3914 (status & STATUS_FD) ? "FDX" : "HDX"));
3915 /*
3916 * NOTE: the hardware updates TFCE and RFCE in CTRL
3917 * automatically, so we re-read CTRL into sc->sc_ctrl.
3918 */
3919
3920 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3921 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3922 sc->sc_fcrtl &= ~FCRTL_XONE;
3923 if (status & STATUS_FD)
3924 sc->sc_tctl |=
3925 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3926 else
3927 sc->sc_tctl |=
3928 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3929 if (sc->sc_ctrl & CTRL_TFCE)
3930 sc->sc_fcrtl |= FCRTL_XONE;
3931 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3932 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3933 WMREG_OLD_FCRTL : WMREG_FCRTL,
3934 sc->sc_fcrtl);
3935 sc->sc_tbi_linkup = 1;
3936 } else {
3937 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3938 device_xname(sc->sc_dev)));
3939 sc->sc_tbi_linkup = 0;
3940 }
3941 wm_tbi_set_linkled(sc);
3942 } else if (icr & ICR_RXCFG) {
3943 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3944 device_xname(sc->sc_dev)));
3945 sc->sc_tbi_nrxcfg++;
3946 wm_check_for_link(sc);
3947 } else if (icr & ICR_RXSEQ) {
3948 DPRINTF(WM_DEBUG_LINK,
3949 ("%s: LINK: Receive sequence error\n",
3950 device_xname(sc->sc_dev)));
3951 }
3952 }
3953
3954 /*
3955 * wm_linkintr:
3956 *
3957 * Helper; handle link interrupts.
3958 */
3959 static void
3960 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3961 {
3962
3963 if (sc->sc_flags & WM_F_HAS_MII)
3964 wm_linkintr_gmii(sc, icr);
3965 else
3966 wm_linkintr_tbi(sc, icr);
3967 }
3968
3969 /*
3970 * wm_tick:
3971 *
3972 * One second timer, used to check link status, sweep up
3973 * completed transmit jobs, etc.
3974 */
3975 static void
3976 wm_tick(void *arg)
3977 {
3978 struct wm_softc *sc = arg;
3979 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3980 int s;
3981
3982 s = splnet();
3983
3984 if (sc->sc_type >= WM_T_82542_2_1) {
3985 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3986 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3987 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3988 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3989 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3990 }
3991
3992 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3993 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3994 + CSR_READ(sc, WMREG_CRCERRS)
3995 + CSR_READ(sc, WMREG_ALGNERRC)
3996 + CSR_READ(sc, WMREG_SYMERRC)
3997 + CSR_READ(sc, WMREG_RXERRC)
3998 + CSR_READ(sc, WMREG_SEC)
3999 + CSR_READ(sc, WMREG_CEXTERR)
4000 + CSR_READ(sc, WMREG_RLEC);
4001 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4002
4003 if (sc->sc_flags & WM_F_HAS_MII)
4004 mii_tick(&sc->sc_mii);
4005 else
4006 wm_tbi_check_link(sc);
4007
4008 splx(s);
4009
4010 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4011 }
4012
4013 /*
4014 * wm_reset:
4015 *
4016 * Reset the i82542 chip.
4017 */
4018 static void
4019 wm_reset(struct wm_softc *sc)
4020 {
4021 int phy_reset = 0;
4022 uint32_t reg, mask;
4023 int i;
4024
4025 /*
4026 * Allocate on-chip memory according to the MTU size.
4027 * The Packet Buffer Allocation register must be written
4028 * before the chip is reset.
4029 */
4030 switch (sc->sc_type) {
4031 case WM_T_82547:
4032 case WM_T_82547_2:
4033 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4034 PBA_22K : PBA_30K;
4035 sc->sc_txfifo_head = 0;
4036 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4037 sc->sc_txfifo_size =
4038 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4039 sc->sc_txfifo_stall = 0;
4040 break;
4041 case WM_T_82571:
4042 case WM_T_82572:
4043 case WM_T_82575: /* XXX need special handling for jumbo frames */
4044 case WM_T_I350:
4045 case WM_T_80003:
4046 sc->sc_pba = PBA_32K;
4047 break;
4048 case WM_T_82580:
4049 case WM_T_82580ER:
4050 sc->sc_pba = PBA_35K;
4051 break;
4052 case WM_T_I210:
4053 case WM_T_I211:
4054 sc->sc_pba = PBA_34K;
4055 break;
4056 case WM_T_82576:
4057 sc->sc_pba = PBA_64K;
4058 break;
4059 case WM_T_82573:
4060 sc->sc_pba = PBA_12K;
4061 break;
4062 case WM_T_82574:
4063 case WM_T_82583:
4064 sc->sc_pba = PBA_20K;
4065 break;
4066 case WM_T_ICH8:
4067 sc->sc_pba = PBA_8K;
4068 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4069 break;
4070 case WM_T_ICH9:
4071 case WM_T_ICH10:
4072 sc->sc_pba = PBA_10K;
4073 break;
4074 case WM_T_PCH:
4075 case WM_T_PCH2:
4076 case WM_T_PCH_LPT:
4077 sc->sc_pba = PBA_26K;
4078 break;
4079 default:
4080 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4081 PBA_40K : PBA_48K;
4082 break;
4083 }
4084 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
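	/*
	 * Worked example of the split above: on the 82547 with a
	 * standard MTU, sc_pba is PBA_30K, so receive gets 30KB of
	 * the 40KB packet buffer and the remaining 10KB
	 * (PBA_40K - sc_pba) becomes the Tx FIFO.
	 */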
4085
4086 /* Prevent the PCI-E bus from sticking */
4087 if (sc->sc_flags & WM_F_PCIE) {
4088 int timeout = 800;
4089
4090 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4091 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4092
4093 while (timeout--) {
4094 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4095 == 0)
4096 break;
4097 delay(100);
4098 }
4099 }
4100
4101 /* Set the completion timeout for the interface */
4102 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4103 || (sc->sc_type == WM_T_I350))
4104 wm_set_pcie_completion_timeout(sc);
4105
4106 /* Clear interrupt */
4107 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4108
4109 /* Stop the transmit and receive processes. */
4110 CSR_WRITE(sc, WMREG_RCTL, 0);
4111 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4112 sc->sc_rctl &= ~RCTL_EN;
4113
4114 /* XXX set_tbi_sbp_82543() */
4115
4116 delay(10*1000);
4117
4118 /* Must acquire the MDIO ownership before MAC reset */
4119 switch (sc->sc_type) {
4120 case WM_T_82573:
4121 case WM_T_82574:
4122 case WM_T_82583:
4123 i = 0;
4124 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4125 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4126 do {
4127 CSR_WRITE(sc, WMREG_EXTCNFCTR,
4128 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4129 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4130 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4131 break;
4132 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4133 delay(2*1000);
4134 i++;
4135 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4136 break;
4137 default:
4138 break;
4139 }
4140
4141 /*
4142 * 82541 Errata 29? & 82547 Errata 28?
4143 * See also the description about PHY_RST bit in CTRL register
4144 * in 8254x_GBe_SDM.pdf.
4145 */
4146 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4147 CSR_WRITE(sc, WMREG_CTRL,
4148 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4149 delay(5000);
4150 }
4151
4152 switch (sc->sc_type) {
4153 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4154 case WM_T_82541:
4155 case WM_T_82541_2:
4156 case WM_T_82547:
4157 case WM_T_82547_2:
4158 /*
4159 * On some chipsets, a reset through a memory-mapped write
4160 * cycle can cause the chip to reset before completing the
4161 * write cycle. This causes a major headache that can be
4162 * avoided by issuing the reset via indirect register writes
4163 * through I/O space.
4164 *
4165 * So, if we successfully mapped the I/O BAR at attach time,
4166 * use that. Otherwise, try our luck with a memory-mapped
4167 * reset.
4168 */
4169 if (sc->sc_flags & WM_F_IOH_VALID)
4170 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4171 else
4172 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4173 break;
4174 case WM_T_82545_3:
4175 case WM_T_82546_3:
4176 /* Use the shadow control register on these chips. */
4177 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4178 break;
4179 case WM_T_80003:
4180 mask = swfwphysem[sc->sc_funcid];
4181 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4182 wm_get_swfw_semaphore(sc, mask);
4183 CSR_WRITE(sc, WMREG_CTRL, reg);
4184 wm_put_swfw_semaphore(sc, mask);
4185 break;
4186 case WM_T_ICH8:
4187 case WM_T_ICH9:
4188 case WM_T_ICH10:
4189 case WM_T_PCH:
4190 case WM_T_PCH2:
4191 case WM_T_PCH_LPT:
4192 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4193 if (wm_check_reset_block(sc) == 0) {
4194 /*
4195 * Gate automatic PHY configuration by hardware on
4196 * non-managed 82579
4197 */
4198 if ((sc->sc_type == WM_T_PCH2)
4199 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4200 != 0))
4201 wm_gate_hw_phy_config_ich8lan(sc, 1);
4202
4203
4204 reg |= CTRL_PHY_RESET;
4205 phy_reset = 1;
4206 }
4207 wm_get_swfwhw_semaphore(sc);
4208 CSR_WRITE(sc, WMREG_CTRL, reg);
4209 delay(20*1000);
4210 wm_put_swfwhw_semaphore(sc);
4211 break;
4212 case WM_T_82542_2_0:
4213 case WM_T_82542_2_1:
4214 case WM_T_82543:
4215 case WM_T_82540:
4216 case WM_T_82545:
4217 case WM_T_82546:
4218 case WM_T_82571:
4219 case WM_T_82572:
4220 case WM_T_82573:
4221 case WM_T_82574:
4222 case WM_T_82575:
4223 case WM_T_82576:
4224 case WM_T_82580:
4225 case WM_T_82580ER:
4226 case WM_T_82583:
4227 case WM_T_I350:
4228 case WM_T_I210:
4229 case WM_T_I211:
4230 default:
4231 /* Everything else can safely use the documented method. */
4232 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4233 break;
4234 }
4235
4236 if (phy_reset != 0)
4237 wm_get_cfg_done(sc);
4238
4239 /* reload EEPROM */
4240 switch (sc->sc_type) {
4241 case WM_T_82542_2_0:
4242 case WM_T_82542_2_1:
4243 case WM_T_82543:
4244 case WM_T_82544:
4245 delay(10);
4246 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4247 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4248 delay(2000);
4249 break;
4250 case WM_T_82540:
4251 case WM_T_82545:
4252 case WM_T_82545_3:
4253 case WM_T_82546:
4254 case WM_T_82546_3:
4255 delay(5*1000);
4256 /* XXX Disable HW ARPs on ASF enabled adapters */
4257 break;
4258 case WM_T_82541:
4259 case WM_T_82541_2:
4260 case WM_T_82547:
4261 case WM_T_82547_2:
4262 delay(20000);
4263 /* XXX Disable HW ARPs on ASF enabled adapters */
4264 break;
4265 case WM_T_82571:
4266 case WM_T_82572:
4267 case WM_T_82573:
4268 case WM_T_82574:
4269 case WM_T_82583:
4270 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4271 delay(10);
4272 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4273 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4274 }
4275 /* check EECD_EE_AUTORD */
4276 wm_get_auto_rd_done(sc);
4277 /*
4278 * PHY configuration from NVM starts just after EECD_AUTO_RD
4279 * is set.
4280 */
4281 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4282 || (sc->sc_type == WM_T_82583))
4283 delay(25*1000);
4284 break;
4285 case WM_T_82575:
4286 case WM_T_82576:
4287 case WM_T_82580:
4288 case WM_T_82580ER:
4289 case WM_T_I350:
4290 case WM_T_I210:
4291 case WM_T_I211:
4292 case WM_T_80003:
4293 case WM_T_ICH8:
4294 case WM_T_ICH9:
4295 /* check EECD_EE_AUTORD */
4296 wm_get_auto_rd_done(sc);
4297 break;
4298 case WM_T_ICH10:
4299 case WM_T_PCH:
4300 case WM_T_PCH2:
4301 case WM_T_PCH_LPT:
4302 wm_lan_init_done(sc);
4303 break;
4304 default:
4305 panic("%s: unknown type\n", __func__);
4306 }
4307
4308 /* Check whether EEPROM is present or not */
4309 switch (sc->sc_type) {
4310 case WM_T_82575:
4311 case WM_T_82576:
4312 #if 0 /* XXX */
4313 case WM_T_82580:
4314 case WM_T_82580ER:
4315 #endif
4316 case WM_T_I350:
4317 case WM_T_ICH8:
4318 case WM_T_ICH9:
4319 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4320 /* Not found */
4321 sc->sc_flags |= WM_F_EEPROM_INVALID;
4322 if ((sc->sc_type == WM_T_82575)
4323 || (sc->sc_type == WM_T_82576)
4324 || (sc->sc_type == WM_T_82580)
4325 || (sc->sc_type == WM_T_82580ER)
4326 || (sc->sc_type == WM_T_I350))
4327 wm_reset_init_script_82575(sc);
4328 }
4329 break;
4330 default:
4331 break;
4332 }
4333
4334 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4335 || (sc->sc_type == WM_T_I350)) {
4336 /* clear global device reset status bit */
4337 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4338 }
4339
4340 /* Clear any pending interrupt events. */
4341 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4342 reg = CSR_READ(sc, WMREG_ICR);
4343
4344 /* reload sc_ctrl */
4345 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4346
4347 if (sc->sc_type == WM_T_I350)
4348 wm_set_eee_i350(sc);
4349
4350 /* dummy read from WUC */
4351 if (sc->sc_type == WM_T_PCH)
4352 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4353 /*
4354 * For PCH, this write will make sure that any noise will be detected
4355 * as a CRC error and be dropped rather than show up as a bad packet
4356 * to the DMA engine
4357 */
4358 if (sc->sc_type == WM_T_PCH)
4359 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4360
4361 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4362 CSR_WRITE(sc, WMREG_WUC, 0);
4363
4364 /* XXX need special handling for 82580 */
4365 }
4366
4367 static void
4368 wm_set_vlan(struct wm_softc *sc)
4369 {
4370 /* Deal with VLAN enables. */
4371 if (VLAN_ATTACHED(&sc->sc_ethercom))
4372 sc->sc_ctrl |= CTRL_VME;
4373 else
4374 sc->sc_ctrl &= ~CTRL_VME;
4375
4376 /* Write the control registers. */
4377 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4378 }
4379
4380 /*
4381 * wm_init: [ifnet interface function]
4382 *
4383 * Initialize the interface. Must be called at splnet().
4384 */
4385 static int
4386 wm_init(struct ifnet *ifp)
4387 {
4388 struct wm_softc *sc = ifp->if_softc;
4389 struct wm_rxsoft *rxs;
4390 int i, j, trynum, error = 0;
4391 uint32_t reg;
4392
4393 /*
4394 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4395 * There is a small but measurable benefit to avoiding the adjustment
4396 * of the descriptor so that the headers are aligned, for normal MTU,
4397 * on such platforms. One possibility is that the DMA itself is
4398 * slightly more efficient if the front of the entire packet (instead
4399 * of the front of the headers) is aligned.
4400 *
4401 * Note we must always set align_tweak to 0 if we are using
4402 * jumbo frames.
4403 */
4404 #ifdef __NO_STRICT_ALIGNMENT
4405 sc->sc_align_tweak = 0;
4406 #else
4407 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4408 sc->sc_align_tweak = 0;
4409 else
4410 sc->sc_align_tweak = 2;
4411 #endif /* __NO_STRICT_ALIGNMENT */
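/*
 * A small worked example of the tweak above: with sc_align_tweak = 2
 * the 14-byte Ethernet header occupies buffer offsets 2..15, so the
 * IP header that follows begins at offset 16 and is 4-byte aligned,
 * as strict-alignment platforms require.
 */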
4412
4413 /* Cancel any pending I/O. */
4414 wm_stop(ifp, 0);
4415
4416 /* update statistics before reset */
4417 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4418 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4419
4420 /* Reset the chip to a known state. */
4421 wm_reset(sc);
4422
4423 switch (sc->sc_type) {
4424 case WM_T_82571:
4425 case WM_T_82572:
4426 case WM_T_82573:
4427 case WM_T_82574:
4428 case WM_T_82583:
4429 case WM_T_80003:
4430 case WM_T_ICH8:
4431 case WM_T_ICH9:
4432 case WM_T_ICH10:
4433 case WM_T_PCH:
4434 case WM_T_PCH2:
4435 case WM_T_PCH_LPT:
4436 if (wm_check_mng_mode(sc) != 0)
4437 wm_get_hw_control(sc);
4438 break;
4439 default:
4440 break;
4441 }
4442
4443 /* Reset the PHY. */
4444 if (sc->sc_flags & WM_F_HAS_MII)
4445 wm_gmii_reset(sc);
4446
4447 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4448 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4449 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
4450 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4451
4452 /* Initialize the transmit descriptor ring. */
4453 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4454 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4455 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4456 sc->sc_txfree = WM_NTXDESC(sc);
4457 sc->sc_txnext = 0;
4458
4459 if (sc->sc_type < WM_T_82543) {
4460 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4461 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4462 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4463 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4464 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4465 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4466 } else {
4467 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4468 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4469 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4470 CSR_WRITE(sc, WMREG_TDH, 0);
4471 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4472 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4473
4474 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4475 /*
4476 * Don't write TDT before TCTL.EN is set.
4477 * See the documentation.
4478 */
4479 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4480 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4481 | TXDCTL_WTHRESH(0));
4482 else {
4483 CSR_WRITE(sc, WMREG_TDT, 0);
4484 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4485 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4486 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4487 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4488 }
4489 }
4490 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4491 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4492
4493 /* Initialize the transmit job descriptors. */
4494 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4495 sc->sc_txsoft[i].txs_mbuf = NULL;
4496 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4497 sc->sc_txsnext = 0;
4498 sc->sc_txsdirty = 0;
4499
4500 /*
4501 * Initialize the receive descriptor and receive job
4502 * descriptor rings.
4503 */
4504 if (sc->sc_type < WM_T_82543) {
4505 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4506 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4507 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4508 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4509 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4510 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4511
4512 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4513 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4514 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4515 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4516 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4517 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4518 } else {
4519 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4520 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4521 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4522 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4523 CSR_WRITE(sc, WMREG_EITR(0), 450);
4524 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4525 panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
4526 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4527 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4528 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4529 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4530 | RXDCTL_WTHRESH(1));
4531 } else {
4532 CSR_WRITE(sc, WMREG_RDH, 0);
4533 CSR_WRITE(sc, WMREG_RDT, 0);
4534 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4535 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4536 }
4537 }
4538 for (i = 0; i < WM_NRXDESC; i++) {
4539 rxs = &sc->sc_rxsoft[i];
4540 if (rxs->rxs_mbuf == NULL) {
4541 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4542 log(LOG_ERR, "%s: unable to allocate or map "
4543 "rx buffer %d, error = %d\n",
4544 device_xname(sc->sc_dev), i, error);
4545 /*
4546 * XXX Should attempt to run with fewer receive
4547 * XXX buffers instead of just failing.
4548 */
4549 wm_rxdrain(sc);
4550 goto out;
4551 }
4552 } else {
4553 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4554 WM_INIT_RXDESC(sc, i);
4555 /*
4556 * For the 82575 and newer devices, the RX descriptors
4557 * must be initialized after RCTL.EN is set in
4558 * wm_set_filter().
4559 */
4560 }
4561 }
4562 sc->sc_rxptr = 0;
4563 sc->sc_rxdiscard = 0;
4564 WM_RXCHAIN_RESET(sc);
4565
4566 /*
4567 * Clear out the VLAN table -- we don't use it (yet).
4568 */
4569 CSR_WRITE(sc, WMREG_VET, 0);
4570 if (sc->sc_type == WM_T_I350)
4571 trynum = 10; /* Due to hw errata */
4572 else
4573 trynum = 1;
4574 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4575 for (j = 0; j < trynum; j++)
4576 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4577
4578 /*
4579 * Set up flow-control parameters.
4580 *
4581 * XXX Values could probably stand some tuning.
4582 */
4583 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4584 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4585 && (sc->sc_type != WM_T_PCH2)) {
4586 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4587 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4588 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4589 }
4590
4591 sc->sc_fcrtl = FCRTL_DFLT;
4592 if (sc->sc_type < WM_T_82543) {
4593 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4594 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4595 } else {
4596 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4597 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4598 }
4599
4600 if (sc->sc_type == WM_T_80003)
4601 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4602 else
4603 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4604
4605 /* Writes the control register. */
4606 wm_set_vlan(sc);
4607
4608 if (sc->sc_flags & WM_F_HAS_MII) {
4609 int val;
4610
4611 switch (sc->sc_type) {
4612 case WM_T_80003:
4613 case WM_T_ICH8:
4614 case WM_T_ICH9:
4615 case WM_T_ICH10:
4616 case WM_T_PCH:
4617 case WM_T_PCH2:
4618 case WM_T_PCH_LPT:
4619 /*
4620 * Set the MAC to wait the maximum time between
4621 * iterations, and increase the max iterations when
4622 * polling the PHY; this fixes erroneous timeouts at
4623 * 10Mbps.
4624 */
4625 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4626 0xFFFF);
4627 val = wm_kmrn_readreg(sc,
4628 KUMCTRLSTA_OFFSET_INB_PARAM);
4629 val |= 0x3F;
4630 wm_kmrn_writereg(sc,
4631 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4632 break;
4633 default:
4634 break;
4635 }
4636
4637 if (sc->sc_type == WM_T_80003) {
4638 val = CSR_READ(sc, WMREG_CTRL_EXT);
4639 val &= ~CTRL_EXT_LINK_MODE_MASK;
4640 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4641
4642 /* Bypass RX and TX FIFO's */
4643 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4644 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4645 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4646 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4647 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4648 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4649 }
4650 }
4651 #if 0
4652 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4653 #endif
4654
4655 /*
4656 * Set up checksum offload parameters.
4657 */
4658 reg = CSR_READ(sc, WMREG_RXCSUM);
4659 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4660 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4661 reg |= RXCSUM_IPOFL;
4662 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4663 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4664 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4665 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4666 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4667
4668 /* Reset TBI's RXCFG count */
4669 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4670
4671 /*
4672 * Set up the interrupt registers.
4673 */
4674 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4675 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4676 ICR_RXO | ICR_RXT0;
4677 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4678 sc->sc_icr |= ICR_RXCFG;
4679 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4680
4681 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4682 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4683 || (sc->sc_type == WM_T_PCH2)) {
4684 reg = CSR_READ(sc, WMREG_KABGTXD);
4685 reg |= KABGTXD_BGSQLBIAS;
4686 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4687 }
4688
4689 /* Set up the inter-packet gap. */
4690 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4691
4692 if (sc->sc_type >= WM_T_82543) {
4693 /*
4694 * Set up the interrupt throttling register (units of 256ns)
4695 * Note that a footnote in Intel's documentation says this
4696 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4697 * or 10Mbit mode. Empirically, this also appears to be
4698 * true for the 1024ns units of the other
4699 * interrupt-related timer registers -- so, really, we ought
4700 * to divide this value by 4 when the link speed is low.
4701 *
4702 * XXX implement this division at link speed change!
4703 */
4704
4705 /*
4706 * For N interrupts/sec, set this value to:
4707 * 1000000000 / (N * 256). Note that we set the
4708 * absolute and packet timer values to this value
4709 * divided by 4 to get "simple timer" behavior.
4710 */
4711
4712 sc->sc_itr = 1500; /* 2604 ints/sec */
4713 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4714 }
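	/*
	 * Worked example of the formula above: sc_itr = 1500 means an
	 * interrupt at most every 1500 * 256ns = 384us, i.e. about
	 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, matching
	 * the comment on the assignment.
	 */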
4715
4716 /* Set the VLAN ethernetype. */
4717 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4718
4719 /*
4720 * Set up the transmit control register; we start out with
4721 * a collision distance suitable for FDX, but update it when
4722 * we resolve the media type.
4723 */
4724 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4725 | TCTL_CT(TX_COLLISION_THRESHOLD)
4726 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4727 if (sc->sc_type >= WM_T_82571)
4728 sc->sc_tctl |= TCTL_MULR;
4729 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4730
4731 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4732 /*
4733 * Write TDT after TCTL.EN is set.
4734 * See the documentation.
4735 */
4736 CSR_WRITE(sc, WMREG_TDT, 0);
4737 }
4738
4739 if (sc->sc_type == WM_T_80003) {
4740 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4741 reg &= ~TCTL_EXT_GCEX_MASK;
4742 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4743 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4744 }
4745
4746 /* Set the media. */
4747 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4748 goto out;
4749
4750 /* Configure for OS presence */
4751 wm_init_manageability(sc);
4752
4753 /*
4754 * Set up the receive control register; we actually program
4755 * the register when we set the receive filter. Use multicast
4756 * address offset type 0.
4757 *
4758 * Only the i82544 has the ability to strip the incoming
4759 * CRC, so we don't enable that feature.
4760 */
4761 sc->sc_mchash_type = 0;
4762 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4763 | RCTL_MO(sc->sc_mchash_type);
4764
4765 /*
4766 * The I350 has a bug where it always strips the CRC whether
4767 * asked to or not. So ask for a stripped CRC here and cope in wm_rxintr().
4768 */
4769 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210))
4770 sc->sc_rctl |= RCTL_SECRC;
4771
4772 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4773 && (ifp->if_mtu > ETHERMTU)) {
4774 sc->sc_rctl |= RCTL_LPE;
4775 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4776 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4777 }
4778
4779 if (MCLBYTES == 2048) {
4780 sc->sc_rctl |= RCTL_2k;
4781 } else {
4782 if (sc->sc_type >= WM_T_82543) {
4783 switch (MCLBYTES) {
4784 case 4096:
4785 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4786 break;
4787 case 8192:
4788 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4789 break;
4790 case 16384:
4791 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4792 break;
4793 default:
4794 panic("wm_init: MCLBYTES %d unsupported",
4795 MCLBYTES);
4796 break;
4797 }
4798 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4799 }
4800
4801 /* Set the receive filter. */
4802 wm_set_filter(sc);
4803
4804 /* On the 82575 and later, set RDT only when RX is enabled */
4805 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4806 for (i = 0; i < WM_NRXDESC; i++)
4807 WM_INIT_RXDESC(sc, i);
4808
4809 /* Start the one second link check clock. */
4810 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4811
4812 /* ...all done! */
4813 ifp->if_flags |= IFF_RUNNING;
4814 ifp->if_flags &= ~IFF_OACTIVE;
4815
4816 out:
4817 sc->sc_if_flags = ifp->if_flags;
4818 if (error)
4819 log(LOG_ERR, "%s: interface not running\n",
4820 device_xname(sc->sc_dev));
4821 return error;
4822 }
4823
4824 /*
4825 * wm_rxdrain:
4826 *
4827 * Drain the receive queue.
4828 */
4829 static void
4830 wm_rxdrain(struct wm_softc *sc)
4831 {
4832 struct wm_rxsoft *rxs;
4833 int i;
4834
4835 for (i = 0; i < WM_NRXDESC; i++) {
4836 rxs = &sc->sc_rxsoft[i];
4837 if (rxs->rxs_mbuf != NULL) {
4838 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4839 m_freem(rxs->rxs_mbuf);
4840 rxs->rxs_mbuf = NULL;
4841 }
4842 }
4843 }
4844
4845 /*
4846 * wm_stop: [ifnet interface function]
4847 *
4848 * Stop transmission on the interface.
4849 */
4850 static void
4851 wm_stop(struct ifnet *ifp, int disable)
4852 {
4853 struct wm_softc *sc = ifp->if_softc;
4854 struct wm_txsoft *txs;
4855 int i;
4856
4857 /* Stop the one second clock. */
4858 callout_stop(&sc->sc_tick_ch);
4859
4860 /* Stop the 82547 Tx FIFO stall check timer. */
4861 if (sc->sc_type == WM_T_82547)
4862 callout_stop(&sc->sc_txfifo_ch);
4863
4864 if (sc->sc_flags & WM_F_HAS_MII) {
4865 /* Down the MII. */
4866 mii_down(&sc->sc_mii);
4867 } else {
4868 #if 0
4869 /* Should we clear PHY's status properly? */
4870 wm_reset(sc);
4871 #endif
4872 }
4873
4874 /* Stop the transmit and receive processes. */
4875 CSR_WRITE(sc, WMREG_TCTL, 0);
4876 CSR_WRITE(sc, WMREG_RCTL, 0);
4877 sc->sc_rctl &= ~RCTL_EN;
4878
4879 /*
4880 * Clear the interrupt mask to ensure the device cannot assert its
4881 * interrupt line.
4882 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4883 * any currently pending or shared interrupt.
4884 */
4885 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4886 sc->sc_icr = 0;
4887
4888 /* Release any queued transmit buffers. */
4889 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4890 txs = &sc->sc_txsoft[i];
4891 if (txs->txs_mbuf != NULL) {
4892 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4893 m_freem(txs->txs_mbuf);
4894 txs->txs_mbuf = NULL;
4895 }
4896 }
4897
4898 /* Mark the interface as down and cancel the watchdog timer. */
4899 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4900 ifp->if_timer = 0;
4901
4902 if (disable)
4903 wm_rxdrain(sc);
4904
4905 #if 0 /* notyet */
4906 if (sc->sc_type >= WM_T_82544)
4907 CSR_WRITE(sc, WMREG_WUC, 0);
4908 #endif
4909 }
4910
4911 void
4912 wm_get_auto_rd_done(struct wm_softc *sc)
4913 {
4914 int i;
4915
4916 /* wait for eeprom to reload */
4917 switch (sc->sc_type) {
4918 case WM_T_82571:
4919 case WM_T_82572:
4920 case WM_T_82573:
4921 case WM_T_82574:
4922 case WM_T_82583:
4923 case WM_T_82575:
4924 case WM_T_82576:
4925 case WM_T_82580:
4926 case WM_T_82580ER:
4927 case WM_T_I350:
4928 case WM_T_I210:
4929 case WM_T_I211:
4930 case WM_T_80003:
4931 case WM_T_ICH8:
4932 case WM_T_ICH9:
4933 for (i = 0; i < 10; i++) {
4934 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4935 break;
4936 delay(1000);
4937 }
4938 if (i == 10) {
4939 log(LOG_ERR, "%s: auto read from eeprom failed to "
4940 "complete\n", device_xname(sc->sc_dev));
4941 }
4942 break;
4943 default:
4944 break;
4945 }
4946 }
4947
4948 void
4949 wm_lan_init_done(struct wm_softc *sc)
4950 {
4951 uint32_t reg = 0;
4952 int i;
4953
4954 /* wait for eeprom to reload */
4955 switch (sc->sc_type) {
4956 case WM_T_ICH10:
4957 case WM_T_PCH:
4958 case WM_T_PCH2:
4959 case WM_T_PCH_LPT:
4960 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4961 reg = CSR_READ(sc, WMREG_STATUS);
4962 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4963 break;
4964 delay(100);
4965 }
4966 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4967 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4968 "complete\n", device_xname(sc->sc_dev), __func__);
4969 }
4970 break;
4971 default:
4972 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4973 __func__);
4974 break;
4975 }
4976
4977 reg &= ~STATUS_LAN_INIT_DONE;
4978 CSR_WRITE(sc, WMREG_STATUS, reg);
4979 }
4980
4981 void
4982 wm_get_cfg_done(struct wm_softc *sc)
4983 {
4984 int mask;
4985 uint32_t reg;
4986 int i;
4987
4988 /* wait for eeprom to reload */
4989 switch (sc->sc_type) {
4990 case WM_T_82542_2_0:
4991 case WM_T_82542_2_1:
4992 /* null */
4993 break;
4994 case WM_T_82543:
4995 case WM_T_82544:
4996 case WM_T_82540:
4997 case WM_T_82545:
4998 case WM_T_82545_3:
4999 case WM_T_82546:
5000 case WM_T_82546_3:
5001 case WM_T_82541:
5002 case WM_T_82541_2:
5003 case WM_T_82547:
5004 case WM_T_82547_2:
5005 case WM_T_82573:
5006 case WM_T_82574:
5007 case WM_T_82583:
5008 /* generic */
5009 delay(10*1000);
5010 break;
5011 case WM_T_80003:
5012 case WM_T_82571:
5013 case WM_T_82572:
5014 case WM_T_82575:
5015 case WM_T_82576:
5016 case WM_T_82580:
5017 case WM_T_82580ER:
5018 case WM_T_I350:
5019 case WM_T_I210:
5020 case WM_T_I211:
5021 if (sc->sc_type == WM_T_82571) {
5022 /* Only 82571 shares port 0 */
5023 mask = EEMNGCTL_CFGDONE_0;
5024 } else
5025 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5026 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5027 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5028 break;
5029 delay(1000);
5030 }
5031 if (i >= WM_PHY_CFG_TIMEOUT) {
5032 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5033 device_xname(sc->sc_dev), __func__));
5034 }
5035 break;
5036 case WM_T_ICH8:
5037 case WM_T_ICH9:
5038 case WM_T_ICH10:
5039 case WM_T_PCH:
5040 case WM_T_PCH2:
5041 case WM_T_PCH_LPT:
5042 if (sc->sc_type >= WM_T_PCH) {
5043 reg = CSR_READ(sc, WMREG_STATUS);
5044 if ((reg & STATUS_PHYRA) != 0)
5045 CSR_WRITE(sc, WMREG_STATUS,
5046 reg & ~STATUS_PHYRA);
5047 }
5048 delay(10*1000);
5049 break;
5050 default:
5051 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5052 __func__);
5053 break;
5054 }
5055 }
5056
5057 /*
5058 * wm_acquire_eeprom:
5059 *
5060 * Perform the EEPROM handshake required on some chips.
5061 */
5062 static int
5063 wm_acquire_eeprom(struct wm_softc *sc)
5064 {
5065 uint32_t reg;
5066 int x;
5067 int ret = 0;
5068
5069 /* Flash-type EEPROM needs no handshake; always succeeds */
5070 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5071 return 0;
5072
5073 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5074 ret = wm_get_swfwhw_semaphore(sc);
5075 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5076 /* this will also do wm_get_swsm_semaphore() if needed */
5077 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5078 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5079 ret = wm_get_swsm_semaphore(sc);
5080 }
5081
5082 if (ret) {
5083 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5084 __func__);
5085 return 1;
5086 }
5087
5088 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5089 reg = CSR_READ(sc, WMREG_EECD);
5090
5091 /* Request EEPROM access. */
5092 reg |= EECD_EE_REQ;
5093 CSR_WRITE(sc, WMREG_EECD, reg);
5094
5095 /* ..and wait for it to be granted. */
5096 for (x = 0; x < 1000; x++) {
5097 reg = CSR_READ(sc, WMREG_EECD);
5098 if (reg & EECD_EE_GNT)
5099 break;
5100 delay(5);
5101 }
5102 if ((reg & EECD_EE_GNT) == 0) {
5103 aprint_error_dev(sc->sc_dev,
5104 "could not acquire EEPROM GNT\n");
5105 reg &= ~EECD_EE_REQ;
5106 CSR_WRITE(sc, WMREG_EECD, reg);
5107 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5108 wm_put_swfwhw_semaphore(sc);
5109 if (sc->sc_flags & WM_F_SWFW_SYNC)
5110 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5111 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5112 wm_put_swsm_semaphore(sc);
5113 return 1;
5114 }
5115 }
5116
5117 return 0;
5118 }
5119
5120 /*
5121 * wm_release_eeprom:
5122 *
5123 * Release the EEPROM mutex.
5124 */
5125 static void
5126 wm_release_eeprom(struct wm_softc *sc)
5127 {
5128 uint32_t reg;
5129
5130 /* Flash-type EEPROM needs no handshake; nothing to release */
5131 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5132 return;
5133
5134 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5135 reg = CSR_READ(sc, WMREG_EECD);
5136 reg &= ~EECD_EE_REQ;
5137 CSR_WRITE(sc, WMREG_EECD, reg);
5138 }
5139
5140 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5141 wm_put_swfwhw_semaphore(sc);
5142 if (sc->sc_flags & WM_F_SWFW_SYNC)
5143 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5144 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5145 wm_put_swsm_semaphore(sc);
5146 }
5147
5148 /*
5149 * wm_eeprom_sendbits:
5150 *
5151 * Send a series of bits to the EEPROM.
5152 */
5153 static void
5154 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5155 {
5156 uint32_t reg;
5157 int x;
5158
5159 reg = CSR_READ(sc, WMREG_EECD);
5160
5161 for (x = nbits; x > 0; x--) {
5162 if (bits & (1U << (x - 1)))
5163 reg |= EECD_DI;
5164 else
5165 reg &= ~EECD_DI;
5166 CSR_WRITE(sc, WMREG_EECD, reg);
5167 delay(2);
5168 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5169 delay(2);
5170 CSR_WRITE(sc, WMREG_EECD, reg);
5171 delay(2);
5172 }
5173 }
5174
5175 /*
5176 * wm_eeprom_recvbits:
5177 *
5178 * Receive a series of bits from the EEPROM.
5179 */
5180 static void
5181 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5182 {
5183 uint32_t reg, val;
5184 int x;
5185
5186 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5187
5188 val = 0;
5189 for (x = nbits; x > 0; x--) {
5190 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5191 delay(2);
5192 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5193 val |= (1U << (x - 1));
5194 CSR_WRITE(sc, WMREG_EECD, reg);
5195 delay(2);
5196 }
5197 *valp = val;
5198 }
5199
5200 /*
5201 * wm_read_eeprom_uwire:
5202 *
5203 * Read a word from the EEPROM using the MicroWire protocol.
5204 */
5205 static int
5206 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5207 {
5208 uint32_t reg, val;
5209 int i;
5210
5211 for (i = 0; i < wordcnt; i++) {
5212 /* Clear SK and DI. */
5213 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5214 CSR_WRITE(sc, WMREG_EECD, reg);
5215
5216 /*
5217 * XXX: workaround for a bug in qemu-0.12.x and prior
5218 * and Xen.
5219 *
5220 * We use this workaround only for the 82540 because qemu's
5221 * e1000 acts as an 82540.
5222 */
5223 if (sc->sc_type == WM_T_82540) {
5224 reg |= EECD_SK;
5225 CSR_WRITE(sc, WMREG_EECD, reg);
5226 reg &= ~EECD_SK;
5227 CSR_WRITE(sc, WMREG_EECD, reg);
5228 delay(2);
5229 }
5230 /* XXX: end of workaround */
5231
5232 /* Set CHIP SELECT. */
5233 reg |= EECD_CS;
5234 CSR_WRITE(sc, WMREG_EECD, reg);
5235 delay(2);
5236
5237 /* Shift in the READ command. */
5238 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5239
5240 /* Shift in address. */
5241 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5242
5243 /* Shift out the data. */
5244 wm_eeprom_recvbits(sc, &val, 16);
5245 data[i] = val & 0xffff;
5246
5247 /* Clear CHIP SELECT. */
5248 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5249 CSR_WRITE(sc, WMREG_EECD, reg);
5250 delay(2);
5251 }
5252
5253 return 0;
5254 }
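/*
 * Illustrative note on the transaction above: each word is read as
 * CS high -> 3-bit READ opcode -> sc_ee_addrbits address bits ->
 * 16 data bits clocked back on DO -> CS low, with SK pulsed once
 * per bit by wm_eeprom_sendbits()/wm_eeprom_recvbits().
 */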
5255
5256 /*
5257 * wm_spi_eeprom_ready:
5258 *
5259 * Wait for a SPI EEPROM to be ready for commands.
5260 */
5261 static int
5262 wm_spi_eeprom_ready(struct wm_softc *sc)
5263 {
5264 uint32_t val;
5265 int usec;
5266
5267 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5268 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5269 wm_eeprom_recvbits(sc, &val, 8);
5270 if ((val & SPI_SR_RDY) == 0)
5271 break;
5272 }
5273 if (usec >= SPI_MAX_RETRIES) {
5274 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5275 return 1;
5276 }
5277 return 0;
5278 }
5279
5280 /*
5281 * wm_read_eeprom_spi:
5282 *
5283 * Read a word from the EEPROM using the SPI protocol.
5284 */
5285 static int
5286 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5287 {
5288 uint32_t reg, val;
5289 int i;
5290 uint8_t opc;
5291
5292 /* Clear SK and CS. */
5293 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5294 CSR_WRITE(sc, WMREG_EECD, reg);
5295 delay(2);
5296
5297 if (wm_spi_eeprom_ready(sc))
5298 return 1;
5299
5300 /* Toggle CS to flush commands. */
5301 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5302 delay(2);
5303 CSR_WRITE(sc, WMREG_EECD, reg);
5304 delay(2);
5305
5306 opc = SPI_OPC_READ;
5307 if (sc->sc_ee_addrbits == 8 && word >= 128)
5308 opc |= SPI_OPC_A8;
5309
5310 wm_eeprom_sendbits(sc, opc, 8);
5311 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5312
5313 for (i = 0; i < wordcnt; i++) {
5314 wm_eeprom_recvbits(sc, &val, 16);
5315 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5316 }
5317
5318 /* Raise CS and clear SK. */
5319 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5320 CSR_WRITE(sc, WMREG_EECD, reg);
5321 delay(2);
5322
5323 return 0;
5324 }
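/*
 * Note on the byte swap above: the SPI part shifts each word out
 * MSB first, so a raw 16-bit read of 0xaabb is stored in data[] as
 * the host word 0xbbaa.
 */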
5325
5326 #define NVM_CHECKSUM 0xBABA
5327 #define EEPROM_SIZE 0x0040
5328 #define NVM_COMPAT 0x0003
5329 #define NVM_COMPAT_VALID_CHECKSUM 0x0001
5330 #define NVM_FUTURE_INIT_WORD1 0x0019
5331 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
5332
5333 /*
5334 * wm_validate_eeprom_checksum:
5335 *
5336 * The checksum is defined as the sum of the first 64 16-bit words.
5337 */
5338 static int
5339 wm_validate_eeprom_checksum(struct wm_softc *sc)
5340 {
5341 uint16_t checksum, valid_checksum;
5342 uint16_t eeprom_data;
5343 uint16_t csum_wordaddr;
5344 int i;
5345
5346 checksum = 0;
5347
5348 /* Don't check for I211 */
5349 if (sc->sc_type == WM_T_I211)
5350 return 0;
5351
5352 if (sc->sc_type == WM_T_PCH_LPT) {
5353 printf("[PCH_LPT]");
5354 csum_wordaddr = NVM_COMPAT;
5355 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5356 } else {
5357 csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5358 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5359 }
5360
5361 #ifdef WM_DEBUG
5362 /* Dump EEPROM image for debug */
5363 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5364 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5365 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5366 wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5367 if ((eeprom_data & valid_checksum) == 0) {
5368 DPRINTF(WM_DEBUG_NVM,
5369 ("%s: NVM need to be updated (%04x != %04x)\n",
5370 device_xname(sc->sc_dev), eeprom_data,
5371 valid_checksum));
5372 }
5373 }
5374
5375 if ((wm_debug & WM_DEBUG_NVM) != 0) {
5376 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5377 for (i = 0; i < EEPROM_SIZE; i++) {
5378 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5379 printf("XX ");
5380 else
5381 printf("%04x ", eeprom_data);
5382 if (i % 8 == 7)
5383 printf("\n");
5384 }
5385 }
5386
5387 #endif /* WM_DEBUG */
5388
5389 for (i = 0; i < EEPROM_SIZE; i++) {
5390 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5391 return 1;
5392 checksum += eeprom_data;
5393 }
5394
5395 if (checksum != (uint16_t) NVM_CHECKSUM) {
5396 #ifdef WM_DEBUG
5397 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5398 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5399 #endif
5400 }
5401
5402 return 0;
5403 }
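#if 0
/*
 * A minimal sketch (not driver code) of how an image satisfies the
 * rule checked above: the last word of the 64-word region is chosen
 * so that the 16-bit sum comes to NVM_CHECKSUM.  The helper name is
 * hypothetical.
 */
static uint16_t
nvm_checksum_word(const uint16_t *image)
{
	uint16_t sum = 0;
	int i;

	/* Sum words 0x00..0x3e; word 0x3f brings the total to 0xBABA. */
	for (i = 0; i < EEPROM_SIZE - 1; i++)
		sum += image[i];

	return (uint16_t)(NVM_CHECKSUM - sum);
}
#endif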
5404
5405 /*
5406 * wm_read_eeprom:
5407 *
5408 * Read data from the serial EEPROM.
5409 */
5410 static int
5411 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5412 {
5413 int rv;
5414
5415 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5416 return 1;
5417
5418 if (wm_acquire_eeprom(sc))
5419 return 1;
5420
5421 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5422 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5423 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5424 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5425 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5426 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5427 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5428 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5429 else
5430 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5431
5432 wm_release_eeprom(sc);
5433 return rv;
5434 }
5435
5436 static int
5437 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5438 uint16_t *data)
5439 {
5440 int i, eerd = 0;
5441 int error = 0;
5442
5443 for (i = 0; i < wordcnt; i++) {
5444 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5445
5446 CSR_WRITE(sc, WMREG_EERD, eerd);
5447 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5448 if (error != 0)
5449 break;
5450
5451 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5452 }
5453
5454 return error;
5455 }
5456
5457 static int
5458 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5459 {
5460 uint32_t attempts = 100000;
5461 uint32_t i, reg = 0;
5462 int32_t done = -1;
5463
5464 for (i = 0; i < attempts; i++) {
5465 reg = CSR_READ(sc, rw);
5466
5467 if (reg & EERD_DONE) {
5468 done = 0;
5469 break;
5470 }
5471 delay(5);
5472 }
5473
5474 return done;
5475 }
5476
5477 static int
5478 wm_check_alt_mac_addr(struct wm_softc *sc)
5479 {
5480 uint16_t myea[ETHER_ADDR_LEN / 2];
5481 uint16_t offset = EEPROM_OFF_MACADDR;
5482
5483 /* Try to read alternative MAC address pointer */
5484 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5485 return -1;
5486
5487 /* Check pointer */
5488 if (offset == 0xffff)
5489 return -1;
5490
5491 /*
5492 * Check whether the alternative MAC address is valid.
5493 * Some cards have a non-0xffff pointer but don't actually
5494 * use an alternative MAC address.
5495 *
5496 * A valid unicast address must have the multicast bit clear.
5497 */
5498 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5499 if (((myea[0] & 0xff) & 0x01) == 0)
5500 return 0; /* found! */
5501
5502 /* not found */
5503 return -1;
5504 }
5505
5506 static int
5507 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5508 {
5509 uint16_t myea[ETHER_ADDR_LEN / 2];
5510 uint16_t offset = EEPROM_OFF_MACADDR;
5511 int do_invert = 0;
5512
5513 switch (sc->sc_type) {
5514 case WM_T_82580:
5515 case WM_T_82580ER:
5516 case WM_T_I350:
5517 switch (sc->sc_funcid) {
5518 case 0:
5519 /* default value (== EEPROM_OFF_MACADDR) */
5520 break;
5521 case 1:
5522 offset = EEPROM_OFF_LAN1;
5523 break;
5524 case 2:
5525 offset = EEPROM_OFF_LAN2;
5526 break;
5527 case 3:
5528 offset = EEPROM_OFF_LAN3;
5529 break;
5530 default:
5531 goto bad;
5532 /* NOTREACHED */
5533 break;
5534 }
5535 break;
5536 case WM_T_82571:
5537 case WM_T_82575:
5538 case WM_T_82576:
5539 case WM_T_80003:
5540 case WM_T_I210:
5541 case WM_T_I211:
5542 if (wm_check_alt_mac_addr(sc) != 0) {
5543 /* reset the offset to LAN0 */
5544 offset = EEPROM_OFF_MACADDR;
5545 if ((sc->sc_funcid & 0x01) == 1)
5546 do_invert = 1;
5547 goto do_read;
5548 }
5549 switch (sc->sc_funcid) {
5550 case 0:
5551 /*
5552 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5553 * itself.
5554 */
5555 break;
5556 case 1:
5557 offset += EEPROM_OFF_MACADDR_LAN1;
5558 break;
5559 case 2:
5560 offset += EEPROM_OFF_MACADDR_LAN2;
5561 break;
5562 case 3:
5563 offset += EEPROM_OFF_MACADDR_LAN3;
5564 break;
5565 default:
5566 goto bad;
5567 /* NOTREACHED */
5568 break;
5569 }
5570 break;
5571 default:
5572 if ((sc->sc_funcid & 0x01) == 1)
5573 do_invert = 1;
5574 break;
5575 }
5576
5577 do_read:
5578 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5579 myea) != 0) {
5580 goto bad;
5581 }
5582
5583 enaddr[0] = myea[0] & 0xff;
5584 enaddr[1] = myea[0] >> 8;
5585 enaddr[2] = myea[1] & 0xff;
5586 enaddr[3] = myea[1] >> 8;
5587 enaddr[4] = myea[2] & 0xff;
5588 enaddr[5] = myea[2] >> 8;
5589
5590 /*
5591 * Toggle the LSB of the MAC address on the second port
5592 * of some dual port cards.
5593 */
5594 if (do_invert != 0)
5595 enaddr[5] ^= 1;
5596
5597 return 0;
5598
5599 bad:
5600 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5601
5602 return -1;
5603 }
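/*
 * Worked example of the unpacking above: the EEPROM words
 * { 0x1100, 0x3322, 0x5544 } yield the station address
 * 00:11:22:33:44:55 (each word stores its two bytes low byte first).
 */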
5604
5605 /*
5606 * wm_add_rxbuf:
5607 *
5608 * Add a receive buffer to the indicated descriptor.
5609 */
5610 static int
5611 wm_add_rxbuf(struct wm_softc *sc, int idx)
5612 {
5613 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5614 struct mbuf *m;
5615 int error;
5616
5617 MGETHDR(m, M_DONTWAIT, MT_DATA);
5618 if (m == NULL)
5619 return ENOBUFS;
5620
5621 MCLGET(m, M_DONTWAIT);
5622 if ((m->m_flags & M_EXT) == 0) {
5623 m_freem(m);
5624 return ENOBUFS;
5625 }
5626
5627 if (rxs->rxs_mbuf != NULL)
5628 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5629
5630 rxs->rxs_mbuf = m;
5631
5632 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5633 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5634 BUS_DMA_READ|BUS_DMA_NOWAIT);
5635 if (error) {
5636 /* XXX XXX XXX */
5637 aprint_error_dev(sc->sc_dev,
5638 "unable to load rx DMA map %d, error = %d\n",
5639 idx, error);
5640 panic("wm_add_rxbuf");
5641 }
5642
5643 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5644 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5645
5646 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5647 if ((sc->sc_rctl & RCTL_EN) != 0)
5648 WM_INIT_RXDESC(sc, idx);
5649 } else
5650 WM_INIT_RXDESC(sc, idx);
5651
5652 return 0;
5653 }
5654
5655 /*
5656 * wm_set_ral:
5657 *
5658 * Set an entry in the receive address list.
5659 */
5660 static void
5661 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5662 {
5663 uint32_t ral_lo, ral_hi;
5664
5665 if (enaddr != NULL) {
5666 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5667 (enaddr[3] << 24);
5668 ral_hi = enaddr[4] | (enaddr[5] << 8);
5669 ral_hi |= RAL_AV;
5670 } else {
5671 ral_lo = 0;
5672 ral_hi = 0;
5673 }
5674
5675 if (sc->sc_type >= WM_T_82544) {
5676 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5677 ral_lo);
5678 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5679 ral_hi);
5680 } else {
5681 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5682 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5683 }
5684 }
5685
5686 /*
5687 * wm_mchash:
5688 *
5689 * Compute the hash of the multicast address for the 4096-bit
5690 * multicast filter.
5691 */
5692 static uint32_t
5693 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5694 {
5695 static const int lo_shift[4] = { 4, 3, 2, 0 };
5696 static const int hi_shift[4] = { 4, 5, 6, 8 };
5697 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5698 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5699 uint32_t hash;
5700
5701 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5702 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5703 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5704 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5705 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5706 return (hash & 0x3ff);
5707 }
5708 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5709 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5710
5711 return (hash & 0xfff);
5712 }
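
/*
 * Illustration (editor's sketch, not built): how a wm_mchash() result
 * selects a bit in the multicast table array (MTA).  On non-ICH parts
 * the 12-bit hash is split into a 32-bit-register index (hash >> 5;
 * 128 registers * 32 bits = 4096 filter bits) and a bit position
 * within that register (hash & 0x1f), as done in wm_set_filter()
 * below.  The helper name is hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void
mta_slot_demo(uint32_t hash)
{
	uint32_t reg = (hash >> 5) & 0x7f;	/* which 32-bit MTA register */
	uint32_t bit = hash & 0x1f;		/* which bit in that register */

	printf("hash 0x%03x -> MTA[%u] |= 1U << %u\n",
	    (unsigned)hash, (unsigned)reg, (unsigned)bit);
}
#endif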
5713
5714 /*
5715 * wm_set_filter:
5716 *
5717 * Set up the receive filter.
5718 */
5719 static void
5720 wm_set_filter(struct wm_softc *sc)
5721 {
5722 struct ethercom *ec = &sc->sc_ethercom;
5723 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5724 struct ether_multi *enm;
5725 struct ether_multistep step;
5726 bus_addr_t mta_reg;
5727 uint32_t hash, reg, bit;
5728 int i, size;
5729
5730 if (sc->sc_type >= WM_T_82544)
5731 mta_reg = WMREG_CORDOVA_MTA;
5732 else
5733 mta_reg = WMREG_MTA;
5734
5735 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5736
5737 if (ifp->if_flags & IFF_BROADCAST)
5738 sc->sc_rctl |= RCTL_BAM;
5739 if (ifp->if_flags & IFF_PROMISC) {
5740 sc->sc_rctl |= RCTL_UPE;
5741 goto allmulti;
5742 }
5743
5744 /*
5745 * Set the station address in the first RAL slot, and
5746 * clear the remaining slots.
5747 */
5748 if (sc->sc_type == WM_T_ICH8)
5749 size = WM_RAL_TABSIZE_ICH8 - 1;
5750 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5751 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
5752 || (sc->sc_type == WM_T_PCH_LPT))
5753 size = WM_RAL_TABSIZE_ICH8;
5754 else if (sc->sc_type == WM_T_82575)
5755 size = WM_RAL_TABSIZE_82575;
5756 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5757 size = WM_RAL_TABSIZE_82576;
5758 else if (sc->sc_type == WM_T_I350)
5759 size = WM_RAL_TABSIZE_I350;
5760 else
5761 size = WM_RAL_TABSIZE;
5762 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5763 for (i = 1; i < size; i++)
5764 wm_set_ral(sc, NULL, i);
5765
5766 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5767 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5768 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5769 size = WM_ICH8_MC_TABSIZE;
5770 else
5771 size = WM_MC_TABSIZE;
5772 /* Clear out the multicast table. */
5773 for (i = 0; i < size; i++)
5774 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5775
5776 ETHER_FIRST_MULTI(step, ec, enm);
5777 while (enm != NULL) {
5778 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5779 /*
5780 * We must listen to a range of multicast addresses.
5781 * For now, just accept all multicasts, rather than
5782 * trying to set only those filter bits needed to match
5783 * the range. (At this time, the only use of address
5784 * ranges is for IP multicast routing, for which the
5785 * range is big enough to require all bits set.)
5786 */
5787 goto allmulti;
5788 }
5789
5790 hash = wm_mchash(sc, enm->enm_addrlo);
5791
5792 reg = (hash >> 5);
5793 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5794 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5795 || (sc->sc_type == WM_T_PCH2)
5796 || (sc->sc_type == WM_T_PCH_LPT))
5797 reg &= 0x1f;
5798 else
5799 reg &= 0x7f;
5800 bit = hash & 0x1f;
5801
5802 hash = CSR_READ(sc, mta_reg + (reg << 2));
5803 hash |= 1U << bit;
5804
5805 /* XXX Hardware bug?? */
5806 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5807 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5808 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5809 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5810 } else
5811 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5812
5813 ETHER_NEXT_MULTI(step, enm);
5814 }
5815
5816 ifp->if_flags &= ~IFF_ALLMULTI;
5817 goto setit;
5818
5819 allmulti:
5820 ifp->if_flags |= IFF_ALLMULTI;
5821 sc->sc_rctl |= RCTL_MPE;
5822
5823 setit:
5824 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5825 }
5826
5827 /*
5828 * wm_tbi_mediainit:
5829 *
5830 * Initialize media for use on 1000BASE-X devices.
5831 */
5832 static void
5833 wm_tbi_mediainit(struct wm_softc *sc)
5834 {
5835 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5836 const char *sep = "";
5837
5838 if (sc->sc_type < WM_T_82543)
5839 sc->sc_tipg = TIPG_WM_DFLT;
5840 else
5841 sc->sc_tipg = TIPG_LG_DFLT;
5842
5843 sc->sc_tbi_anegticks = 5;
5844
5845 /* Initialize our media structures */
5846 sc->sc_mii.mii_ifp = ifp;
5847
5848 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5849 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5850 wm_tbi_mediastatus);
5851
5852 /*
5853 * SWD Pins:
5854 *
5855 * 0 = Link LED (output)
5856 * 1 = Loss Of Signal (input)
5857 */
5858 sc->sc_ctrl |= CTRL_SWDPIO(0);
5859 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5860
5861 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5862
5863 #define ADD(ss, mm, dd) \
5864 do { \
5865 aprint_normal("%s%s", sep, ss); \
5866 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5867 sep = ", "; \
5868 } while (/*CONSTCOND*/0)
5869
5870 aprint_normal_dev(sc->sc_dev, "");
5871 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5872 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5873 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5874 aprint_normal("\n");
5875
5876 #undef ADD
5877
5878 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5879 }
5880
5881 /*
5882 * wm_tbi_mediastatus: [ifmedia interface function]
5883 *
5884 * Get the current interface media status on a 1000BASE-X device.
5885 */
5886 static void
5887 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5888 {
5889 struct wm_softc *sc = ifp->if_softc;
5890 uint32_t ctrl, status;
5891
5892 ifmr->ifm_status = IFM_AVALID;
5893 ifmr->ifm_active = IFM_ETHER;
5894
5895 status = CSR_READ(sc, WMREG_STATUS);
5896 if ((status & STATUS_LU) == 0) {
5897 ifmr->ifm_active |= IFM_NONE;
5898 return;
5899 }
5900
5901 ifmr->ifm_status |= IFM_ACTIVE;
5902 ifmr->ifm_active |= IFM_1000_SX;
5903 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5904 ifmr->ifm_active |= IFM_FDX;
5905 ctrl = CSR_READ(sc, WMREG_CTRL);
5906 if (ctrl & CTRL_RFCE)
5907 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5908 if (ctrl & CTRL_TFCE)
5909 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5910 }
5911
5912 /*
5913 * wm_tbi_mediachange: [ifmedia interface function]
5914 *
5915 * Set hardware to newly-selected media on a 1000BASE-X device.
5916 */
5917 static int
5918 wm_tbi_mediachange(struct ifnet *ifp)
5919 {
5920 struct wm_softc *sc = ifp->if_softc;
5921 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5922 uint32_t status;
5923 int i;
5924
5925 sc->sc_txcw = 0;
5926 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5927 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5928 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5929 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5930 sc->sc_txcw |= TXCW_ANE;
5931 } else {
5932 /*
5933 * If autonegotiation is turned off, force link up and turn on
5934 * full duplex
5935 */
5936 sc->sc_txcw &= ~TXCW_ANE;
5937 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5938 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5939 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5940 delay(1000);
5941 }
5942
5943 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5944 device_xname(sc->sc_dev),sc->sc_txcw));
5945 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5946 delay(10000);
5947
5948 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5949 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5950
5951 /*
5952 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5953 * optics detect a signal, 0 if they don't.
5954 */
5955 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5956 /* Have signal; wait for the link to come up. */
5957
5958 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5959 /*
5960 * Reset the link, and let autonegotiation do its thing
5961 */
5962 sc->sc_ctrl |= CTRL_LRST;
5963 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5964 delay(1000);
5965 sc->sc_ctrl &= ~CTRL_LRST;
5966 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5967 delay(1000);
5968 }
5969
5970 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5971 delay(10000);
5972 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5973 break;
5974 }
5975
5976 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5977 device_xname(sc->sc_dev),i));
5978
5979 status = CSR_READ(sc, WMREG_STATUS);
5980 DPRINTF(WM_DEBUG_LINK,
5981 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5982 device_xname(sc->sc_dev),status, STATUS_LU));
5983 if (status & STATUS_LU) {
5984 /* Link is up. */
5985 DPRINTF(WM_DEBUG_LINK,
5986 ("%s: LINK: set media -> link up %s\n",
5987 device_xname(sc->sc_dev),
5988 (status & STATUS_FD) ? "FDX" : "HDX"));
5989
5990 /*
5991 * NOTE: CTRL will update TFCE and RFCE automatically,
5992 * so we should update sc->sc_ctrl
5993 */
5994 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5995 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5996 sc->sc_fcrtl &= ~FCRTL_XONE;
5997 if (status & STATUS_FD)
5998 sc->sc_tctl |=
5999 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6000 else
6001 sc->sc_tctl |=
6002 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6003 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6004 sc->sc_fcrtl |= FCRTL_XONE;
6005 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6006 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6007 WMREG_OLD_FCRTL : WMREG_FCRTL,
6008 sc->sc_fcrtl);
6009 sc->sc_tbi_linkup = 1;
6010 } else {
6011 if (i == WM_LINKUP_TIMEOUT)
6012 wm_check_for_link(sc);
6013 /* Link is down. */
6014 DPRINTF(WM_DEBUG_LINK,
6015 ("%s: LINK: set media -> link down\n",
6016 device_xname(sc->sc_dev)));
6017 sc->sc_tbi_linkup = 0;
6018 }
6019 } else {
6020 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6021 device_xname(sc->sc_dev)));
6022 sc->sc_tbi_linkup = 0;
6023 }
6024
6025 wm_tbi_set_linkled(sc);
6026
6027 return 0;
6028 }
6029
6030 /*
6031 * wm_tbi_set_linkled:
6032 *
6033 * Update the link LED on 1000BASE-X devices.
6034 */
6035 static void
6036 wm_tbi_set_linkled(struct wm_softc *sc)
6037 {
6038
6039 if (sc->sc_tbi_linkup)
6040 sc->sc_ctrl |= CTRL_SWDPIN(0);
6041 else
6042 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6043
6044 /* On 82540 and newer devices the link LED is active-low */
6045 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6046
6047 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6048 }
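
/*
 * Illustration (editor's sketch, not built): the conditional XOR above
 * implements the active-low LED on 82540 and newer parts; after the
 * logical LED state is chosen, the pin bit is inverted once more so
 * that "link up" drives the pin low.  A minimal sketch of the same
 * logic; the helper name is hypothetical.
 */
#if 0
static uint32_t
led_pin_bit(int linkup, int active_low)
{
	uint32_t pin = linkup ? 1 : 0;		/* logical LED state */

	return active_low ? (pin ^ 1) : pin;	/* invert for active-low */
}
#endif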
6049
6050 /*
6051 * wm_tbi_check_link:
6052 *
6053 * Check the link on 1000BASE-X devices.
6054 */
6055 static void
6056 wm_tbi_check_link(struct wm_softc *sc)
6057 {
6058 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6059 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6060 uint32_t rxcw, ctrl, status;
6061
6062 status = CSR_READ(sc, WMREG_STATUS);
6063
6064 rxcw = CSR_READ(sc, WMREG_RXCW);
6065 ctrl = CSR_READ(sc, WMREG_CTRL);
6066
6067 /* set link status */
6068 if ((status & STATUS_LU) == 0) {
6069 DPRINTF(WM_DEBUG_LINK,
6070 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6071 sc->sc_tbi_linkup = 0;
6072 } else if (sc->sc_tbi_linkup == 0) {
6073 DPRINTF(WM_DEBUG_LINK,
6074 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6075 (status & STATUS_FD) ? "FDX" : "HDX"));
6076 sc->sc_tbi_linkup = 1;
6077 }
6078
6079 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6080 && ((status & STATUS_LU) == 0)) {
6081 sc->sc_tbi_linkup = 0;
6082 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6083 /* RXCFG storm! */
6084 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6085 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6086 wm_init(ifp);
6087 ifp->if_start(ifp);
6088 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6089 /* If the timer expired, retry autonegotiation */
6090 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6091 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6092 sc->sc_tbi_ticks = 0;
6093 /*
6094 * Reset the link, and let autonegotiation do
6095 * its thing
6096 */
6097 sc->sc_ctrl |= CTRL_LRST;
6098 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6099 delay(1000);
6100 sc->sc_ctrl &= ~CTRL_LRST;
6101 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6102 delay(1000);
6103 CSR_WRITE(sc, WMREG_TXCW,
6104 sc->sc_txcw & ~TXCW_ANE);
6105 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6106 }
6107 }
6108 }
6109
6110 wm_tbi_set_linkled(sc);
6111 }
6112
6113 /*
6114 * wm_gmii_reset:
6115 *
6116 * Reset the PHY.
6117 */
6118 static void
6119 wm_gmii_reset(struct wm_softc *sc)
6120 {
6121 uint32_t reg;
6122 int rv;
6123
6124 /* get phy semaphore */
6125 switch (sc->sc_type) {
6126 case WM_T_82571:
6127 case WM_T_82572:
6128 case WM_T_82573:
6129 case WM_T_82574:
6130 case WM_T_82583:
6131 /* XXX should get sw semaphore, too */
6132 rv = wm_get_swsm_semaphore(sc);
6133 break;
6134 case WM_T_82575:
6135 case WM_T_82576:
6136 case WM_T_82580:
6137 case WM_T_82580ER:
6138 case WM_T_I350:
6139 case WM_T_I210:
6140 case WM_T_I211:
6141 case WM_T_80003:
6142 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6143 break;
6144 case WM_T_ICH8:
6145 case WM_T_ICH9:
6146 case WM_T_ICH10:
6147 case WM_T_PCH:
6148 case WM_T_PCH2:
6149 case WM_T_PCH_LPT:
6150 rv = wm_get_swfwhw_semaphore(sc);
6151 break;
6152 default:
6153 /* nothing to do */
6154 rv = 0;
6155 break;
6156 }
6157 if (rv != 0) {
6158 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6159 __func__);
6160 return;
6161 }
6162
6163 switch (sc->sc_type) {
6164 case WM_T_82542_2_0:
6165 case WM_T_82542_2_1:
6166 /* null */
6167 break;
6168 case WM_T_82543:
6169 /*
6170 * With 82543, we need to force the MAC's speed and duplex
6171 * to match the PHY's speed and duplex configuration.
6172 * In addition, we need to perform a hardware reset on the PHY
6173 * to take it out of reset.
6174 */
6175 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6176 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6177
6178 /* The PHY reset pin is active-low. */
6179 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6180 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6181 CTRL_EXT_SWDPIN(4));
6182 reg |= CTRL_EXT_SWDPIO(4);
6183
6184 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6185 delay(10*1000);
6186
6187 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6188 delay(150);
6189 #if 0
6190 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6191 #endif
6192 delay(20*1000); /* XXX extra delay to get PHY ID? */
6193 break;
6194 case WM_T_82544: /* reset 10000us */
6195 case WM_T_82540:
6196 case WM_T_82545:
6197 case WM_T_82545_3:
6198 case WM_T_82546:
6199 case WM_T_82546_3:
6200 case WM_T_82541:
6201 case WM_T_82541_2:
6202 case WM_T_82547:
6203 case WM_T_82547_2:
6204 case WM_T_82571: /* reset 100us */
6205 case WM_T_82572:
6206 case WM_T_82573:
6207 case WM_T_82574:
6208 case WM_T_82575:
6209 case WM_T_82576:
6210 case WM_T_82580:
6211 case WM_T_82580ER:
6212 case WM_T_I350:
6213 case WM_T_I210:
6214 case WM_T_I211:
6215 case WM_T_82583:
6216 case WM_T_80003:
6217 /* generic reset */
6218 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6219 delay(20000);
6220 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6221 delay(20000);
6222
6223 if ((sc->sc_type == WM_T_82541)
6224 || (sc->sc_type == WM_T_82541_2)
6225 || (sc->sc_type == WM_T_82547)
6226 || (sc->sc_type == WM_T_82547_2)) {
6227 /* workarounds for igp are done in igp_reset() */
6228 /* XXX add code to set LED after phy reset */
6229 }
6230 break;
6231 case WM_T_ICH8:
6232 case WM_T_ICH9:
6233 case WM_T_ICH10:
6234 case WM_T_PCH:
6235 case WM_T_PCH2:
6236 case WM_T_PCH_LPT:
6237 /* generic reset */
6238 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6239 delay(100);
6240 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6241 delay(150);
6242 break;
6243 default:
6244 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6245 __func__);
6246 break;
6247 }
6248
6249 /* release PHY semaphore */
6250 switch (sc->sc_type) {
6251 case WM_T_82571:
6252 case WM_T_82572:
6253 case WM_T_82573:
6254 case WM_T_82574:
6255 case WM_T_82583:
6256 /* XXX should put sw semaphore, too */
6257 wm_put_swsm_semaphore(sc);
6258 break;
6259 case WM_T_82575:
6260 case WM_T_82576:
6261 case WM_T_82580:
6262 case WM_T_82580ER:
6263 case WM_T_I350:
6264 case WM_T_I210:
6265 case WM_T_I211:
6266 case WM_T_80003:
6267 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6268 break;
6269 case WM_T_ICH8:
6270 case WM_T_ICH9:
6271 case WM_T_ICH10:
6272 case WM_T_PCH:
6273 case WM_T_PCH2:
6274 case WM_T_PCH_LPT:
6275 wm_put_swfwhw_semaphore(sc);
6276 break;
6277 default:
6278 /* nothing to do */
6280 break;
6281 }
6282
6283 /* get_cfg_done */
6284 wm_get_cfg_done(sc);
6285
6286 /* extra setup */
6287 switch (sc->sc_type) {
6288 case WM_T_82542_2_0:
6289 case WM_T_82542_2_1:
6290 case WM_T_82543:
6291 case WM_T_82544:
6292 case WM_T_82540:
6293 case WM_T_82545:
6294 case WM_T_82545_3:
6295 case WM_T_82546:
6296 case WM_T_82546_3:
6297 case WM_T_82541_2:
6298 case WM_T_82547_2:
6299 case WM_T_82571:
6300 case WM_T_82572:
6301 case WM_T_82573:
6302 case WM_T_82574:
6303 case WM_T_82575:
6304 case WM_T_82576:
6305 case WM_T_82580:
6306 case WM_T_82580ER:
6307 case WM_T_I350:
6308 case WM_T_I210:
6309 case WM_T_I211:
6310 case WM_T_82583:
6311 case WM_T_80003:
6312 /* null */
6313 break;
6314 case WM_T_82541:
6315 case WM_T_82547:
6316 /* XXX Configure the activity LED after PHY reset */
6317 break;
6318 case WM_T_ICH8:
6319 case WM_T_ICH9:
6320 case WM_T_ICH10:
6321 case WM_T_PCH:
6322 case WM_T_PCH2:
6323 case WM_T_PCH_LPT:
6324 /* Allow time for h/w to get to a quiescent state after reset */
6325 delay(10*1000);
6326
6327 if (sc->sc_type == WM_T_PCH)
6328 wm_hv_phy_workaround_ich8lan(sc);
6329
6330 if (sc->sc_type == WM_T_PCH2)
6331 wm_lv_phy_workaround_ich8lan(sc);
6332
6333 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6334 /*
6335 * dummy read to clear the phy wakeup bit after lcd
6336 * reset
6337 */
6338 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6339 }
6340
6341 /*
6342 * XXX Configure the LCD with the extended configuration region
6343 * in NVM
6344 */
6345
6346 /* Configure the LCD with the OEM bits in NVM */
6347 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6348 /*
6349 * Disable LPLU.
6350 * XXX It seems that 82567 has LPLU, too.
6351 */
6352 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6353 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6354 reg |= HV_OEM_BITS_ANEGNOW;
6355 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6356 }
6357 break;
6358 default:
6359 panic("%s: unknown type\n", __func__);
6360 break;
6361 }
6362 }
6363
6364 /*
6365 * wm_gmii_mediainit:
6366 *
6367 * Initialize media for use on 1000BASE-T devices.
6368 */
6369 static void
6370 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6371 {
6372 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6373 struct mii_data *mii = &sc->sc_mii;
6374
6375 /* We have MII. */
6376 sc->sc_flags |= WM_F_HAS_MII;
6377
6378 if (sc->sc_type == WM_T_80003)
6379 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6380 else
6381 sc->sc_tipg = TIPG_1000T_DFLT;
6382
6383 /*
6384 * Let the chip set speed/duplex on its own based on
6385 * signals from the PHY.
6386 * XXXbouyer - I'm not sure this is right for the 80003,
6387 * the em driver only sets CTRL_SLU here - but it seems to work.
6388 */
6389 sc->sc_ctrl |= CTRL_SLU;
6390 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6391
6392 /* Initialize our media structures and probe the GMII. */
6393 mii->mii_ifp = ifp;
6394
6395 /*
6396 * Determine the PHY access method.
6397 *
6398 * For SGMII, use the SGMII specific method.
6399 *
6400 * For some devices, we can determine the PHY access method
6401 * from sc_type.
6402 *
6403 * For ICH8 variants, it's difficult to determine the PHY access
6404 * method from sc_type, so use the PCI product ID for some devices.
6405 * For other ICH8 variants, try igp's method first; if the PHY
6406 * can't be detected, fall back to bm's method.
6407 */
6408 switch (prodid) {
6409 case PCI_PRODUCT_INTEL_PCH_M_LM:
6410 case PCI_PRODUCT_INTEL_PCH_M_LC:
6411 /* 82577 */
6412 sc->sc_phytype = WMPHY_82577;
6413 mii->mii_readreg = wm_gmii_hv_readreg;
6414 mii->mii_writereg = wm_gmii_hv_writereg;
6415 break;
6416 case PCI_PRODUCT_INTEL_PCH_D_DM:
6417 case PCI_PRODUCT_INTEL_PCH_D_DC:
6418 /* 82578 */
6419 sc->sc_phytype = WMPHY_82578;
6420 mii->mii_readreg = wm_gmii_hv_readreg;
6421 mii->mii_writereg = wm_gmii_hv_writereg;
6422 break;
6423 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6424 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6425 /* 82579 */
6426 sc->sc_phytype = WMPHY_82579;
6427 mii->mii_readreg = wm_gmii_hv_readreg;
6428 mii->mii_writereg = wm_gmii_hv_writereg;
6429 break;
6430 case PCI_PRODUCT_INTEL_82801I_BM:
6431 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6432 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6433 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6434 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6435 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6436 /* 82567 */
6437 sc->sc_phytype = WMPHY_BM;
6438 mii->mii_readreg = wm_gmii_bm_readreg;
6439 mii->mii_writereg = wm_gmii_bm_writereg;
6440 break;
6441 default:
6442 if ((sc->sc_flags & WM_F_SGMII) != 0) {
6443 mii->mii_readreg = wm_sgmii_readreg;
6444 mii->mii_writereg = wm_sgmii_writereg;
6445 } else if (sc->sc_type >= WM_T_80003) {
6446 mii->mii_readreg = wm_gmii_i80003_readreg;
6447 mii->mii_writereg = wm_gmii_i80003_writereg;
6448 } else if (sc->sc_type >= WM_T_I210) {
6449 mii->mii_readreg = wm_gmii_i82544_readreg;
6450 mii->mii_writereg = wm_gmii_i82544_writereg;
6451 } else if (sc->sc_type >= WM_T_82580) {
6452 sc->sc_phytype = WMPHY_82580;
6453 mii->mii_readreg = wm_gmii_82580_readreg;
6454 mii->mii_writereg = wm_gmii_82580_writereg;
6455 } else if (sc->sc_type >= WM_T_82544) {
6456 mii->mii_readreg = wm_gmii_i82544_readreg;
6457 mii->mii_writereg = wm_gmii_i82544_writereg;
6458 } else {
6459 mii->mii_readreg = wm_gmii_i82543_readreg;
6460 mii->mii_writereg = wm_gmii_i82543_writereg;
6461 }
6462 break;
6463 }
6464 mii->mii_statchg = wm_gmii_statchg;
6465
6466 wm_gmii_reset(sc);
6467
6468 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6469 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6470 wm_gmii_mediastatus);
6471
6472 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6473 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6474 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6475 || (sc->sc_type == WM_T_I211)) {
6476 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6477 /* Attach only one port */
6478 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6479 MII_OFFSET_ANY, MIIF_DOPAUSE);
6480 } else {
6481 int i;
6482 uint32_t ctrl_ext;
6483
6484 /* Power on the SGMII PHY if it is disabled */
6485 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6486 CSR_WRITE(sc, WMREG_CTRL_EXT,
6487 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6488 CSR_WRITE_FLUSH(sc);
6489 delay(300*1000); /* XXX too long */
6490
6491 /* PHY addresses 1 through 7 */
6492 for (i = 1; i < 8; i++)
6493 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6494 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6495
6496 /* Restore the previous SFP cage power state */
6497 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6498 }
6499 } else {
6500 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6501 MII_OFFSET_ANY, MIIF_DOPAUSE);
6502 }
6503
6504 /*
6505 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected, call
6506 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
6507 */
6508 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6509 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6510 wm_set_mdio_slow_mode_hv(sc);
6511 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6512 MII_OFFSET_ANY, MIIF_DOPAUSE);
6513 }
6514
6515 /*
6516 * (For ICH8 variants)
6517 * If PHY detection failed, use BM's r/w function and retry.
6518 */
6519 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6520 /* if failed, retry with *_bm_* */
6521 mii->mii_readreg = wm_gmii_bm_readreg;
6522 mii->mii_writereg = wm_gmii_bm_writereg;
6523
6524 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6525 MII_OFFSET_ANY, MIIF_DOPAUSE);
6526 }
6527
6528 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6529 /* No PHY was found */
6530 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6531 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6532 sc->sc_phytype = WMPHY_NONE;
6533 } else {
6534 /*
6535 * PHY Found!
6536 * Check PHY type.
6537 */
6538 uint32_t model;
6539 struct mii_softc *child;
6540
6541 child = LIST_FIRST(&mii->mii_phys);
6542 if (device_is_a(child->mii_dev, "igphy")) {
6543 struct igphy_softc *isc = (struct igphy_softc *)child;
6544
6545 model = isc->sc_mii.mii_mpd_model;
6546 if (model == MII_MODEL_yyINTEL_I82566)
6547 sc->sc_phytype = WMPHY_IGP_3;
6548 }
6549
6550 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6551 }
6552 }
6553
6554 /*
6555 * wm_gmii_mediastatus: [ifmedia interface function]
6556 *
6557 * Get the current interface media status on a 1000BASE-T device.
6558 */
6559 static void
6560 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6561 {
6562 struct wm_softc *sc = ifp->if_softc;
6563
6564 ether_mediastatus(ifp, ifmr);
6565 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6566 | sc->sc_flowflags;
6567 }
6568
6569 /*
6570 * wm_gmii_mediachange: [ifmedia interface function]
6571 *
6572 * Set hardware to newly-selected media on a 1000BASE-T device.
6573 */
6574 static int
6575 wm_gmii_mediachange(struct ifnet *ifp)
6576 {
6577 struct wm_softc *sc = ifp->if_softc;
6578 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6579 int rc;
6580
6581 if ((ifp->if_flags & IFF_UP) == 0)
6582 return 0;
6583
6584 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6585 sc->sc_ctrl |= CTRL_SLU;
6586 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6587 || (sc->sc_type > WM_T_82543)) {
6588 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6589 } else {
6590 sc->sc_ctrl &= ~CTRL_ASDE;
6591 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6592 if (ife->ifm_media & IFM_FDX)
6593 sc->sc_ctrl |= CTRL_FD;
6594 switch (IFM_SUBTYPE(ife->ifm_media)) {
6595 case IFM_10_T:
6596 sc->sc_ctrl |= CTRL_SPEED_10;
6597 break;
6598 case IFM_100_TX:
6599 sc->sc_ctrl |= CTRL_SPEED_100;
6600 break;
6601 case IFM_1000_T:
6602 sc->sc_ctrl |= CTRL_SPEED_1000;
6603 break;
6604 default:
6605 panic("wm_gmii_mediachange: bad media 0x%x",
6606 ife->ifm_media);
6607 }
6608 }
6609 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6610 if (sc->sc_type <= WM_T_82543)
6611 wm_gmii_reset(sc);
6612
6613 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6614 return 0;
6615 return rc;
6616 }
6617
6618 #define MDI_IO CTRL_SWDPIN(2)
6619 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6620 #define MDI_CLK CTRL_SWDPIN(3)
6621
6622 static void
6623 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6624 {
6625 uint32_t i, v;
6626
6627 v = CSR_READ(sc, WMREG_CTRL);
6628 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6629 v |= MDI_DIR | CTRL_SWDPIO(3);
6630
6631 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6632 if (data & i)
6633 v |= MDI_IO;
6634 else
6635 v &= ~MDI_IO;
6636 CSR_WRITE(sc, WMREG_CTRL, v);
6637 delay(10);
6638 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6639 delay(10);
6640 CSR_WRITE(sc, WMREG_CTRL, v);
6641 delay(10);
6642 }
6643 }
6644
6645 static uint32_t
6646 i82543_mii_recvbits(struct wm_softc *sc)
6647 {
6648 uint32_t v, i, data = 0;
6649
6650 v = CSR_READ(sc, WMREG_CTRL);
6651 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6652 v |= CTRL_SWDPIO(3);
6653
6654 CSR_WRITE(sc, WMREG_CTRL, v);
6655 delay(10);
6656 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6657 delay(10);
6658 CSR_WRITE(sc, WMREG_CTRL, v);
6659 delay(10);
6660
6661 for (i = 0; i < 16; i++) {
6662 data <<= 1;
6663 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6664 delay(10);
6665 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6666 data |= 1;
6667 CSR_WRITE(sc, WMREG_CTRL, v);
6668 delay(10);
6669 }
6670
6671 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6672 delay(10);
6673 CSR_WRITE(sc, WMREG_CTRL, v);
6674 delay(10);
6675
6676 return data;
6677 }
6678
6679 #undef MDI_IO
6680 #undef MDI_DIR
6681 #undef MDI_CLK
6682
6683 /*
6684 * wm_gmii_i82543_readreg: [mii interface function]
6685 *
6686 * Read a PHY register on the GMII (i82543 version).
6687 */
6688 static int
6689 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6690 {
6691 struct wm_softc *sc = device_private(self);
6692 int rv;
6693
6694 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6695 i82543_mii_sendbits(sc, reg | (phy << 5) |
6696 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6697 rv = i82543_mii_recvbits(sc) & 0xffff;
6698
6699 DPRINTF(WM_DEBUG_GMII,
6700 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6701 device_xname(sc->sc_dev), phy, reg, rv));
6702
6703 return rv;
6704 }
6705
6706 /*
6707 * wm_gmii_i82543_writereg: [mii interface function]
6708 *
6709 * Write a PHY register on the GMII (i82543 version).
6710 */
6711 static void
6712 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6713 {
6714 struct wm_softc *sc = device_private(self);
6715
6716 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6717 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6718 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6719 (MII_COMMAND_START << 30), 32);
6720 }
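
/*
 * Illustration (editor's sketch, not built): the bit-bang routines
 * above emit a standard IEEE 802.3 clause 22 management frame: a
 * 32-bit preamble of ones, start (01), opcode (01 write / 10 read),
 * 5-bit PHY address, 5-bit register address, turnaround, and 16 data
 * bits.  A minimal sketch of assembling the write frame as one 32-bit
 * word, mirroring wm_gmii_i82543_writereg(); the helper name is
 * hypothetical.
 */
#if 0
#include <stdint.h>

static uint32_t
mdio_c22_write_frame(uint32_t phy, uint32_t reg, uint32_t val)
{
	return (0x1u << 30)		/* start of frame (01) */
	    | (0x1u << 28)		/* opcode: write (01) */
	    | ((phy & 0x1f) << 23)	/* PHY address */
	    | ((reg & 0x1f) << 18)	/* register address */
	    | (0x2u << 16)		/* turnaround (10) */
	    | (val & 0xffff);		/* data */
}
#endif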
6721
6722 /*
6723 * wm_gmii_i82544_readreg: [mii interface function]
6724 *
6725 * Read a PHY register on the GMII.
6726 */
6727 static int
6728 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6729 {
6730 struct wm_softc *sc = device_private(self);
6731 uint32_t mdic = 0;
6732 int i, rv;
6733
6734 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6735 MDIC_REGADD(reg));
6736
6737 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6738 mdic = CSR_READ(sc, WMREG_MDIC);
6739 if (mdic & MDIC_READY)
6740 break;
6741 delay(50);
6742 }
6743
6744 if ((mdic & MDIC_READY) == 0) {
6745 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6746 device_xname(sc->sc_dev), phy, reg);
6747 rv = 0;
6748 } else if (mdic & MDIC_E) {
6749 #if 0 /* This is normal if no PHY is present. */
6750 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6751 device_xname(sc->sc_dev), phy, reg);
6752 #endif
6753 rv = 0;
6754 } else {
6755 rv = MDIC_DATA(mdic);
6756 if (rv == 0xffff)
6757 rv = 0;
6758 }
6759
6760 return rv;
6761 }
6762
6763 /*
6764 * wm_gmii_i82544_writereg: [mii interface function]
6765 *
6766 * Write a PHY register on the GMII.
6767 */
6768 static void
6769 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6770 {
6771 struct wm_softc *sc = device_private(self);
6772 uint32_t mdic = 0;
6773 int i;
6774
6775 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6776 MDIC_REGADD(reg) | MDIC_DATA(val));
6777
6778 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6779 mdic = CSR_READ(sc, WMREG_MDIC);
6780 if (mdic & MDIC_READY)
6781 break;
6782 delay(50);
6783 }
6784
6785 if ((mdic & MDIC_READY) == 0)
6786 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6787 device_xname(sc->sc_dev), phy, reg);
6788 else if (mdic & MDIC_E)
6789 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6790 device_xname(sc->sc_dev), phy, reg);
6791 }
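
/*
 * Illustration (editor's sketch, not built): both MDIC accessors above
 * follow the same pattern: write one command word (opcode, PHY address,
 * register address, data), then poll MDIC_READY with a bounded delay
 * loop and check MDIC_E for errors.  A minimal sketch of the shared
 * polling step; the helper name is hypothetical.
 */
#if 0
static int
mdic_wait_ready(struct wm_softc *sc)
{
	int i;

	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		if (CSR_READ(sc, WMREG_MDIC) & MDIC_READY)
			return 0;	/* command completed */
		delay(50);
	}
	return ETIMEDOUT;		/* still busy; caller reports it */
}
#endif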
6792
6793 /*
6794 * wm_gmii_i80003_readreg: [mii interface function]
6795 *
6796 * Read a PHY register on the Kumeran bus (80003).
6797 * This could be handled by the PHY layer if we didn't have to lock the
6798 * resource ...
6799 */
6800 static int
6801 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6802 {
6803 struct wm_softc *sc = device_private(self);
6804 int sem;
6805 int rv;
6806
6807 if (phy != 1) /* only one PHY on kumeran bus */
6808 return 0;
6809
6810 sem = swfwphysem[sc->sc_funcid];
6811 if (wm_get_swfw_semaphore(sc, sem)) {
6812 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6813 __func__);
6814 return 0;
6815 }
6816
6817 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6818 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6819 reg >> GG82563_PAGE_SHIFT);
6820 } else {
6821 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6822 reg >> GG82563_PAGE_SHIFT);
6823 }
6824 /* Wait another 200us to work around a ready-bit bug in the MDIC register */
6825 delay(200);
6826 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6827 delay(200);
6828
6829 wm_put_swfw_semaphore(sc, sem);
6830 return rv;
6831 }
6832
6833 /*
6834 * wm_gmii_i80003_writereg: [mii interface function]
6835 *
6836 * Write a PHY register on the Kumeran bus (80003).
6837 * This could be handled by the PHY layer if we didn't have to lock the
6838 * resource ...
6839 */
6840 static void
6841 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6842 {
6843 struct wm_softc *sc = device_private(self);
6844 int sem;
6845
6846 if (phy != 1) /* only one PHY on kumeran bus */
6847 return;
6848
6849 sem = swfwphysem[sc->sc_funcid];
6850 if (wm_get_swfw_semaphore(sc, sem)) {
6851 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6852 __func__);
6853 return;
6854 }
6855
6856 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6857 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6858 reg >> GG82563_PAGE_SHIFT);
6859 } else {
6860 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6861 reg >> GG82563_PAGE_SHIFT);
6862 }
6863 /* Wait another 200us to work around a ready-bit bug in the MDIC register */
6864 delay(200);
6865 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6866 delay(200);
6867
6868 wm_put_swfw_semaphore(sc, sem);
6869 }
6870
6871 /*
6872 * wm_gmii_bm_readreg: [mii interface function]
6873 *
6874 * Read a PHY register on the BM PHYs (82567 and related).
6875 * This could be handled by the PHY layer if we didn't have to lock the
6876 * resource ...
6877 */
6878 static int
6879 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6880 {
6881 struct wm_softc *sc = device_private(self);
6882 int sem;
6883 int rv;
6884
6885 sem = swfwphysem[sc->sc_funcid];
6886 if (wm_get_swfw_semaphore(sc, sem)) {
6887 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6888 __func__);
6889 return 0;
6890 }
6891
6892 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6893 if (phy == 1)
6894 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6895 reg);
6896 else
6897 wm_gmii_i82544_writereg(self, phy,
6898 GG82563_PHY_PAGE_SELECT,
6899 reg >> GG82563_PAGE_SHIFT);
6900 }
6901
6902 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6903 wm_put_swfw_semaphore(sc, sem);
6904 return rv;
6905 }
6906
6907 /*
6908 * wm_gmii_bm_writereg: [mii interface function]
6909 *
6910 * Write a PHY register on the BM PHYs (82567 and related).
6911 * This could be handled by the PHY layer if we didn't have to lock the
6912 * resource ...
6913 */
6914 static void
6915 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6916 {
6917 struct wm_softc *sc = device_private(self);
6918 int sem;
6919
6920 sem = swfwphysem[sc->sc_funcid];
6921 if (wm_get_swfw_semaphore(sc, sem)) {
6922 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6923 __func__);
6924 return;
6925 }
6926
6927 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6928 if (phy == 1)
6929 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6930 reg);
6931 else
6932 wm_gmii_i82544_writereg(self, phy,
6933 GG82563_PHY_PAGE_SELECT,
6934 reg >> GG82563_PAGE_SHIFT);
6935 }
6936
6937 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6938 wm_put_swfw_semaphore(sc, sem);
6939 }
6940
6941 static void
6942 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6943 {
6944 struct wm_softc *sc = device_private(self);
6945 uint16_t regnum = BM_PHY_REG_NUM(offset);
6946 uint16_t wuce;
6947
6948 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6949 if (sc->sc_type == WM_T_PCH) {
6950 /* XXX the e1000 driver does nothing here... why? */
6951 }
6952
6953 /* Set page 769 */
6954 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6955 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6956
6957 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6958
6959 wuce &= ~BM_WUC_HOST_WU_BIT;
6960 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6961 wuce | BM_WUC_ENABLE_BIT);
6962
6963 /* Select page 800 */
6964 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6965 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6966
6967 /* Write page 800 */
6968 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6969
6970 if (rd)
6971 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6972 else
6973 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6974
6975 /* Set page 769 */
6976 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6977 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6978
6979 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6980 }
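
/*
 * Usage sketch (editor's addition, not built): page 800 registers are
 * reached through the helper above; rd selects the direction and *val
 * carries the data both ways.  BM_WUC is used here purely as an
 * example of a page 800 register.
 */
#if 0
	int16_t val;

	wm_access_phy_wakeup_reg_bm(self, BM_WUC, &val, 1);	/* read */
	wm_access_phy_wakeup_reg_bm(self, BM_WUC, &val, 0);	/* write back */
#endif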
6981
6982 /*
6983 * wm_gmii_hv_readreg: [mii interface function]
6984 *
6985 * Read a PHY register on the HV family PHYs (82577/82578/82579).
6986 * This could be handled by the PHY layer if we didn't have to lock the
6987 * resource ...
6988 */
6989 static int
6990 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6991 {
6992 struct wm_softc *sc = device_private(self);
6993 uint16_t page = BM_PHY_REG_PAGE(reg);
6994 uint16_t regnum = BM_PHY_REG_NUM(reg);
6995 uint16_t val;
6996 int rv;
6997
6998 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6999 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7000 __func__);
7001 return 0;
7002 }
7003
7004 /* XXX Workaround failure in MDIO access while cable is disconnected */
7005 if (sc->sc_phytype == WMPHY_82577) {
7006 /* XXX must write */
7007 }
7008
7009 /* Page 800 works differently than the rest so it has its own func */
7010 if (page == BM_WUC_PAGE) {
7011 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7012 return val;
7013 }
7014
7015 /*
7016 * Pages lower than 768 work differently than the rest, so they
7017 * would need their own function (not implemented here).
7018 */
7019 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7020 printf("gmii_hv_readreg!!!\n");
wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7021 return 0;
7022 }
7023
7024 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7025 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7026 page << BME1000_PAGE_SHIFT);
7027 }
7028
7029 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7030 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7031 return rv;
7032 }
7033
7034 /*
7035 * wm_gmii_hv_writereg: [mii interface function]
7036 *
7037 * Write a PHY register on the HV family PHYs (82577/82578/82579).
7038 * This could be handled by the PHY layer if we didn't have to lock the
7039 * resource ...
7040 */
7041 static void
7042 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7043 {
7044 struct wm_softc *sc = device_private(self);
7045 uint16_t page = BM_PHY_REG_PAGE(reg);
7046 uint16_t regnum = BM_PHY_REG_NUM(reg);
7047
7048 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
7049 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7050 __func__);
7051 return;
7052 }
7053
7054 /* XXX Workaround failure in MDIO access while cable is disconnected */
7055
7056 /* Page 800 works differently than the rest so it has its own func */
7057 if (page == BM_WUC_PAGE) {
7058 uint16_t tmp;
7059
7060 tmp = val;
7061 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7062 return;
7063 }
7064
7065 /*
7066 * Pages lower than 768 work differently than the rest, so they
7067 * would need their own function (not implemented here).
7068 */
7069 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7070 printf("gmii_hv_writereg!!!\n");
7071 return;
7072 }
7073
7074 /*
7075 * XXX Workaround MDIO accesses being disabled after entering IEEE
7076 * Power Down (whenever bit 11 of the PHY control register is set)
7077 */
7078
7079 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7080 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7081 page << BME1000_PAGE_SHIFT);
7082 }
7083
7084 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7085 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7086 }
7087
7088 /*
7089 * wm_sgmii_readreg: [mii interface function]
7090 *
7091 * Read a PHY register on the SGMII
7092 * This could be handled by the PHY layer if we didn't have to lock the
7093 * resource ...
7094 */
7095 static int
7096 wm_sgmii_readreg(device_t self, int phy, int reg)
7097 {
7098 struct wm_softc *sc = device_private(self);
7099 uint32_t i2ccmd;
7100 int i, rv;
7101
7102 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7103 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7104 __func__);
7105 return 0;
7106 }
7107
7108 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7109 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7110 | I2CCMD_OPCODE_READ;
7111 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7112
7113 /* Poll the ready bit */
7114 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7115 delay(50);
7116 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7117 if (i2ccmd & I2CCMD_READY)
7118 break;
7119 }
7120 if ((i2ccmd & I2CCMD_READY) == 0)
7121 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7122 if ((i2ccmd & I2CCMD_ERROR) != 0)
7123 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7124
7125 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7126
7127 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7128 return rv;
7129 }
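
/*
 * Illustration (editor's sketch, not built): the I2CCMD register
 * returns the two PHY data bytes in the opposite order from what the
 * MII layer expects, so wm_sgmii_readreg() swaps the low-order bytes
 * before returning.  A standalone sketch of that swap; the function
 * name is hypothetical.
 */
#if 0
#include <stdint.h>

static uint16_t
i2ccmd_swap16(uint32_t i2ccmd)
{
	/* Swap the two low-order bytes of the read-back data. */
	return (uint16_t)(((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00));
}
#endif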
7130
7131 /*
7132 * wm_sgmii_writereg: [mii interface function]
7133 *
7134 * Write a PHY register on the SGMII.
7135 * This could be handled by the PHY layer if we didn't have to lock the
7136 * resource ...
7137 */
7138 static void
7139 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7140 {
7141 struct wm_softc *sc = device_private(self);
7142 uint32_t i2ccmd;
7143 int i;
7144
7145 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7146 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7147 __func__);
7148 return;
7149 }
7150
7151 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7152 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7153 | I2CCMD_OPCODE_WRITE;
7154 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7155
7156 /* Poll the ready bit */
7157 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7158 delay(50);
7159 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7160 if (i2ccmd & I2CCMD_READY)
7161 break;
7162 }
7163 if ((i2ccmd & I2CCMD_READY) == 0)
7164 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7165 if ((i2ccmd & I2CCMD_ERROR) != 0)
7166 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7167
7168 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7169 }
7170
7171 /*
7172 * wm_gmii_82580_readreg: [mii interface function]
7173 *
7174 * Read a PHY register on the 82580 and I350.
7175 * This could be handled by the PHY layer if we didn't have to lock the
7176 * resource ...
7177 */
7178 static int
7179 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7180 {
7181 struct wm_softc *sc = device_private(self);
7182 int sem;
7183 int rv;
7184
7185 sem = swfwphysem[sc->sc_funcid];
7186 if (wm_get_swfw_semaphore(sc, sem)) {
7187 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7188 __func__);
7189 return 0;
7190 }
7191
7192 rv = wm_gmii_i82544_readreg(self, phy, reg);
7193
7194 wm_put_swfw_semaphore(sc, sem);
7195 return rv;
7196 }
7197
7198 /*
7199 * wm_gmii_82580_writereg: [mii interface function]
7200 *
7201 * Write a PHY register on the 82580 and I350.
7202 * This could be handled by the PHY layer if we didn't have to lock the
7203 * resource ...
7204 */
7205 static void
7206 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7207 {
7208 struct wm_softc *sc = device_private(self);
7209 int sem;
7210
7211 sem = swfwphysem[sc->sc_funcid];
7212 if (wm_get_swfw_semaphore(sc, sem)) {
7213 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7214 __func__);
7215 return;
7216 }
7217
7218 wm_gmii_i82544_writereg(self, phy, reg, val);
7219
7220 wm_put_swfw_semaphore(sc, sem);
7221 }
7222
7223 /*
7224 * wm_gmii_statchg: [mii interface function]
7225 *
7226 * Callback from MII layer when media changes.
7227 */
7228 static void
7229 wm_gmii_statchg(struct ifnet *ifp)
7230 {
7231 struct wm_softc *sc = ifp->if_softc;
7232 struct mii_data *mii = &sc->sc_mii;
7233
7234 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7235 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7236 sc->sc_fcrtl &= ~FCRTL_XONE;
7237
7238 /*
7239 * Get flow control negotiation result.
7240 */
7241 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7242 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7243 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7244 mii->mii_media_active &= ~IFM_ETH_FMASK;
7245 }
7246
7247 if (sc->sc_flowflags & IFM_FLOW) {
7248 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7249 sc->sc_ctrl |= CTRL_TFCE;
7250 sc->sc_fcrtl |= FCRTL_XONE;
7251 }
7252 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7253 sc->sc_ctrl |= CTRL_RFCE;
7254 }
7255
7256 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7257 DPRINTF(WM_DEBUG_LINK,
7258 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7259 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7260 } else {
7261 DPRINTF(WM_DEBUG_LINK,
7262 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7263 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7264 }
7265
7266 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7267 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7268 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7269 : WMREG_FCRTL, sc->sc_fcrtl);
7270 if (sc->sc_type == WM_T_80003) {
7271 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7272 case IFM_1000_T:
7273 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7274 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7275 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7276 break;
7277 default:
7278 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7279 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7280 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7281 break;
7282 }
7283 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7284 }
7285 }
7286
7287 /*
7288 * wm_kmrn_readreg:
7289 *
7290 * Read a kumeran register
7291 */
7292 static int
7293 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7294 {
7295 int rv;
7296
7297 if (sc->sc_flags & WM_F_SWFW_SYNC) {
7298 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7299 aprint_error_dev(sc->sc_dev,
7300 "%s: failed to get semaphore\n", __func__);
7301 return 0;
7302 }
7303 } else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
7304 if (wm_get_swfwhw_semaphore(sc)) {
7305 aprint_error_dev(sc->sc_dev,
7306 "%s: failed to get semaphore\n", __func__);
7307 return 0;
7308 }
7309 }
7310
7311 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7312 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7313 KUMCTRLSTA_REN);
7314 delay(2);
7315
7316 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7317
7318 if (sc->sc_flags & WM_F_SWFW_SYNC)
7319 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7320 else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
7321 wm_put_swfwhw_semaphore(sc);
7322
7323 return rv;
7324 }
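
/*
 * Illustration (editor's sketch, not built): a Kumeran access is
 * encoded entirely in the KUMCTRLSTA register; the register offset is
 * shifted into the OFFSET field, KUMCTRLSTA_REN requests a read, and
 * the data travels in the low 16 bits.  A minimal sketch of building
 * the read command word; the helper name is hypothetical.
 */
#if 0
static uint32_t
kmrn_read_cmd(int reg)
{
	return ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET)
	    | KUMCTRLSTA_REN;	/* read enable */
}
#endif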
7325
7326 /*
7327 * wm_kmrn_writereg:
7328 *
7329 * Write a kumeran register
7330 */
7331 static void
7332 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7333 {
7334
7335 if (sc->sc_flags & WM_F_SWFW_SYNC) {
7336 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7337 aprint_error_dev(sc->sc_dev,
7338 "%s: failed to get semaphore\n", __func__);
7339 return;
7340 }
7341 } else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
7342 if (wm_get_swfwhw_semaphore(sc)) {
7343 aprint_error_dev(sc->sc_dev,
7344 "%s: failed to get semaphore\n", __func__);
7345 return;
7346 }
7347 }
7348
7349 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7350 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7351 (val & KUMCTRLSTA_MASK));
7352
7353 if (sc->sc_flags & WM_F_SWFW_SYNC)
7354 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7355 else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
7356 wm_put_swfwhw_semaphore(sc);
7357 }
7358
7359 static int
7360 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7361 {
7362 uint32_t eecd = 0;
7363
7364 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7365 || sc->sc_type == WM_T_82583) {
7366 eecd = CSR_READ(sc, WMREG_EECD);
7367
7368 /* Isolate bits 15 & 16 */
7369 eecd = ((eecd >> 15) & 0x03);
7370
7371 /* If both bits are set, device is Flash type */
7372 if (eecd == 0x03)
7373 return 0;
7374 }
7375 return 1;
7376 }
7377
7378 static int
7379 wm_get_swsm_semaphore(struct wm_softc *sc)
7380 {
7381 int32_t timeout;
7382 uint32_t swsm;
7383
7384 /* Get the FW semaphore. */
7385 timeout = 1000 + 1; /* XXX */
7386 while (timeout) {
7387 swsm = CSR_READ(sc, WMREG_SWSM);
7388 swsm |= SWSM_SWESMBI;
7389 CSR_WRITE(sc, WMREG_SWSM, swsm);
7390 /* if we managed to set the bit we got the semaphore. */
7391 swsm = CSR_READ(sc, WMREG_SWSM);
7392 if (swsm & SWSM_SWESMBI)
7393 break;
7394
7395 delay(50);
7396 timeout--;
7397 }
7398
7399 if (timeout == 0) {
7400 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7401 /* Release semaphores */
7402 wm_put_swsm_semaphore(sc);
7403 return 1;
7404 }
7405 return 0;
7406 }
7407
7408 static void
7409 wm_put_swsm_semaphore(struct wm_softc *sc)
7410 {
7411 uint32_t swsm;
7412
7413 swsm = CSR_READ(sc, WMREG_SWSM);
7414 swsm &= ~(SWSM_SWESMBI);
7415 CSR_WRITE(sc, WMREG_SWSM, swsm);
7416 }
7417
7418 static int
7419 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7420 {
7421 uint32_t swfw_sync;
7422 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7423 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7424 int timeout;
7425
7426 for (timeout = 0; timeout < 200; timeout++) {
7427 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7428 if (wm_get_swsm_semaphore(sc)) {
7429 aprint_error_dev(sc->sc_dev,
7430 "%s: failed to get semaphore\n",
7431 __func__);
7432 return 1;
7433 }
7434 }
7435 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7436 if ((swfw_sync & (swmask | fwmask)) == 0) {
7437 swfw_sync |= swmask;
7438 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7439 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7440 wm_put_swsm_semaphore(sc);
7441 return 0;
7442 }
7443 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7444 wm_put_swsm_semaphore(sc);
7445 delay(5000);
7446 }
7447 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7448 device_xname(sc->sc_dev), mask, swfw_sync);
7449 return 1;
7450 }
7451
7452 static void
7453 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7454 {
7455 uint32_t swfw_sync;
7456
7457 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7458 while (wm_get_swsm_semaphore(sc) != 0)
7459 continue;
7460 }
7461 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7462 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7463 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7464 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7465 wm_put_swsm_semaphore(sc);
7466 }
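
/*
 * Usage sketch (editor's addition, not built): the SW/FW semaphore
 * must be held around any access that firmware may also arbitrate,
 * and released with the same mask that was acquired.  This mirrors
 * the pattern used by the PHY accessors above.
 */
#if 0
	int sem = swfwphysem[sc->sc_funcid];

	if (wm_get_swfw_semaphore(sc, sem))
		return;		/* failed to acquire; bail out */
	/* ... touch the shared PHY/NVM resource here ... */
	wm_put_swfw_semaphore(sc, sem);
#endif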
7467
7468 static int
7469 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7470 {
7471 uint32_t ext_ctrl;
7472 int timeout;
7473
7474 for (timeout = 0; timeout < 200; timeout++) {
7475 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7476 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7477 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7478
7479 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7480 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7481 return 0;
7482 delay(5000);
7483 }
7484 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7485 device_xname(sc->sc_dev), ext_ctrl);
7486 return 1;
7487 }
7488
7489 static void
7490 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7491 {
7492 uint32_t ext_ctrl;
7493 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7494 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7495 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7496 }
7497
7498 static int
7499 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7500 {
7501 uint32_t eecd;
7502 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7503 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7504 uint8_t sig_byte = 0;
7505
7506 switch (sc->sc_type) {
7507 case WM_T_ICH8:
7508 case WM_T_ICH9:
7509 eecd = CSR_READ(sc, WMREG_EECD);
7510 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7511 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7512 return 0;
7513 }
7514 /* FALLTHROUGH */
7515 default:
7516 /* Default to 0 */
7517 *bank = 0;
7518
7519 /* Check bank 0 */
7520 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7521 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7522 *bank = 0;
7523 return 0;
7524 }
7525
7526 /* Check bank 1 */
7527 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7528 &sig_byte);
7529 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7530 *bank = 1;
7531 return 0;
7532 }
7533 }
7534
7535 aprint_error_dev(sc->sc_dev, "EEPROM not present\n");
7536 return -1;
7537 }
7538
7539 /******************************************************************************
7540 * Reads a 16-bit word or words from the EEPROM using the ICH8's flash access
7541 * register.
7542 *
7543 * sc - Struct containing variables accessed by shared code
7544 * offset - offset of word in the EEPROM to read
7545 * data - word read from the EEPROM
7546 * words - number of words to read
7547 *****************************************************************************/
7548 static int
7549 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7550 {
7551 int32_t error = 0;
7552 uint32_t flash_bank = 0;
7553 uint32_t act_offset = 0;
7554 uint32_t bank_offset = 0;
7555 uint16_t word = 0;
7556 uint16_t i = 0;
7557
7558 /* We need to know which is the valid flash bank. In the event
7559 * that we didn't allocate eeprom_shadow_ram, we may not be
7560 * managing flash_bank. So it cannot be trusted and needs
7561 * to be updated with each read.
7562 */
7563 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7564 if (error) {
7565 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7566 __func__);
7567 return error;
7568 }
7569
7570 /*
7571 * Compute the byte offset of the selected bank (the bank size is
7572 * in words, hence the * 2)
7573 */
7574 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7575
7576 error = wm_get_swfwhw_semaphore(sc);
7577 if (error) {
7578 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7579 __func__);
7580 return error;
7581 }
7582
7583 for (i = 0; i < words; i++) {
7584 /* The NVM part needs a byte offset, hence * 2 */
7585 act_offset = bank_offset + ((offset + i) * 2);
7586 error = wm_read_ich8_word(sc, act_offset, &word);
7587 if (error) {
7588 aprint_error_dev(sc->sc_dev,
7589 "%s: failed to read NVM\n", __func__);
7590 break;
7591 }
7592 data[i] = word;
7593 }
7594
7595 wm_put_swfwhw_semaphore(sc);
7596 return error;
7597 }
7598
7599 /******************************************************************************
7600 * This function does initial flash setup so that a new read/write/erase cycle
7601 * can be started.
7602 *
7603 * sc - The pointer to the hw structure
7604 ****************************************************************************/
7605 static int32_t
7606 wm_ich8_cycle_init(struct wm_softc *sc)
7607 {
7608 uint16_t hsfsts;
7609 int32_t error = 1;
7610 int32_t i = 0;
7611
7612 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7613
7614 /* Check the Flash Descriptor Valid bit in Hw status */
7615 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7616 return error;
7617 }
7618
	/* Clear FCERR and DAEL in HW status by writing 1s */
7621 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7622
7623 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7624
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against, in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after hardware
	 * reset, which can then be used as an indication of whether a
	 * cycle is in progress or has been completed.  We should also have
	 * some software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that accesses to those bits by two
	 * threads are serialized, or some way to keep two threads from
	 * starting a cycle at the same time.
	 */
7635
7636 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/* No cycle is running at present, so we can start one. */
7641
7642 /* Begin by setting Flash Cycle Done. */
7643 hsfsts |= HSFSTS_DONE;
7644 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7645 error = 0;
7646 } else {
		/*
		 * Otherwise, poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
7651 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7652 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7653 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7654 error = 0;
7655 break;
7656 }
7657 delay(1);
7658 }
7659 if (error == 0) {
			/*
			 * The previous cycle completed within the timeout,
			 * so now set the Flash Cycle Done bit.
			 */
7664 hsfsts |= HSFSTS_DONE;
7665 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7666 }
7667 }
7668 return error;
7669 }
7670
7671 /******************************************************************************
7672 * This function starts a flash cycle and waits for its completion
7673 *
7674 * sc - The pointer to the hw structure
7675 ****************************************************************************/
7676 static int32_t
7677 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7678 {
7679 uint16_t hsflctl;
7680 uint16_t hsfsts;
7681 int32_t error = 1;
7682 uint32_t i = 0;
7683
7684 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7685 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7686 hsflctl |= HSFCTL_GO;
7687 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7688
	/* Wait until the FDONE bit is set to 1 */
7690 do {
7691 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7692 if (hsfsts & HSFSTS_DONE)
7693 break;
7694 delay(1);
7695 i++;
7696 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7698 error = 0;
7699
7700 return error;
7701 }
7702
7703 /******************************************************************************
7704 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7705 *
7706 * sc - The pointer to the hw structure
7707 * index - The index of the byte or word to read.
7708 * size - Size of data to read, 1=byte 2=word
7709 * data - Pointer to the word to store the value read.
7710 *****************************************************************************/
7711 static int32_t
7712 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7713 uint32_t size, uint16_t* data)
7714 {
7715 uint16_t hsfsts;
7716 uint16_t hsflctl;
7717 uint32_t flash_linear_address;
7718 uint32_t flash_data = 0;
7719 int32_t error = 1;
7720 int32_t count = 0;
7721
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
7724 return error;
7725
7726 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7727 sc->sc_ich8_flash_base;
7728
7729 do {
7730 delay(1);
7731 /* Steps */
7732 error = wm_ich8_cycle_init(sc);
7733 if (error)
7734 break;
7735
7736 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* A BCOUNT of 0b/1b corresponds to a 1- or 2-byte transfer. */
7738 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7739 & HSFCTL_BCOUNT_MASK;
7740 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7741 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7742
7743 /*
7744 * Write the last 24 bits of index into Flash Linear address
7745 * field in Flash Address
7746 */
		/* TODO: maybe check the index against the size of the flash */
7748
7749 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7750
7751 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7752
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the data out of the
		 * Flash Data0 register, least significant byte first.
		 */
7759 if (error == 0) {
7760 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7761 if (size == 1)
7762 *data = (uint8_t)(flash_data & 0x000000FF);
7763 else if (size == 2)
7764 *data = (uint16_t)(flash_data & 0x0000FFFF);
7765 break;
7766 } else {
7767 /*
7768 * If we've gotten here, then things are probably
7769 * completely hosed, but if the error condition is
7770 * detected, it won't hurt to give it another try...
7771 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7772 */
7773 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7774 if (hsfsts & HSFSTS_ERR) {
7775 /* Repeat for some time before giving up. */
7776 continue;
7777 } else if ((hsfsts & HSFSTS_DONE) == 0)
7778 break;
7779 }
7780 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7781
7782 return error;
7783 }
7784
7785 /******************************************************************************
7786 * Reads a single byte from the NVM using the ICH8 flash access registers.
7787 *
 * sc - pointer to the wm_softc structure
7789 * index - The index of the byte to read.
7790 * data - Pointer to a byte to store the value read.
7791 *****************************************************************************/
7792 static int32_t
7793 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7794 {
7795 int32_t status;
7796 uint16_t word = 0;
7797
7798 status = wm_read_ich8_data(sc, index, 1, &word);
7799 if (status == 0)
7800 *data = (uint8_t)word;
7801 else
7802 *data = 0;
7803
7804 return status;
7805 }
7806
7807 /******************************************************************************
7808 * Reads a word from the NVM using the ICH8 flash access registers.
7809 *
 * sc - pointer to the wm_softc structure
7811 * index - The starting byte index of the word to read.
7812 * data - Pointer to a word to store the value read.
7813 *****************************************************************************/
7814 static int32_t
7815 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7816 {
7817 int32_t status;
7818
7819 status = wm_read_ich8_data(sc, index, 2, data);
7820 return status;
7821 }
7822
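/*
 * Check whether the hardware's management mode is enabled, dispatching
 * to the appropriate chip-specific routine.  Returns non-zero if
 * management mode is active.
 */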
7823 static int
7824 wm_check_mng_mode(struct wm_softc *sc)
7825 {
7826 int rv;
7827
7828 switch (sc->sc_type) {
7829 case WM_T_ICH8:
7830 case WM_T_ICH9:
7831 case WM_T_ICH10:
7832 case WM_T_PCH:
7833 case WM_T_PCH2:
7834 case WM_T_PCH_LPT:
7835 rv = wm_check_mng_mode_ich8lan(sc);
7836 break;
7837 case WM_T_82574:
7838 case WM_T_82583:
7839 rv = wm_check_mng_mode_82574(sc);
7840 break;
7841 case WM_T_82571:
7842 case WM_T_82572:
7843 case WM_T_82573:
7844 case WM_T_80003:
7845 rv = wm_check_mng_mode_generic(sc);
7846 break;
7847 default:
		/* nothing to do */
7849 rv = 0;
7850 break;
7851 }
7852
7853 return rv;
7854 }
7855
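/*
 * ICH/PCH variants: the FWSM register indicates whether the firmware
 * is in ICH iAMT mode.
 */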
7856 static int
7857 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7858 {
7859 uint32_t fwsm;
7860
7861 fwsm = CSR_READ(sc, WMREG_FWSM);
7862
7863 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7864 return 1;
7865
7866 return 0;
7867 }
7868
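/* 82574/82583: the MNGM bits in NVM word CFG2 indicate management mode */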
7869 static int
7870 wm_check_mng_mode_82574(struct wm_softc *sc)
7871 {
7872 uint16_t data;
7873
7874 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7875
7876 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7877 return 1;
7878
7879 return 0;
7880 }
7881
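/* Generic variant: the FWSM register indicates iAMT mode */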
7882 static int
7883 wm_check_mng_mode_generic(struct wm_softc *sc)
7884 {
7885 uint32_t fwsm;
7886
7887 fwsm = CSR_READ(sc, WMREG_FWSM);
7888
7889 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7890 return 1;
7891
7892 return 0;
7893 }
7894
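/*
 * Return 1 if the firmware supports management pass-through, i.e. it
 * expects management packets (TCO) to be forwarded to the host; the
 * result is used to decide whether to set WM_F_HAS_MANAGE.
 */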
7895 static int
7896 wm_enable_mng_pass_thru(struct wm_softc *sc)
7897 {
7898 uint32_t manc, fwsm, factps;
7899
7900 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7901 return 0;
7902
7903 manc = CSR_READ(sc, WMREG_MANC);
7904
7905 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7906 device_xname(sc->sc_dev), manc));
7907 if (((manc & MANC_RECV_TCO_EN) == 0)
7908 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7909 return 0;
7910
7911 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7912 fwsm = CSR_READ(sc, WMREG_FWSM);
7913 factps = CSR_READ(sc, WMREG_FACTPS);
7914 if (((factps & FACTPS_MNGCG) == 0)
7915 && ((fwsm & FWSM_MODE_MASK)
7916 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7917 return 1;
7918 } else if (((manc & MANC_SMBUS_EN) != 0)
7919 && ((manc & MANC_ASF_EN) == 0))
7920 return 1;
7921
7922 return 0;
7923 }
7924
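/*
 * Return non-zero if a PHY reset is currently blocked (e.g. the
 * firmware owns the PHY), zero if resetting the PHY is permitted.
 */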
7925 static int
7926 wm_check_reset_block(struct wm_softc *sc)
7927 {
7928 uint32_t reg;
7929
7930 switch (sc->sc_type) {
7931 case WM_T_ICH8:
7932 case WM_T_ICH9:
7933 case WM_T_ICH10:
7934 case WM_T_PCH:
7935 case WM_T_PCH2:
7936 case WM_T_PCH_LPT:
7937 reg = CSR_READ(sc, WMREG_FWSM);
7938 if ((reg & FWSM_RSPCIPHY) != 0)
7939 return 0;
7940 else
7941 return -1;
7943 case WM_T_82571:
7944 case WM_T_82572:
7945 case WM_T_82573:
7946 case WM_T_82574:
7947 case WM_T_82583:
7948 case WM_T_80003:
7949 reg = CSR_READ(sc, WMREG_MANC);
7950 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7951 return -1;
7952 else
7953 return 0;
7955 default:
7956 /* no problem */
7957 break;
7958 }
7959
7960 return 0;
7961 }
7962
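/*
 * Tell the firmware that the driver has taken over the device: set the
 * DRV_LOAD bit (in SWSM on the 82573, otherwise in CTRL_EXT).
 */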
7963 static void
7964 wm_get_hw_control(struct wm_softc *sc)
7965 {
7966 uint32_t reg;
7967
7968 switch (sc->sc_type) {
7969 case WM_T_82573:
7970 reg = CSR_READ(sc, WMREG_SWSM);
7971 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7972 break;
7973 case WM_T_82571:
7974 case WM_T_82572:
7975 case WM_T_82574:
7976 case WM_T_82583:
7977 case WM_T_80003:
7978 case WM_T_ICH8:
7979 case WM_T_ICH9:
7980 case WM_T_ICH10:
7981 case WM_T_PCH:
7982 case WM_T_PCH2:
7983 case WM_T_PCH_LPT:
7984 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7985 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7986 break;
7987 default:
7988 break;
7989 }
7990 }
7991
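/* Tell the firmware that the driver is giving up control of the device */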
7992 static void
7993 wm_release_hw_control(struct wm_softc *sc)
7994 {
7995 uint32_t reg;
7996
7997 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7998 return;
7999
8000 if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
8004 } else {
8005 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8006 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8007 }
8008 }
8009
8010 /* XXX Currently TBI only */
8011 static int
8012 wm_check_for_link(struct wm_softc *sc)
8013 {
8014 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8015 uint32_t rxcw;
8016 uint32_t ctrl;
8017 uint32_t status;
8018 uint32_t sig;
8019
8020 rxcw = CSR_READ(sc, WMREG_RXCW);
8021 ctrl = CSR_READ(sc, WMREG_CTRL);
8022 status = CSR_READ(sc, WMREG_STATUS);
8023
8024 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8025
8026 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8027 device_xname(sc->sc_dev), __func__,
8028 ((ctrl & CTRL_SWDPIN(1)) == sig),
8029 ((status & STATUS_LU) != 0),
8030 ((rxcw & RXCW_C) != 0)
8031 ));
8032
8033 /*
8034 * SWDPIN LU RXCW
8035 * 0 0 0
8036 * 0 0 1 (should not happen)
8037 * 0 1 0 (should not happen)
8038 * 0 1 1 (should not happen)
8039 * 1 0 0 Disable autonego and force linkup
8040 * 1 0 1 got /C/ but not linkup yet
8041 * 1 1 0 (linkup)
8042 * 1 1 1 If IFM_AUTO, back to autonego
8043 *
8044 */
8045 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8046 && ((status & STATUS_LU) == 0)
8047 && ((rxcw & RXCW_C) == 0)) {
8048 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8049 __func__));
8050 sc->sc_tbi_linkup = 0;
8051 /* Disable auto-negotiation in the TXCW register */
8052 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8053
8054 /*
8055 * Force link-up and also force full-duplex.
8056 *
		 * NOTE: the hardware may have updated TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl from it.
8059 */
8060 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8061 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8062 } else if (((status & STATUS_LU) != 0)
8063 && ((rxcw & RXCW_C) != 0)
8064 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8065 sc->sc_tbi_linkup = 1;
8066 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8067 __func__));
8068 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8069 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8070 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8071 && ((rxcw & RXCW_C) != 0)) {
8072 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8073 } else {
8074 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8075 status));
8076 }
8077
8078 return 0;
8079 }
8080
8081 /* Work-around for 82566 Kumeran PCS lock loss */
8082 static void
8083 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8084 {
8085 int miistatus, active, i;
8086 int reg;
8087
8088 miistatus = sc->sc_mii.mii_media_status;
8089
8090 /* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
8092 return;
8093
8094 active = sc->sc_mii.mii_media_active;
8095
	/* Nothing to do if the link speed is not 1 Gbps */
8097 if (IFM_SUBTYPE(active) != IFM_1000_T)
8098 return;
8099
8100 for (i = 0; i < 10; i++) {
8101 /* read twice */
8102 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8103 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
8105 goto out; /* GOOD! */
8106
8107 /* Reset the PHY */
8108 wm_gmii_reset(sc);
8109 delay(5*1000);
8110 }
8111
8112 /* Disable GigE link negotiation */
8113 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8114 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8115 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8116
8117 /*
8118 * Call gig speed drop workaround on Gig disable before accessing
8119 * any PHY registers.
8120 */
8121 wm_gig_downshift_workaround_ich8lan(sc);
8122
8123 out:
8124 return;
8125 }
8126
/* Workaround for the "WOL from S5 stops working" issue: gig downshift */
8128 static void
8129 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8130 {
8131 uint16_t kmrn_reg;
8132
8133 /* Only for igp3 */
8134 if (sc->sc_phytype == WMPHY_IGP_3) {
8135 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8136 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8137 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8138 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8139 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8140 }
8141 }
8142
8143 #ifdef WM_WOL
8144 /* Power down workaround on D3 */
8145 static void
8146 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8147 {
8148 uint32_t reg;
8149 int i;
8150
8151 for (i = 0; i < 2; i++) {
8152 /* Disable link */
8153 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8154 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8155 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8156
8157 /*
8158 * Call gig speed drop workaround on Gig disable before
8159 * accessing any PHY registers
8160 */
8161 if (sc->sc_type == WM_T_ICH8)
8162 wm_gig_downshift_workaround_ich8lan(sc);
8163
8164 /* Write VR power-down enable */
8165 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8166 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8167 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8168 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8169
8170 /* Read it back and test */
8171 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8172 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8173 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8174 break;
8175
8176 /* Issue PHY reset and repeat at most one more time */
8177 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8178 }
8179 }
8180 #endif /* WM_WOL */
8181
8182 /*
8183 * Workaround for pch's PHYs
8184 * XXX should be moved to new PHY driver?
8185 */
8186 static void
8187 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8188 {
8189 if (sc->sc_phytype == WMPHY_82577)
8190 wm_set_mdio_slow_mode_hv(sc);
8191
8192 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8193
8194 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8195
8196 /* 82578 */
8197 if (sc->sc_phytype == WMPHY_82578) {
8198 /* PCH rev. < 3 */
8199 if (sc->sc_rev < 3) {
8200 /* XXX 6 bit shift? Why? Is it page2? */
8201 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8202 0x66c0);
8203 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8204 0xffff);
8205 }
8206
8207 /* XXX phy rev. < 2 */
8208 }
8209
8210 /* Select page 0 */
8211
8212 /* XXX acquire semaphore */
8213 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8214 /* XXX release semaphore */
8215
8216 /*
8217 * Configure the K1 Si workaround during phy reset assuming there is
8218 * link so that it disables K1 if link is in 1Gbps.
8219 */
8220 wm_k1_gig_workaround_hv(sc, 1);
8221 }
8222
8223 static void
8224 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8225 {
8226
8227 wm_set_mdio_slow_mode_hv(sc);
8228 }
8229
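/*
 * K1 Si workaround: keep the Kumeran K1 power-saving state disabled
 * while the link is up, and apply the link stall fix for the current
 * link state.
 */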
8230 static void
8231 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8232 {
8233 int k1_enable = sc->sc_nvm_k1_enabled;
8234
8235 /* XXX acquire semaphore */
8236
8237 if (link) {
8238 k1_enable = 0;
8239
8240 /* Link stall fix for link up */
8241 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8242 } else {
8243 /* Link stall fix for link down */
8244 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8245 }
8246
8247 wm_configure_k1_ich8lan(sc, k1_enable);
8248
8249 /* XXX release semaphore */
8250 }
8251
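/* Put the PHY's MDIO interface into slow mode */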
8252 static void
8253 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8254 {
8255 uint32_t reg;
8256
8257 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8258 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8259 reg | HV_KMRN_MDIO_SLOW);
8260 }
8261
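/*
 * Enable or disable the K1 power state via the KMRN registers, briefly
 * forcing the MAC speed setting while the change takes effect.
 */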
8262 static void
8263 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8264 {
8265 uint32_t ctrl, ctrl_ext, tmp;
8266 uint16_t kmrn_reg;
8267
8268 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8269
8270 if (k1_enable)
8271 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8272 else
8273 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8274
8275 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8276
8277 delay(20);
8278
8279 ctrl = CSR_READ(sc, WMREG_CTRL);
8280 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8281
8282 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8283 tmp |= CTRL_FRCSPD;
8284
8285 CSR_WRITE(sc, WMREG_CTRL, tmp);
8286 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8287 delay(20);
8288
8289 CSR_WRITE(sc, WMREG_CTRL, ctrl);
8290 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8291 delay(20);
8292 }
8293
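/*
 * Switch the PHY interface from SMBus back to PCIe by toggling the
 * LANPHYPC value override, unless valid firmware is present.
 */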
8294 static void
8295 wm_smbustopci(struct wm_softc *sc)
8296 {
8297 uint32_t fwsm;
8298
8299 fwsm = CSR_READ(sc, WMREG_FWSM);
8300 if (((fwsm & FWSM_FW_VALID) == 0)
8301 && ((wm_check_reset_block(sc) == 0))) {
8302 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8303 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8304 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8305 delay(10);
8306 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8307 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8308 delay(50*1000);
8309
8310 /*
8311 * Gate automatic PHY configuration by hardware on non-managed
8312 * 82579
8313 */
8314 if (sc->sc_type == WM_T_PCH2)
8315 wm_gate_hw_phy_config_ich8lan(sc, 1);
8316 }
8317 }
8318
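/*
 * Set the PCIe completion timeout, but only if it is still at its
 * default of 0: without capability version 2 a 10ms timeout is set in
 * GCR; with it, a 16ms timeout is set via the PCIe DCSR2 register.
 * Completion timeout resend is disabled in either case.
 */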
8319 static void
8320 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8321 {
8322 uint32_t gcr;
8323 pcireg_t ctrl2;
8324
8325 gcr = CSR_READ(sc, WMREG_GCR);
8326
8327 /* Only take action if timeout value is defaulted to 0 */
8328 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8329 goto out;
8330
8331 if ((gcr & GCR_CAP_VER2) == 0) {
8332 gcr |= GCR_CMPL_TMOUT_10MS;
8333 goto out;
8334 }
8335
8336 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8337 sc->sc_pcixe_capoff + PCIE_DCSR2);
8338 ctrl2 |= WM_PCIE_DCSR2_16MS;
8339 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8340 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8341
8342 out:
8343 /* Disable completion timeout resend */
8344 gcr &= ~GCR_CMPL_TMOUT_RESEND;
8345
8346 CSR_WRITE(sc, WMREG_GCR, gcr);
8347 }
8348
/* Special case for the 82575: manual init is needed after reset */
8350 static void
8351 wm_reset_init_script_82575(struct wm_softc *sc)
8352 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
8357
8358 /* SerDes configuration via SERDESCTRL */
8359 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8360 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8361 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8362 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8363
8364 /* CCM configuration via CCMCTL register */
8365 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8366 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8367
8368 /* PCIe lanes configuration */
8369 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8370 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8371 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8372 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8373
8374 /* PCIe PLL Configuration */
8375 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8376 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8377 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8378 }
8379
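/*
 * If the hardware has manageability, stop the firmware from
 * intercepting ARP and, on 82571 and newer, let management packets
 * through to the host.
 */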
8380 static void
8381 wm_init_manageability(struct wm_softc *sc)
8382 {
8383
8384 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8385 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8386 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8387
		/* Disable hardware interception of ARP */
8389 manc &= ~MANC_ARP_EN;
8390
		/* Enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}
8398
8399 CSR_WRITE(sc, WMREG_MANC, manc);
8400 }
8401 }
8402
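/* Undo wm_init_manageability(): stop forwarding management packets */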
8403 static void
8404 wm_release_manageability(struct wm_softc *sc)
8405 {
8406
8407 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8408 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8409
8410 if (sc->sc_type >= WM_T_82571)
8411 manc &= ~MANC_EN_MNG2HOST;
8412
8413 CSR_WRITE(sc, WMREG_MANC, manc);
8414 }
8415 }
8416
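/*
 * Work out which wakeup/management features this chip has and record
 * them in sc_flags (HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES and,
 * via wm_enable_mng_pass_thru(), HAS_MANAGE).
 */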
8417 static void
8418 wm_get_wakeup(struct wm_softc *sc)
8419 {
8420
8421 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8422 switch (sc->sc_type) {
8423 case WM_T_82573:
8424 case WM_T_82583:
8425 sc->sc_flags |= WM_F_HAS_AMT;
8426 /* FALLTHROUGH */
8427 case WM_T_80003:
8428 case WM_T_82541:
8429 case WM_T_82547:
8430 case WM_T_82571:
8431 case WM_T_82572:
8432 case WM_T_82574:
8433 case WM_T_82575:
8434 case WM_T_82576:
8435 #if 0 /* XXX */
8436 case WM_T_82580:
8437 case WM_T_82580ER:
8438 case WM_T_I350:
8439 #endif
8440 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8441 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8442 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8443 break;
8444 case WM_T_ICH8:
8445 case WM_T_ICH9:
8446 case WM_T_ICH10:
8447 case WM_T_PCH:
8448 case WM_T_PCH2:
8449 case WM_T_PCH_LPT:
8450 sc->sc_flags |= WM_F_HAS_AMT;
8451 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8452 break;
8453 default:
8454 break;
8455 }
8456
8457 /* 1: HAS_MANAGE */
8458 if (wm_enable_mng_pass_thru(sc) != 0)
8459 sc->sc_flags |= WM_F_HAS_MANAGE;
8460
8461 #ifdef WM_DEBUG
8462 printf("\n");
8463 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8464 printf("HAS_AMT,");
8465 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8466 printf("ARC_SUBSYS_VALID,");
8467 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8468 printf("ASF_FIRMWARE_PRES,");
8469 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8470 printf("HAS_MANAGE,");
8471 printf("\n");
8472 #endif
	/*
	 * Note that the WOL flags are set after the EEPROM handling
	 * has been reset.
	 */
8477 }
8478
8479 #ifdef WM_WOL
8480 /* WOL in the newer chipset interfaces (pchlan) */
8481 static void
8482 wm_enable_phy_wakeup(struct wm_softc *sc)
8483 {
8484 #if 0
8485 uint16_t preg;
8486
8487 /* Copy MAC RARs to PHY RARs */
8488
8489 /* Copy MAC MTA to PHY MTA */
8490
8491 /* Configure PHY Rx Control register */
8492
8493 /* Enable PHY wakeup in MAC register */
8494
8495 /* Configure and enable PHY wakeup in PHY registers */
8496
8497 /* Activate PHY wakeup */
8498
8499 /* XXX */
8500 #endif
8501 }
8502
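/*
 * Arm the device for wake-on-LAN: advertise the wakeup capability,
 * apply the ICH/PCH workarounds, program the wakeup filters and
 * enable PME in the PCI power management registers.
 */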
8503 static void
8504 wm_enable_wakeup(struct wm_softc *sc)
8505 {
8506 uint32_t reg, pmreg;
8507 pcireg_t pmode;
8508
8509 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8510 &pmreg, NULL) == 0)
8511 return;
8512
8513 /* Advertise the wakeup capability */
8514 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8515 | CTRL_SWDPIN(3));
8516 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8517
8518 /* ICH workaround */
8519 switch (sc->sc_type) {
8520 case WM_T_ICH8:
8521 case WM_T_ICH9:
8522 case WM_T_ICH10:
8523 case WM_T_PCH:
8524 case WM_T_PCH2:
8525 case WM_T_PCH_LPT:
8526 /* Disable gig during WOL */
8527 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8528 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8529 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8530 if (sc->sc_type == WM_T_PCH)
8531 wm_gmii_reset(sc);
8532
8533 /* Power down workaround */
8534 if (sc->sc_phytype == WMPHY_82577) {
8535 struct mii_softc *child;
8536
8537 /* Assume that the PHY is copper */
8538 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8539 if (child->mii_mpd_rev <= 2)
8540 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8541 (768 << 5) | 25, 0x0444); /* magic num */
8542 }
8543 break;
8544 default:
8545 break;
8546 }
8547
8548 /* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
8551 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8552 reg |= CTRL_EXT_SWDPIN(3);
8553 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8554 }
8555
8556 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8557 #if 0 /* for the multicast packet */
8558 reg |= WUFC_MC;
8559 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8560 #endif
8561
8562 if (sc->sc_type == WM_T_PCH) {
8563 wm_enable_phy_wakeup(sc);
8564 } else {
8565 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8566 CSR_WRITE(sc, WMREG_WUFC, reg);
8567 }
8568
8569 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8570 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8571 || (sc->sc_type == WM_T_PCH2))
8572 && (sc->sc_phytype == WMPHY_IGP_3))
8573 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8574
8575 /* Request PME */
8576 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8577 #if 0
8578 /* Disable WOL */
8579 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8580 #else
8581 /* For WOL */
8582 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8583 #endif
8584 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8585 }
8586 #endif /* WM_WOL */
8587
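/* pmf(9) suspend/resume handlers */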
8588 static bool
8589 wm_suspend(device_t self, const pmf_qual_t *qual)
8590 {
8591 struct wm_softc *sc = device_private(self);
8592
8593 wm_release_manageability(sc);
8594 wm_release_hw_control(sc);
8595 #ifdef WM_WOL
8596 wm_enable_wakeup(sc);
8597 #endif
8598
8599 return true;
8600 }
8601
8602 static bool
8603 wm_resume(device_t self, const pmf_qual_t *qual)
8604 {
8605 struct wm_softc *sc = device_private(self);
8606
8607 wm_init_manageability(sc);
8608
8609 return true;
8610 }
8611
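/*
 * Enable or disable Energy Efficient Ethernet (EEE) on the I350
 * according to the WM_F_EEE flag.
 */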
8612 static void
wm_set_eee_i350(struct wm_softc *sc)
8614 {
8615 uint32_t ipcnfg, eeer;
8616
8617 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8618 eeer = CSR_READ(sc, WMREG_EEER);
8619
8620 if ((sc->sc_flags & WM_F_EEE) != 0) {
8621 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8622 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8623 | EEER_LPI_FC);
8624 } else {
8625 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8626 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8627 | EEER_LPI_FC);
8628 }
8629
8630 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8631 CSR_WRITE(sc, WMREG_EEER, eeer);
8632 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8633 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8634 }
8635