/*	$NetBSD: if_wm.c,v 1.245 2013/02/13 16:58:04 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.245 2013/02/13 16:58:04 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
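/*
 * Note: the ring-index macros below wrap with a bitwise AND against
 * (size - 1), so the descriptor counts and job-queue lengths must all
 * be powers of two (as also noted in struct wm_softc).
 */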
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
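/*
 * The WM_CD*OFF() macros give the byte offset of an individual descriptor
 * within the control-data clump; they are used below both for partial
 * bus_dmamap_sync() calls and for computing descriptor DMA addresses.
 */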

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

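/*
 * The Rx chain keeps a pointer to the tail mbuf's m_next field
 * (sc_rxtailp), so linking another buffer onto a multi-buffer packet
 * is a constant-time operation.
 */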
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

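/*
 * Descriptor DMA addresses are split into 32-bit halves for the chip's
 * base-address registers; the high half is zero when bus_addr_t is only
 * 32 bits wide.
 */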
#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * Not sure whether this should be WMP_F_1000X or WMP_F_SERDES;
	 * we don't have the hardware, so it's disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

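/*
 * The i8254x I/O BAR is an indirect window: the register offset is
 * written at BAR offset 0 (IOADDR) and the data is transferred at
 * BAR offset 4 (IODATA).
 */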
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

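/*
 * Write one value to an 8-bit controller register behind an SCTL-style
 * indirection register: the data and target offset are packed into a
 * single register write, and the READY bit is then polled until the
 * hardware has consumed it.
 */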
static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
	uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

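/*
 * Set a descriptor/buffer DMA address, splitting it into the low and
 * high 32-bit (little-endian) words that the hardware expects.
 */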
static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

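/*
 * Mark the EEPROM as SPI and pick the address width: the EECD_EE_ABITS
 * bit selects between 16-bit and 8-bit SPI addressing.
 */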
static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the
			 * 82580 (and newer?) chips have no
			 * PCI_MAPREG_TYPE_IO; that's not a problem,
			 * because those chips don't have this bug anyway.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it
			 * hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
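	/*
	 * The 4G constraint is enforced below by handing bus_dmamem_alloc()
	 * a 1ULL << 32 boundary, so the single allocation can never
	 * straddle a 4G line.
	 */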
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
		    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
		    sc->sc_cd_rseg, sc->sc_cd_size,
		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0,
			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/* get PHY control from SMBus to PCIe */
	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
		wm_smbustopci(sc);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
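		/*
		 * GFPREG holds the NVM region's base and limit in flash
		 * sectors; the region is split into two banks, and the
		 * sizes computed below are kept in 16-bit words.
		 */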
1600 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1601 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1602 ICH_FLASH_SECTOR_SIZE;
1603 sc->sc_ich8_flash_bank_size =
1604 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1605 sc->sc_ich8_flash_bank_size -=
1606 (reg & ICH_GFPREG_BASE_MASK);
1607 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1608 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
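		/*
		 * Illustrative example (values assumed, 4 KB sectors): a
		 * GFPREG with base 0x00 and limit 0x1f describes a
		 * 32-sector (128 KB) region; split into two banks and
		 * counted in 16-bit words, that is 32768 words per bank.
		 */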
1609 break;
1610 default:
1611 break;
1612 }
1613
1614 /*
1615 	 * Defer printing the EEPROM type until after verifying the checksum.
1616 * This allows the EEPROM type to be printed correctly in the case
1617 * that no EEPROM is attached.
1618 */
1619 /*
1620 * Validate the EEPROM checksum. If the checksum fails, flag
1621 * this for later, so we can fail future reads from the EEPROM.
1622 */
1623 if (wm_validate_eeprom_checksum(sc)) {
1624 /*
1625 		 * Check a second time, because some PCI-e parts fail the
1626 		 * first check due to the link being in a sleep state.
1627 */
1628 if (wm_validate_eeprom_checksum(sc))
1629 sc->sc_flags |= WM_F_EEPROM_INVALID;
1630 }
1631
1632 /* Set device properties (macflags) */
1633 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1634
1635 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1636 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1637 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1638 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1639 } else {
1640 if (sc->sc_flags & WM_F_EEPROM_SPI)
1641 eetype = "SPI";
1642 else
1643 eetype = "MicroWire";
1644 aprint_verbose_dev(sc->sc_dev,
1645 "%u word (%d address bits) %s EEPROM\n",
1646 1U << sc->sc_ee_addrbits,
1647 sc->sc_ee_addrbits, eetype);
1648 }
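	/*
	 * For example, a MicroWire part with 6 address bits prints as
	 * "64 word (6 address bits) MicroWire EEPROM".
	 */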
1649
1650 /*
1651 	 * Read the Ethernet address from the EEPROM, unless it is
1652 	 * found first in the device properties.
1653 */
1654 ea = prop_dictionary_get(dict, "mac-address");
1655 if (ea != NULL) {
1656 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1657 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1658 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1659 } else {
1660 if (wm_read_mac_addr(sc, enaddr) != 0) {
1661 aprint_error_dev(sc->sc_dev,
1662 "unable to read Ethernet address\n");
1663 return;
1664 }
1665 }
1666
1667 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1668 ether_sprintf(enaddr));
1669
1670 /*
1671 * Read the config info from the EEPROM, and set up various
1672 * bits in the control registers based on their contents.
1673 */
1674 pn = prop_dictionary_get(dict, "i82543-cfg1");
1675 if (pn != NULL) {
1676 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1677 cfg1 = (uint16_t) prop_number_integer_value(pn);
1678 } else {
1679 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1680 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1681 return;
1682 }
1683 }
1684
1685 pn = prop_dictionary_get(dict, "i82543-cfg2");
1686 if (pn != NULL) {
1687 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1688 cfg2 = (uint16_t) prop_number_integer_value(pn);
1689 } else {
1690 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1691 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1692 return;
1693 }
1694 }
1695
1696 /* check for WM_F_WOL */
1697 switch (sc->sc_type) {
1698 case WM_T_82542_2_0:
1699 case WM_T_82542_2_1:
1700 case WM_T_82543:
1701 /* dummy? */
1702 eeprom_data = 0;
1703 apme_mask = EEPROM_CFG3_APME;
1704 break;
1705 case WM_T_82544:
1706 apme_mask = EEPROM_CFG2_82544_APM_EN;
1707 eeprom_data = cfg2;
1708 break;
1709 case WM_T_82546:
1710 case WM_T_82546_3:
1711 case WM_T_82571:
1712 case WM_T_82572:
1713 case WM_T_82573:
1714 case WM_T_82574:
1715 case WM_T_82583:
1716 case WM_T_80003:
1717 default:
1718 apme_mask = EEPROM_CFG3_APME;
1719 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1720 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1721 break;
1722 case WM_T_82575:
1723 case WM_T_82576:
1724 case WM_T_82580:
1725 case WM_T_82580ER:
1726 case WM_T_I350:
1727 case WM_T_ICH8:
1728 case WM_T_ICH9:
1729 case WM_T_ICH10:
1730 case WM_T_PCH:
1731 case WM_T_PCH2:
1732 /* XXX The funcid should be checked on some devices */
1733 apme_mask = WUC_APME;
1734 eeprom_data = CSR_READ(sc, WMREG_WUC);
1735 break;
1736 }
1737
1738 	/* Set the WM_F_WOL flag from the APME bit gathered above */
1739 if ((eeprom_data & apme_mask) != 0)
1740 sc->sc_flags |= WM_F_WOL;
1741 #ifdef WM_DEBUG
1742 if ((sc->sc_flags & WM_F_WOL) != 0)
1743 printf("WOL\n");
1744 #endif
1745
1746 /*
1747 	 * XXX need special handling for some multi-port cards
1748 	 * to disable a particular port.
1749 */
1750
1751 if (sc->sc_type >= WM_T_82544) {
1752 pn = prop_dictionary_get(dict, "i82543-swdpin");
1753 if (pn != NULL) {
1754 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1755 swdpin = (uint16_t) prop_number_integer_value(pn);
1756 } else {
1757 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1758 aprint_error_dev(sc->sc_dev,
1759 "unable to read SWDPIN\n");
1760 return;
1761 }
1762 }
1763 }
1764
1765 if (cfg1 & EEPROM_CFG1_ILOS)
1766 sc->sc_ctrl |= CTRL_ILOS;
1767 if (sc->sc_type >= WM_T_82544) {
1768 sc->sc_ctrl |=
1769 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1770 CTRL_SWDPIO_SHIFT;
1771 sc->sc_ctrl |=
1772 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1773 CTRL_SWDPINS_SHIFT;
1774 } else {
1775 sc->sc_ctrl |=
1776 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1777 CTRL_SWDPIO_SHIFT;
1778 }
1779
1780 #if 0
1781 if (sc->sc_type >= WM_T_82544) {
1782 if (cfg1 & EEPROM_CFG1_IPS0)
1783 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1784 if (cfg1 & EEPROM_CFG1_IPS1)
1785 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1786 sc->sc_ctrl_ext |=
1787 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1788 CTRL_EXT_SWDPIO_SHIFT;
1789 sc->sc_ctrl_ext |=
1790 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1791 CTRL_EXT_SWDPINS_SHIFT;
1792 } else {
1793 sc->sc_ctrl_ext |=
1794 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1795 CTRL_EXT_SWDPIO_SHIFT;
1796 }
1797 #endif
1798
1799 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1800 #if 0
1801 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1802 #endif
1803
1804 /*
1805 * Set up some register offsets that are different between
1806 * the i82542 and the i82543 and later chips.
1807 */
1808 if (sc->sc_type < WM_T_82543) {
1809 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1810 sc->sc_tdt_reg = WMREG_OLD_TDT;
1811 } else {
1812 sc->sc_rdt_reg = WMREG_RDT;
1813 sc->sc_tdt_reg = WMREG_TDT;
1814 }
1815
1816 if (sc->sc_type == WM_T_PCH) {
1817 uint16_t val;
1818
1819 /* Save the NVM K1 bit setting */
1820 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1821
1822 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1823 sc->sc_nvm_k1_enabled = 1;
1824 else
1825 sc->sc_nvm_k1_enabled = 0;
1826 }
1827
1828 /*
1829 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
1830 * media structures accordingly.
1831 */
1832 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1833 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1834 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1835 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1836 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1837 wm_gmii_mediainit(sc, wmp->wmp_product);
1838 } else if (sc->sc_type < WM_T_82543 ||
1839 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1840 if (wmp->wmp_flags & WMP_F_1000T)
1841 aprint_error_dev(sc->sc_dev,
1842 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1843 wm_tbi_mediainit(sc);
1844 } else {
1845 switch (sc->sc_type) {
1846 case WM_T_82575:
1847 case WM_T_82576:
1848 case WM_T_82580:
1849 case WM_T_82580ER:
1850 case WM_T_I350:
1851 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1852 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1853 case CTRL_EXT_LINK_MODE_SGMII:
1854 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1855 sc->sc_flags |= WM_F_SGMII;
1856 CSR_WRITE(sc, WMREG_CTRL_EXT,
1857 reg | CTRL_EXT_I2C_ENA);
1858 wm_gmii_mediainit(sc, wmp->wmp_product);
1859 break;
1860 case CTRL_EXT_LINK_MODE_1000KX:
1861 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1862 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1863 CSR_WRITE(sc, WMREG_CTRL_EXT,
1864 reg | CTRL_EXT_I2C_ENA);
1865 				panic("not supported yet");
1866 break;
1867 case CTRL_EXT_LINK_MODE_GMII:
1868 default:
1869 CSR_WRITE(sc, WMREG_CTRL_EXT,
1870 reg & ~CTRL_EXT_I2C_ENA);
1871 wm_gmii_mediainit(sc, wmp->wmp_product);
1872 break;
1873 }
1874 break;
1875 default:
1876 if (wmp->wmp_flags & WMP_F_1000X)
1877 aprint_error_dev(sc->sc_dev,
1878 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1879 wm_gmii_mediainit(sc, wmp->wmp_product);
1880 }
1881 }
1882
1883 ifp = &sc->sc_ethercom.ec_if;
1884 xname = device_xname(sc->sc_dev);
1885 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1886 ifp->if_softc = sc;
1887 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1888 ifp->if_ioctl = wm_ioctl;
1889 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1890 ifp->if_start = wm_nq_start;
1891 else
1892 ifp->if_start = wm_start;
1893 ifp->if_watchdog = wm_watchdog;
1894 ifp->if_init = wm_init;
1895 ifp->if_stop = wm_stop;
1896 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1897 IFQ_SET_READY(&ifp->if_snd);
1898
1899 /* Check for jumbo frame */
1900 switch (sc->sc_type) {
1901 case WM_T_82573:
1902 /* XXX limited to 9234 if ASPM is disabled */
1903 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1904 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1905 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1906 break;
1907 case WM_T_82571:
1908 case WM_T_82572:
1909 case WM_T_82574:
1910 case WM_T_82575:
1911 case WM_T_82576:
1912 case WM_T_82580:
1913 case WM_T_82580ER:
1914 case WM_T_I350:
1915 case WM_T_80003:
1916 case WM_T_ICH9:
1917 case WM_T_ICH10:
1918 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1919 /* XXX limited to 9234 */
1920 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1921 break;
1922 case WM_T_PCH:
1923 /* XXX limited to 4096 */
1924 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1925 break;
1926 case WM_T_82542_2_0:
1927 case WM_T_82542_2_1:
1928 case WM_T_82583:
1929 case WM_T_ICH8:
1930 /* No support for jumbo frame */
1931 break;
1932 default:
1933 /* ETHER_MAX_LEN_JUMBO */
1934 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1935 break;
1936 }
1937
1938 /*
1939 	 * If we're an i82543 or greater, we can support VLANs.
1940 */
1941 if (sc->sc_type >= WM_T_82543)
1942 sc->sc_ethercom.ec_capabilities |=
1943 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1944
1945 /*
1946 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
1947 	 * on the i82543 and later.
1948 */
1949 if (sc->sc_type >= WM_T_82543) {
1950 ifp->if_capabilities |=
1951 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1952 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1953 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1954 IFCAP_CSUM_TCPv6_Tx |
1955 IFCAP_CSUM_UDPv6_Tx;
1956 }
1957
1958 /*
1959 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1960 *
1961 * 82541GI (8086:1076) ... no
1962 * 82572EI (8086:10b9) ... yes
1963 */
1964 if (sc->sc_type >= WM_T_82571) {
1965 ifp->if_capabilities |=
1966 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1967 }
1968
1969 /*
1970 	 * If we're an i82544 or greater (except the i82547), we can do
1971 * TCP segmentation offload.
1972 */
1973 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1974 ifp->if_capabilities |= IFCAP_TSOv4;
1975 }
1976
1977 if (sc->sc_type >= WM_T_82571) {
1978 ifp->if_capabilities |= IFCAP_TSOv6;
1979 }
1980
1981 /*
1982 * Attach the interface.
1983 */
1984 if_attach(ifp);
1985 ether_ifattach(ifp, enaddr);
1986 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1987 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1988
1989 #ifdef WM_EVENT_COUNTERS
1990 /* Attach event counters. */
1991 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1992 NULL, xname, "txsstall");
1993 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1994 NULL, xname, "txdstall");
1995 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1996 NULL, xname, "txfifo_stall");
1997 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1998 NULL, xname, "txdw");
1999 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2000 NULL, xname, "txqe");
2001 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2002 NULL, xname, "rxintr");
2003 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2004 NULL, xname, "linkintr");
2005
2006 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2007 NULL, xname, "rxipsum");
2008 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2009 NULL, xname, "rxtusum");
2010 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2011 NULL, xname, "txipsum");
2012 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2013 NULL, xname, "txtusum");
2014 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2015 NULL, xname, "txtusum6");
2016
2017 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2018 NULL, xname, "txtso");
2019 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2020 NULL, xname, "txtso6");
2021 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2022 NULL, xname, "txtsopain");
2023
2024 for (i = 0; i < WM_NTXSEGS; i++) {
2025 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2026 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2027 NULL, xname, wm_txseg_evcnt_names[i]);
2028 }
2029
2030 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2031 NULL, xname, "txdrop");
2032
2033 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2034 NULL, xname, "tu");
2035
2036 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2037 NULL, xname, "tx_xoff");
2038 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2039 NULL, xname, "tx_xon");
2040 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2041 NULL, xname, "rx_xoff");
2042 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2043 NULL, xname, "rx_xon");
2044 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2045 NULL, xname, "rx_macctl");
2046 #endif /* WM_EVENT_COUNTERS */
2047
2048 if (pmf_device_register(self, wm_suspend, wm_resume))
2049 pmf_class_network_register(self, ifp);
2050 else
2051 aprint_error_dev(self, "couldn't establish power handler\n");
2052
2053 return;
2054
2055 /*
2056 * Free any resources we've allocated during the failed attach
2057 * attempt. Do this in reverse order and fall through.
2058 */
2059 fail_5:
2060 for (i = 0; i < WM_NRXDESC; i++) {
2061 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2062 bus_dmamap_destroy(sc->sc_dmat,
2063 sc->sc_rxsoft[i].rxs_dmamap);
2064 }
2065 fail_4:
2066 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2067 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2068 bus_dmamap_destroy(sc->sc_dmat,
2069 sc->sc_txsoft[i].txs_dmamap);
2070 }
2071 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2072 fail_3:
2073 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2074 fail_2:
2075 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2076 sc->sc_cd_size);
2077 fail_1:
2078 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2079 fail_0:
2080 return;
2081 }
2082
2083 static int
2084 wm_detach(device_t self, int flags __unused)
2085 {
2086 struct wm_softc *sc = device_private(self);
2087 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2088 int i, s;
2089
2090 s = splnet();
2091 	/* Stop the interface; callouts are stopped inside wm_stop(). */
2092 wm_stop(ifp, 1);
2093 splx(s);
2094
2095 pmf_device_deregister(self);
2096
2097 /* Tell the firmware about the release */
2098 wm_release_manageability(sc);
2099 wm_release_hw_control(sc);
2100
2101 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2102
2103 /* Delete all remaining media. */
2104 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2105
2106 ether_ifdetach(ifp);
2107 if_detach(ifp);
2108
2110 /* Unload RX dmamaps and free mbufs */
2111 wm_rxdrain(sc);
2112
2113 	/* Free DMA maps; this mirrors the failure path at the end of wm_attach() */
2114 for (i = 0; i < WM_NRXDESC; i++) {
2115 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2116 bus_dmamap_destroy(sc->sc_dmat,
2117 sc->sc_rxsoft[i].rxs_dmamap);
2118 }
2119 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2120 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2121 bus_dmamap_destroy(sc->sc_dmat,
2122 sc->sc_txsoft[i].txs_dmamap);
2123 }
2124 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2125 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2126 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2127 sc->sc_cd_size);
2128 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2129
2130 /* Disestablish the interrupt handler */
2131 if (sc->sc_ih != NULL) {
2132 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2133 sc->sc_ih = NULL;
2134 }
2135
2136 /* Unmap the registers */
2137 if (sc->sc_ss) {
2138 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2139 sc->sc_ss = 0;
2140 }
2141
2142 if (sc->sc_ios) {
2143 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2144 sc->sc_ios = 0;
2145 }
2146
2147 return 0;
2148 }
2149
2150 /*
2151 * wm_tx_offload:
2152 *
2153 * Set up TCP/IP checksumming parameters for the
2154 * specified packet.
2155 */
2156 static int
2157 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2158 uint8_t *fieldsp)
2159 {
2160 struct mbuf *m0 = txs->txs_mbuf;
2161 struct livengood_tcpip_ctxdesc *t;
2162 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2163 uint32_t ipcse;
2164 struct ether_header *eh;
2165 int offset, iphl;
2166 uint8_t fields;
2167
2168 /*
2169 * XXX It would be nice if the mbuf pkthdr had offset
2170 * fields for the protocol headers.
2171 */
2172
2173 eh = mtod(m0, struct ether_header *);
2174 switch (htons(eh->ether_type)) {
2175 case ETHERTYPE_IP:
2176 case ETHERTYPE_IPV6:
2177 offset = ETHER_HDR_LEN;
2178 break;
2179
2180 case ETHERTYPE_VLAN:
2181 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2182 break;
2183
2184 default:
2185 /*
2186 * Don't support this protocol or encapsulation.
2187 */
2188 *fieldsp = 0;
2189 *cmdp = 0;
2190 return 0;
2191 }
2192
2193 if ((m0->m_pkthdr.csum_flags &
2194 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2195 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2196 } else {
2197 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2198 }
2199 ipcse = offset + iphl - 1;
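	/*
	 * IPCSE is the inclusive offset of the last byte covered by the
	 * IP checksum; e.g. a 14-byte Ethernet header and a 20-byte IPv4
	 * header give 14 + 20 - 1 = 33.
	 */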
2200
2201 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2202 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2203 seg = 0;
2204 fields = 0;
2205
2206 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2207 int hlen = offset + iphl;
2208 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2209
2210 if (__predict_false(m0->m_len <
2211 (hlen + sizeof(struct tcphdr)))) {
2212 /*
2213 * TCP/IP headers are not in the first mbuf; we need
2214 * to do this the slow and painful way. Let's just
2215 * hope this doesn't happen very often.
2216 */
2217 struct tcphdr th;
2218
2219 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2220
2221 m_copydata(m0, hlen, sizeof(th), &th);
2222 if (v4) {
2223 struct ip ip;
2224
2225 m_copydata(m0, offset, sizeof(ip), &ip);
2226 ip.ip_len = 0;
2227 m_copyback(m0,
2228 offset + offsetof(struct ip, ip_len),
2229 sizeof(ip.ip_len), &ip.ip_len);
2230 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2231 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2232 } else {
2233 struct ip6_hdr ip6;
2234
2235 m_copydata(m0, offset, sizeof(ip6), &ip6);
2236 ip6.ip6_plen = 0;
2237 m_copyback(m0,
2238 offset + offsetof(struct ip6_hdr, ip6_plen),
2239 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2240 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2241 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2242 }
2243 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2244 sizeof(th.th_sum), &th.th_sum);
2245
2246 hlen += th.th_off << 2;
2247 } else {
2248 /*
2249 * TCP/IP headers are in the first mbuf; we can do
2250 * this the easy way.
2251 */
2252 struct tcphdr *th;
2253
2254 if (v4) {
2255 struct ip *ip =
2256 (void *)(mtod(m0, char *) + offset);
2257 th = (void *)(mtod(m0, char *) + hlen);
2258
2259 ip->ip_len = 0;
2260 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2261 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2262 } else {
2263 struct ip6_hdr *ip6 =
2264 (void *)(mtod(m0, char *) + offset);
2265 th = (void *)(mtod(m0, char *) + hlen);
2266
2267 ip6->ip6_plen = 0;
2268 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2269 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2270 }
2271 hlen += th->th_off << 2;
2272 }
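		/*
		 * hlen now covers all of the headers; e.g. with minimal
		 * v4 headers, 14 (Ethernet) + 20 (IP) + 20 (TCP) = 54
		 * bytes, and everything past hlen is TSO payload.
		 */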
2273
2274 if (v4) {
2275 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2276 cmdlen |= WTX_TCPIP_CMD_IP;
2277 } else {
2278 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2279 ipcse = 0;
2280 }
2281 cmd |= WTX_TCPIP_CMD_TSE;
2282 cmdlen |= WTX_TCPIP_CMD_TSE |
2283 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2284 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2285 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2286 }
2287
2288 /*
2289 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2290 * offload feature, if we load the context descriptor, we
2291 * MUST provide valid values for IPCSS and TUCSS fields.
2292 */
2293
2294 ipcs = WTX_TCPIP_IPCSS(offset) |
2295 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2296 WTX_TCPIP_IPCSE(ipcse);
2297 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2298 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2299 fields |= WTX_IXSM;
2300 }
2301
2302 offset += iphl;
2303
2304 if (m0->m_pkthdr.csum_flags &
2305 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2306 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2307 fields |= WTX_TXSM;
2308 tucs = WTX_TCPIP_TUCSS(offset) |
2309 WTX_TCPIP_TUCSO(offset +
2310 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2311 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2312 } else if ((m0->m_pkthdr.csum_flags &
2313 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2314 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2315 fields |= WTX_TXSM;
2316 tucs = WTX_TCPIP_TUCSS(offset) |
2317 WTX_TCPIP_TUCSO(offset +
2318 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2319 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2320 } else {
2321 /* Just initialize it to a valid TCP context. */
2322 tucs = WTX_TCPIP_TUCSS(offset) |
2323 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2324 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2325 }
2326
2327 /* Fill in the context descriptor. */
2328 t = (struct livengood_tcpip_ctxdesc *)
2329 &sc->sc_txdescs[sc->sc_txnext];
2330 t->tcpip_ipcs = htole32(ipcs);
2331 t->tcpip_tucs = htole32(tucs);
2332 t->tcpip_cmdlen = htole32(cmdlen);
2333 t->tcpip_seg = htole32(seg);
2334 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2335
2336 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2337 txs->txs_ndesc++;
2338
2339 *cmdp = cmd;
2340 *fieldsp = fields;
2341
2342 return 0;
2343 }
2344
2345 static void
2346 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2347 {
2348 struct mbuf *m;
2349 int i;
2350
2351 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2352 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2353 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2354 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2355 m->m_data, m->m_len, m->m_flags);
2356 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2357 i, i == 1 ? "" : "s");
2358 }
2359
2360 /*
2361 * wm_82547_txfifo_stall:
2362 *
2363 * Callout used to wait for the 82547 Tx FIFO to drain,
2364 * reset the FIFO pointers, and restart packet transmission.
2365 */
2366 static void
2367 wm_82547_txfifo_stall(void *arg)
2368 {
2369 struct wm_softc *sc = arg;
2370 int s;
2371
2372 s = splnet();
2373
2374 if (sc->sc_txfifo_stall) {
2375 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2376 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2377 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2378 /*
2379 * Packets have drained. Stop transmitter, reset
2380 * FIFO pointers, restart transmitter, and kick
2381 * the packet queue.
2382 */
2383 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2384 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2385 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2386 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2387 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2388 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2389 CSR_WRITE(sc, WMREG_TCTL, tctl);
2390 CSR_WRITE_FLUSH(sc);
2391
2392 sc->sc_txfifo_head = 0;
2393 sc->sc_txfifo_stall = 0;
2394 wm_start(&sc->sc_ethercom.ec_if);
2395 } else {
2396 /*
2397 * Still waiting for packets to drain; try again in
2398 * another tick.
2399 */
2400 callout_schedule(&sc->sc_txfifo_ch, 1);
2401 }
2402 }
2403
2404 splx(s);
2405 }
2406
2407 static void
2408 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2409 {
2410 uint32_t reg;
2411
2412 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2413
2414 if (on != 0)
2415 reg |= EXTCNFCTR_GATE_PHY_CFG;
2416 else
2417 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2418
2419 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2420 }
2421
2422 /*
2423 * wm_82547_txfifo_bugchk:
2424 *
2425 * Check for bug condition in the 82547 Tx FIFO. We need to
2426 * prevent enqueueing a packet that would wrap around the end
2427  * of the Tx FIFO ring buffer; otherwise the chip will croak.
2428 *
2429 * We do this by checking the amount of space before the end
2430 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2431 * the Tx FIFO, wait for all remaining packets to drain, reset
2432 * the internal FIFO pointers to the beginning, and restart
2433 * transmission on the interface.
2434 */
2435 #define WM_FIFO_HDR 0x10
2436 #define WM_82547_PAD_LEN 0x3e0
2437 static int
2438 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2439 {
2440 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2441 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
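	/*
	 * For example, a 1514-byte frame is charged
	 * roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO space.
	 */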
2442
2443 /* Just return if already stalled. */
2444 if (sc->sc_txfifo_stall)
2445 return 1;
2446
2447 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2448 /* Stall only occurs in half-duplex mode. */
2449 goto send_packet;
2450 }
2451
2452 if (len >= WM_82547_PAD_LEN + space) {
2453 sc->sc_txfifo_stall = 1;
2454 callout_schedule(&sc->sc_txfifo_ch, 1);
2455 return 1;
2456 }
2457
2458 send_packet:
2459 sc->sc_txfifo_head += len;
2460 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2461 sc->sc_txfifo_head -= sc->sc_txfifo_size;
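	/*
	 * e.g. with an (assumed) 16 KB FIFO, head 0x3f00 and len 0x600:
	 * 0x3f00 + 0x600 = 0x4500, which wraps to 0x0500.
	 */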
2462
2463 return 0;
2464 }
2465
2466 /*
2467 * wm_start: [ifnet interface function]
2468 *
2469 * Start packet transmission on the interface.
2470 */
2471 static void
2472 wm_start(struct ifnet *ifp)
2473 {
2474 struct wm_softc *sc = ifp->if_softc;
2475 struct mbuf *m0;
2476 struct m_tag *mtag;
2477 struct wm_txsoft *txs;
2478 bus_dmamap_t dmamap;
2479 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2480 bus_addr_t curaddr;
2481 bus_size_t seglen, curlen;
2482 uint32_t cksumcmd;
2483 uint8_t cksumfields;
2484
2485 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2486 return;
2487
2488 /*
2489 * Remember the previous number of free descriptors.
2490 */
2491 ofree = sc->sc_txfree;
2492
2493 /*
2494 * Loop through the send queue, setting up transmit descriptors
2495 * until we drain the queue, or use up all available transmit
2496 * descriptors.
2497 */
2498 for (;;) {
2499 /* Grab a packet off the queue. */
2500 IFQ_POLL(&ifp->if_snd, m0);
2501 if (m0 == NULL)
2502 break;
2503
2504 DPRINTF(WM_DEBUG_TX,
2505 ("%s: TX: have packet to transmit: %p\n",
2506 device_xname(sc->sc_dev), m0));
2507
2508 /* Get a work queue entry. */
2509 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2510 wm_txintr(sc);
2511 if (sc->sc_txsfree == 0) {
2512 DPRINTF(WM_DEBUG_TX,
2513 ("%s: TX: no free job descriptors\n",
2514 device_xname(sc->sc_dev)));
2515 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2516 break;
2517 }
2518 }
2519
2520 txs = &sc->sc_txsoft[sc->sc_txsnext];
2521 dmamap = txs->txs_dmamap;
2522
2523 use_tso = (m0->m_pkthdr.csum_flags &
2524 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2525
2526 /*
2527 * So says the Linux driver:
2528 * The controller does a simple calculation to make sure
2529 * there is enough room in the FIFO before initiating the
2530 * DMA for each buffer. The calc is:
2531 * 4 = ceil(buffer len / MSS)
2532 * To make sure we don't overrun the FIFO, adjust the max
2533 * buffer len if the MSS drops.
2534 */
2535 dmamap->dm_maxsegsz =
2536 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2537 ? m0->m_pkthdr.segsz << 2
2538 : WTX_MAX_LEN;
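		/*
		 * e.g. an MSS of 1448 clamps each DMA segment to
		 * 4 * 1448 = 5792 bytes (assuming that is below
		 * WTX_MAX_LEN), keeping ceil(len / MSS) <= 4 per buffer.
		 */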
2539
2540 /*
2541 * Load the DMA map. If this fails, the packet either
2542 * didn't fit in the allotted number of segments, or we
2543 * were short on resources. For the too-many-segments
2544 * case, we simply report an error and drop the packet,
2545 * since we can't sanely copy a jumbo packet to a single
2546 * buffer.
2547 */
2548 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2549 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2550 if (error) {
2551 if (error == EFBIG) {
2552 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2553 log(LOG_ERR, "%s: Tx packet consumes too many "
2554 "DMA segments, dropping...\n",
2555 device_xname(sc->sc_dev));
2556 IFQ_DEQUEUE(&ifp->if_snd, m0);
2557 wm_dump_mbuf_chain(sc, m0);
2558 m_freem(m0);
2559 continue;
2560 }
2561 /*
2562 * Short on resources, just stop for now.
2563 */
2564 DPRINTF(WM_DEBUG_TX,
2565 ("%s: TX: dmamap load failed: %d\n",
2566 device_xname(sc->sc_dev), error));
2567 break;
2568 }
2569
2570 segs_needed = dmamap->dm_nsegs;
2571 if (use_tso) {
2572 /* For sentinel descriptor; see below. */
2573 segs_needed++;
2574 }
2575
2576 /*
2577 * Ensure we have enough descriptors free to describe
2578 * the packet. Note, we always reserve one descriptor
2579 * at the end of the ring due to the semantics of the
2580 * TDT register, plus one more in the event we need
2581 * to load offload context.
2582 */
2583 if (segs_needed > sc->sc_txfree - 2) {
2584 /*
2585 * Not enough free descriptors to transmit this
2586 * packet. We haven't committed anything yet,
2587 * so just unload the DMA map, put the packet
2588 			 * back on the queue, and punt. Notify the upper
2589 * layer that there are no more slots left.
2590 */
2591 DPRINTF(WM_DEBUG_TX,
2592 ("%s: TX: need %d (%d) descriptors, have %d\n",
2593 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2594 segs_needed, sc->sc_txfree - 1));
2595 ifp->if_flags |= IFF_OACTIVE;
2596 bus_dmamap_unload(sc->sc_dmat, dmamap);
2597 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2598 break;
2599 }
2600
2601 /*
2602 * Check for 82547 Tx FIFO bug. We need to do this
2603 * once we know we can transmit the packet, since we
2604 * do some internal FIFO space accounting here.
2605 */
2606 if (sc->sc_type == WM_T_82547 &&
2607 wm_82547_txfifo_bugchk(sc, m0)) {
2608 DPRINTF(WM_DEBUG_TX,
2609 ("%s: TX: 82547 Tx FIFO bug detected\n",
2610 device_xname(sc->sc_dev)));
2611 ifp->if_flags |= IFF_OACTIVE;
2612 bus_dmamap_unload(sc->sc_dmat, dmamap);
2613 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2614 break;
2615 }
2616
2617 IFQ_DEQUEUE(&ifp->if_snd, m0);
2618
2619 /*
2620 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2621 */
2622
2623 DPRINTF(WM_DEBUG_TX,
2624 ("%s: TX: packet has %d (%d) DMA segments\n",
2625 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2626
2627 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2628
2629 /*
2630 * Store a pointer to the packet so that we can free it
2631 * later.
2632 *
2633 * Initially, we consider the number of descriptors the
2634 		 * packet uses to be the number of DMA segments. This may be
2635 * incremented by 1 if we do checksum offload (a descriptor
2636 * is used to set the checksum context).
2637 */
2638 txs->txs_mbuf = m0;
2639 txs->txs_firstdesc = sc->sc_txnext;
2640 txs->txs_ndesc = segs_needed;
2641
2642 /* Set up offload parameters for this packet. */
2643 if (m0->m_pkthdr.csum_flags &
2644 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2645 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2646 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2647 if (wm_tx_offload(sc, txs, &cksumcmd,
2648 &cksumfields) != 0) {
2649 /* Error message already displayed. */
2650 bus_dmamap_unload(sc->sc_dmat, dmamap);
2651 continue;
2652 }
2653 } else {
2654 cksumcmd = 0;
2655 cksumfields = 0;
2656 }
2657
2658 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2659
2660 /* Sync the DMA map. */
2661 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2662 BUS_DMASYNC_PREWRITE);
2663
2664 /*
2665 * Initialize the transmit descriptor.
2666 */
2667 for (nexttx = sc->sc_txnext, seg = 0;
2668 seg < dmamap->dm_nsegs; seg++) {
2669 for (seglen = dmamap->dm_segs[seg].ds_len,
2670 curaddr = dmamap->dm_segs[seg].ds_addr;
2671 seglen != 0;
2672 curaddr += curlen, seglen -= curlen,
2673 nexttx = WM_NEXTTX(sc, nexttx)) {
2674 curlen = seglen;
2675
2676 /*
2677 * So says the Linux driver:
2678 * Work around for premature descriptor
2679 * write-backs in TSO mode. Append a
2680 * 4-byte sentinel descriptor.
2681 */
2682 if (use_tso &&
2683 seg == dmamap->dm_nsegs - 1 &&
2684 curlen > 8)
2685 curlen -= 4;
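				/*
				 * The 4 bytes trimmed here stay in seglen,
				 * so the next pass of this loop emits them
				 * as their own tiny descriptor: the
				 * sentinel.
				 */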
2686
2687 wm_set_dma_addr(
2688 &sc->sc_txdescs[nexttx].wtx_addr,
2689 curaddr);
2690 sc->sc_txdescs[nexttx].wtx_cmdlen =
2691 htole32(cksumcmd | curlen);
2692 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2693 0;
2694 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2695 cksumfields;
2696 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2697 lasttx = nexttx;
2698
2699 DPRINTF(WM_DEBUG_TX,
2700 ("%s: TX: desc %d: low %#" PRIx64 ", "
2701 "len %#04zx\n",
2702 device_xname(sc->sc_dev), nexttx,
2703 (uint64_t)curaddr, curlen));
2704 }
2705 }
2706
2707 KASSERT(lasttx != -1);
2708
2709 /*
2710 * Set up the command byte on the last descriptor of
2711 * the packet. If we're in the interrupt delay window,
2712 * delay the interrupt.
2713 */
2714 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2715 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2716
2717 /*
2718 * If VLANs are enabled and the packet has a VLAN tag, set
2719 * up the descriptor to encapsulate the packet for us.
2720 *
2721 * This is only valid on the last descriptor of the packet.
2722 */
2723 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2724 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2725 htole32(WTX_CMD_VLE);
2726 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2727 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2728 }
2729
2730 txs->txs_lastdesc = lasttx;
2731
2732 DPRINTF(WM_DEBUG_TX,
2733 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2734 device_xname(sc->sc_dev),
2735 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2736
2737 /* Sync the descriptors we're using. */
2738 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2739 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2740
2741 /* Give the packet to the chip. */
2742 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2743
2744 DPRINTF(WM_DEBUG_TX,
2745 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2746
2747 DPRINTF(WM_DEBUG_TX,
2748 ("%s: TX: finished transmitting packet, job %d\n",
2749 device_xname(sc->sc_dev), sc->sc_txsnext));
2750
2751 /* Advance the tx pointer. */
2752 sc->sc_txfree -= txs->txs_ndesc;
2753 sc->sc_txnext = nexttx;
2754
2755 sc->sc_txsfree--;
2756 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2757
2758 /* Pass the packet to any BPF listeners. */
2759 bpf_mtap(ifp, m0);
2760 }
2761
2762 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2763 /* No more slots; notify upper layer. */
2764 ifp->if_flags |= IFF_OACTIVE;
2765 }
2766
2767 if (sc->sc_txfree != ofree) {
2768 /* Set a watchdog timer in case the chip flakes out. */
2769 ifp->if_timer = 5;
2770 }
2771 }
2772
2773 /*
2774 * wm_nq_tx_offload:
2775 *
2776 * Set up TCP/IP checksumming parameters for the
2777  *	specified packet, for NEWQUEUE devices.
2778 */
2779 static int
2780 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2781 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2782 {
2783 struct mbuf *m0 = txs->txs_mbuf;
2784 struct m_tag *mtag;
2785 uint32_t vl_len, mssidx, cmdc;
2786 struct ether_header *eh;
2787 int offset, iphl;
2788
2789 /*
2790 * XXX It would be nice if the mbuf pkthdr had offset
2791 * fields for the protocol headers.
2792 */
2793 *cmdlenp = 0;
2794 *fieldsp = 0;
2795
2796 eh = mtod(m0, struct ether_header *);
2797 switch (htons(eh->ether_type)) {
2798 case ETHERTYPE_IP:
2799 case ETHERTYPE_IPV6:
2800 offset = ETHER_HDR_LEN;
2801 break;
2802
2803 case ETHERTYPE_VLAN:
2804 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2805 break;
2806
2807 default:
2808 /*
2809 * Don't support this protocol or encapsulation.
2810 */
2811 *do_csum = false;
2812 return 0;
2813 }
2814 *do_csum = true;
2815 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2816 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2817
2818 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2819 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2820
2821 if ((m0->m_pkthdr.csum_flags &
2822 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2823 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2824 } else {
2825 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2826 }
2827 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2828 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
2829
2830 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2831 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2832 << NQTXC_VLLEN_VLAN_SHIFT);
2833 *cmdlenp |= NQTX_CMD_VLE;
2834 }
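	/*
	 * vl_len now packs the VLAN tag, MAC header length and IP header
	 * length into one word; e.g. an untagged IPv4 frame yields
	 * MACLEN 14 and IPLEN 20 with the VLAN field left zero.
	 */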
2835
2836 mssidx = 0;
2837
2838 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2839 int hlen = offset + iphl;
2840 int tcp_hlen;
2841 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2842
2843 if (__predict_false(m0->m_len <
2844 (hlen + sizeof(struct tcphdr)))) {
2845 /*
2846 * TCP/IP headers are not in the first mbuf; we need
2847 * to do this the slow and painful way. Let's just
2848 * hope this doesn't happen very often.
2849 */
2850 struct tcphdr th;
2851
2852 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2853
2854 m_copydata(m0, hlen, sizeof(th), &th);
2855 if (v4) {
2856 struct ip ip;
2857
2858 m_copydata(m0, offset, sizeof(ip), &ip);
2859 ip.ip_len = 0;
2860 m_copyback(m0,
2861 offset + offsetof(struct ip, ip_len),
2862 sizeof(ip.ip_len), &ip.ip_len);
2863 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2864 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2865 } else {
2866 struct ip6_hdr ip6;
2867
2868 m_copydata(m0, offset, sizeof(ip6), &ip6);
2869 ip6.ip6_plen = 0;
2870 m_copyback(m0,
2871 offset + offsetof(struct ip6_hdr, ip6_plen),
2872 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2873 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2874 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2875 }
2876 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2877 sizeof(th.th_sum), &th.th_sum);
2878
2879 tcp_hlen = th.th_off << 2;
2880 } else {
2881 /*
2882 * TCP/IP headers are in the first mbuf; we can do
2883 * this the easy way.
2884 */
2885 struct tcphdr *th;
2886
2887 if (v4) {
2888 struct ip *ip =
2889 (void *)(mtod(m0, char *) + offset);
2890 th = (void *)(mtod(m0, char *) + hlen);
2891
2892 ip->ip_len = 0;
2893 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2894 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2895 } else {
2896 struct ip6_hdr *ip6 =
2897 (void *)(mtod(m0, char *) + offset);
2898 th = (void *)(mtod(m0, char *) + hlen);
2899
2900 ip6->ip6_plen = 0;
2901 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2902 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2903 }
2904 tcp_hlen = th->th_off << 2;
2905 }
2906 hlen += tcp_hlen;
2907 *cmdlenp |= NQTX_CMD_TSE;
2908
2909 if (v4) {
2910 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2911 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2912 } else {
2913 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2914 *fieldsp |= NQTXD_FIELDS_TUXSM;
2915 }
2916 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2917 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2918 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2919 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2920 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2921 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2922 } else {
2923 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2924 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2925 }
2926
2927 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2928 *fieldsp |= NQTXD_FIELDS_IXSM;
2929 cmdc |= NQTXC_CMD_IP4;
2930 }
2931
2932 if (m0->m_pkthdr.csum_flags &
2933 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2934 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2935 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2936 cmdc |= NQTXC_CMD_TCP;
2937 } else {
2938 cmdc |= NQTXC_CMD_UDP;
2939 }
2940 cmdc |= NQTXC_CMD_IP4;
2941 *fieldsp |= NQTXD_FIELDS_TUXSM;
2942 }
2943 if (m0->m_pkthdr.csum_flags &
2944 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2945 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2946 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2947 cmdc |= NQTXC_CMD_TCP;
2948 } else {
2949 cmdc |= NQTXC_CMD_UDP;
2950 }
2951 cmdc |= NQTXC_CMD_IP6;
2952 *fieldsp |= NQTXD_FIELDS_TUXSM;
2953 }
2954
2955 /* Fill in the context descriptor. */
2956 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
2957 htole32(vl_len);
2958 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
2959 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
2960 htole32(cmdc);
2961 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
2962 htole32(mssidx);
2963 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2964 DPRINTF(WM_DEBUG_TX,
2965 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
2966 sc->sc_txnext, 0, vl_len));
2967 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
2968 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2969 txs->txs_ndesc++;
2970 return 0;
2971 }
2972
2973 /*
2974 * wm_nq_start: [ifnet interface function]
2975 *
2976  *	Start packet transmission on the interface for NEWQUEUE devices.
2977 */
2978 static void
2979 wm_nq_start(struct ifnet *ifp)
2980 {
2981 struct wm_softc *sc = ifp->if_softc;
2982 struct mbuf *m0;
2983 struct m_tag *mtag;
2984 struct wm_txsoft *txs;
2985 bus_dmamap_t dmamap;
2986 int error, nexttx, lasttx = -1, seg, segs_needed;
2987 bool do_csum, sent;
2988
2989 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2990 return;
2991
2992 sent = false;
2993
2994 /*
2995 * Loop through the send queue, setting up transmit descriptors
2996 * until we drain the queue, or use up all available transmit
2997 * descriptors.
2998 */
2999 for (;;) {
3000 /* Grab a packet off the queue. */
3001 IFQ_POLL(&ifp->if_snd, m0);
3002 if (m0 == NULL)
3003 break;
3004
3005 DPRINTF(WM_DEBUG_TX,
3006 ("%s: TX: have packet to transmit: %p\n",
3007 device_xname(sc->sc_dev), m0));
3008
3009 /* Get a work queue entry. */
3010 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3011 wm_txintr(sc);
3012 if (sc->sc_txsfree == 0) {
3013 DPRINTF(WM_DEBUG_TX,
3014 ("%s: TX: no free job descriptors\n",
3015 device_xname(sc->sc_dev)));
3016 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3017 break;
3018 }
3019 }
3020
3021 txs = &sc->sc_txsoft[sc->sc_txsnext];
3022 dmamap = txs->txs_dmamap;
3023
3024 /*
3025 * Load the DMA map. If this fails, the packet either
3026 * didn't fit in the allotted number of segments, or we
3027 * were short on resources. For the too-many-segments
3028 * case, we simply report an error and drop the packet,
3029 * since we can't sanely copy a jumbo packet to a single
3030 * buffer.
3031 */
3032 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3033 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3034 if (error) {
3035 if (error == EFBIG) {
3036 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3037 log(LOG_ERR, "%s: Tx packet consumes too many "
3038 "DMA segments, dropping...\n",
3039 device_xname(sc->sc_dev));
3040 IFQ_DEQUEUE(&ifp->if_snd, m0);
3041 wm_dump_mbuf_chain(sc, m0);
3042 m_freem(m0);
3043 continue;
3044 }
3045 /*
3046 * Short on resources, just stop for now.
3047 */
3048 DPRINTF(WM_DEBUG_TX,
3049 ("%s: TX: dmamap load failed: %d\n",
3050 device_xname(sc->sc_dev), error));
3051 break;
3052 }
3053
3054 segs_needed = dmamap->dm_nsegs;
3055
3056 /*
3057 * Ensure we have enough descriptors free to describe
3058 * the packet. Note, we always reserve one descriptor
3059 * at the end of the ring due to the semantics of the
3060 * TDT register, plus one more in the event we need
3061 * to load offload context.
3062 */
3063 if (segs_needed > sc->sc_txfree - 2) {
3064 /*
3065 * Not enough free descriptors to transmit this
3066 * packet. We haven't committed anything yet,
3067 * so just unload the DMA map, put the packet
3068 			 * back on the queue, and punt. Notify the upper
3069 * layer that there are no more slots left.
3070 */
3071 DPRINTF(WM_DEBUG_TX,
3072 ("%s: TX: need %d (%d) descriptors, have %d\n",
3073 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3074 segs_needed, sc->sc_txfree - 1));
3075 ifp->if_flags |= IFF_OACTIVE;
3076 bus_dmamap_unload(sc->sc_dmat, dmamap);
3077 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3078 break;
3079 }
3080
3081 IFQ_DEQUEUE(&ifp->if_snd, m0);
3082
3083 /*
3084 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3085 */
3086
3087 DPRINTF(WM_DEBUG_TX,
3088 ("%s: TX: packet has %d (%d) DMA segments\n",
3089 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3090
3091 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3092
3093 /*
3094 * Store a pointer to the packet so that we can free it
3095 * later.
3096 *
3097 * Initially, we consider the number of descriptors the
3098 		 * packet uses to be the number of DMA segments. This may be
3099 * incremented by 1 if we do checksum offload (a descriptor
3100 * is used to set the checksum context).
3101 */
3102 txs->txs_mbuf = m0;
3103 txs->txs_firstdesc = sc->sc_txnext;
3104 txs->txs_ndesc = segs_needed;
3105
3106 /* Set up offload parameters for this packet. */
3107 uint32_t cmdlen, fields, dcmdlen;
3108 if (m0->m_pkthdr.csum_flags &
3109 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3110 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3111 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3112 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3113 &do_csum) != 0) {
3114 /* Error message already displayed. */
3115 bus_dmamap_unload(sc->sc_dmat, dmamap);
3116 continue;
3117 }
3118 } else {
3119 do_csum = false;
3120 cmdlen = 0;
3121 fields = 0;
3122 }
3123
3124 /* Sync the DMA map. */
3125 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3126 BUS_DMASYNC_PREWRITE);
3127
3128 /*
3129 * Initialize the first transmit descriptor.
3130 */
3131 nexttx = sc->sc_txnext;
3132 if (!do_csum) {
3133 /* setup a legacy descriptor */
3134 wm_set_dma_addr(
3135 &sc->sc_txdescs[nexttx].wtx_addr,
3136 dmamap->dm_segs[0].ds_addr);
3137 sc->sc_txdescs[nexttx].wtx_cmdlen =
3138 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3139 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3140 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3141 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3142 NULL) {
3143 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3144 htole32(WTX_CMD_VLE);
3145 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3146 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3147 } else {
3148 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3149 }
3150 dcmdlen = 0;
3151 } else {
3152 /* setup an advanced data descriptor */
3153 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3154 htole64(dmamap->dm_segs[0].ds_addr);
3155 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3156 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3157 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3158 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3159 htole32(fields);
3160 DPRINTF(WM_DEBUG_TX,
3161 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3162 device_xname(sc->sc_dev), nexttx,
3163 (uint64_t)dmamap->dm_segs[0].ds_addr));
3164 DPRINTF(WM_DEBUG_TX,
3165 ("\t 0x%08x%08x\n", fields,
3166 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3167 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3168 }
3169
3170 lasttx = nexttx;
3171 nexttx = WM_NEXTTX(sc, nexttx);
3172 /*
3173 		 * Fill in the next descriptors. The legacy and advanced
3174 		 * formats are the same from here on.
3175 */
3176 for (seg = 1; seg < dmamap->dm_nsegs;
3177 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3178 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3179 htole64(dmamap->dm_segs[seg].ds_addr);
3180 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3181 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3182 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3183 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3184 lasttx = nexttx;
3185
3186 DPRINTF(WM_DEBUG_TX,
3187 ("%s: TX: desc %d: %#" PRIx64 ", "
3188 "len %#04zx\n",
3189 device_xname(sc->sc_dev), nexttx,
3190 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3191 dmamap->dm_segs[seg].ds_len));
3192 }
3193
3194 KASSERT(lasttx != -1);
3195
3196 /*
3197 * Set up the command byte on the last descriptor of
3198 * the packet. If we're in the interrupt delay window,
3199 * delay the interrupt.
3200 */
3201 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3202 (NQTX_CMD_EOP | NQTX_CMD_RS));
3203 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3204 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3205
3206 txs->txs_lastdesc = lasttx;
3207
3208 DPRINTF(WM_DEBUG_TX,
3209 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3210 device_xname(sc->sc_dev),
3211 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3212
3213 /* Sync the descriptors we're using. */
3214 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3215 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3216
3217 /* Give the packet to the chip. */
3218 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3219 sent = true;
3220
3221 DPRINTF(WM_DEBUG_TX,
3222 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3223
3224 DPRINTF(WM_DEBUG_TX,
3225 ("%s: TX: finished transmitting packet, job %d\n",
3226 device_xname(sc->sc_dev), sc->sc_txsnext));
3227
3228 /* Advance the tx pointer. */
3229 sc->sc_txfree -= txs->txs_ndesc;
3230 sc->sc_txnext = nexttx;
3231
3232 sc->sc_txsfree--;
3233 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3234
3235 /* Pass the packet to any BPF listeners. */
3236 bpf_mtap(ifp, m0);
3237 }
3238
3239 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3240 /* No more slots; notify upper layer. */
3241 ifp->if_flags |= IFF_OACTIVE;
3242 }
3243
3244 if (sent) {
3245 /* Set a watchdog timer in case the chip flakes out. */
3246 ifp->if_timer = 5;
3247 }
3248 }
3249
3250 /*
3251 * wm_watchdog: [ifnet interface function]
3252 *
3253 * Watchdog timer handler.
3254 */
3255 static void
3256 wm_watchdog(struct ifnet *ifp)
3257 {
3258 struct wm_softc *sc = ifp->if_softc;
3259
3260 /*
3261 * Since we're using delayed interrupts, sweep up
3262 * before we report an error.
3263 */
3264 wm_txintr(sc);
3265
3266 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3267 #ifdef WM_DEBUG
3268 int i, j;
3269 struct wm_txsoft *txs;
3270 #endif
3271 log(LOG_ERR,
3272 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3273 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3274 sc->sc_txnext);
3275 ifp->if_oerrors++;
3276 #ifdef WM_DEBUG
3277 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext;
3278 i = WM_NEXTTXS(sc, i)) {
3279 txs = &sc->sc_txsoft[i];
3280 printf("txs %d tx %d -> %d\n",
3281 i, txs->txs_firstdesc, txs->txs_lastdesc);
3282 for (j = txs->txs_firstdesc; ;
3283 j = WM_NEXTTX(sc, j)) {
3284 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3285 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3286 printf("\t %#08x%08x\n",
3287 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3288 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3289 if (j == txs->txs_lastdesc)
3290 break;
3291 }
3292 }
3293 #endif
3294 /* Reset the interface. */
3295 (void) wm_init(ifp);
3296 }
3297
3298 /* Try to get more packets going. */
3299 ifp->if_start(ifp);
3300 }
3301
3302 static int
3303 wm_ifflags_cb(struct ethercom *ec)
3304 {
3305 struct ifnet *ifp = &ec->ec_if;
3306 struct wm_softc *sc = ifp->if_softc;
3307 int change = ifp->if_flags ^ sc->sc_if_flags;
3308
3309 if (change != 0)
3310 sc->sc_if_flags = ifp->if_flags;
3311
3312 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3313 return ENETRESET;
3314
3315 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3316 wm_set_filter(sc);
3317
3318 wm_set_vlan(sc);
3319
3320 return 0;
3321 }
3322
3323 /*
3324 * wm_ioctl: [ifnet interface function]
3325 *
3326 * Handle control requests from the operator.
3327 */
3328 static int
3329 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3330 {
3331 struct wm_softc *sc = ifp->if_softc;
3332 struct ifreq *ifr = (struct ifreq *) data;
3333 struct ifaddr *ifa = (struct ifaddr *)data;
3334 struct sockaddr_dl *sdl;
3335 int s, error;
3336
3337 s = splnet();
3338
3339 switch (cmd) {
3340 case SIOCSIFMEDIA:
3341 case SIOCGIFMEDIA:
3342 /* Flow control requires full-duplex mode. */
3343 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3344 (ifr->ifr_media & IFM_FDX) == 0)
3345 ifr->ifr_media &= ~IFM_ETH_FMASK;
3346 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3347 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3348 /* We can do both TXPAUSE and RXPAUSE. */
3349 ifr->ifr_media |=
3350 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3351 }
3352 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3353 }
3354 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3355 break;
3356 case SIOCINITIFADDR:
3357 if (ifa->ifa_addr->sa_family == AF_LINK) {
3358 sdl = satosdl(ifp->if_dl->ifa_addr);
3359 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3360 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3361 /* unicast address is first multicast entry */
3362 wm_set_filter(sc);
3363 error = 0;
3364 break;
3365 }
3366 /*FALLTHROUGH*/
3367 default:
3368 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3369 break;
3370
3371 error = 0;
3372
3373 if (cmd == SIOCSIFCAP)
3374 error = (*ifp->if_init)(ifp);
3375 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3376 ;
3377 else if (ifp->if_flags & IFF_RUNNING) {
3378 /*
3379 * Multicast list has changed; set the hardware filter
3380 * accordingly.
3381 */
3382 wm_set_filter(sc);
3383 }
3384 break;
3385 }
3386
3387 /* Try to get more packets going. */
3388 ifp->if_start(ifp);
3389
3390 splx(s);
3391 return error;
3392 }
3393
3394 /*
3395 * wm_intr:
3396 *
3397 * Interrupt service routine.
3398 */
3399 static int
3400 wm_intr(void *arg)
3401 {
3402 struct wm_softc *sc = arg;
3403 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3404 uint32_t icr;
3405 int handled = 0;
3406
3407 while (1 /* CONSTCOND */) {
3408 icr = CSR_READ(sc, WMREG_ICR);
3409 if ((icr & sc->sc_icr) == 0)
3410 break;
3411 rnd_add_uint32(&sc->rnd_source, icr);
3412
3413 handled = 1;
3414
3415 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3416 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3417 DPRINTF(WM_DEBUG_RX,
3418 ("%s: RX: got Rx intr 0x%08x\n",
3419 device_xname(sc->sc_dev),
3420 icr & (ICR_RXDMT0|ICR_RXT0)));
3421 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3422 }
3423 #endif
3424 wm_rxintr(sc);
3425
3426 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3427 if (icr & ICR_TXDW) {
3428 DPRINTF(WM_DEBUG_TX,
3429 ("%s: TX: got TXDW interrupt\n",
3430 device_xname(sc->sc_dev)));
3431 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3432 }
3433 #endif
3434 wm_txintr(sc);
3435
3436 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3437 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3438 wm_linkintr(sc, icr);
3439 }
3440
3441 if (icr & ICR_RXO) {
3442 #if defined(WM_DEBUG)
3443 log(LOG_WARNING, "%s: Receive overrun\n",
3444 device_xname(sc->sc_dev));
3445 #endif /* defined(WM_DEBUG) */
3446 }
3447 }
3448
3449 if (handled) {
3450 /* Try to get more packets going. */
3451 ifp->if_start(ifp);
3452 }
3453
3454 return handled;
3455 }
3456
3457 /*
3458 * wm_txintr:
3459 *
3460 * Helper; handle transmit interrupts.
3461 */
3462 static void
3463 wm_txintr(struct wm_softc *sc)
3464 {
3465 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3466 struct wm_txsoft *txs;
3467 uint8_t status;
3468 int i;
3469
3470 ifp->if_flags &= ~IFF_OACTIVE;
3471
3472 /*
3473 * Go through the Tx list and free mbufs for those
3474 * frames which have been transmitted.
3475 */
3476 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3477 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3478 txs = &sc->sc_txsoft[i];
3479
3480 DPRINTF(WM_DEBUG_TX,
3481 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3482
3483 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3484 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3485
3486 status =
3487 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3488 if ((status & WTX_ST_DD) == 0) {
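			/*
			 * The chip has not yet written this descriptor
			 * back; resync it for a later read and stop
			 * scanning.
			 */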
3489 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3490 BUS_DMASYNC_PREREAD);
3491 break;
3492 }
3493
3494 DPRINTF(WM_DEBUG_TX,
3495 ("%s: TX: job %d done: descs %d..%d\n",
3496 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3497 txs->txs_lastdesc));
3498
3499 /*
3500 * XXX We should probably be using the statistics
3501 * XXX registers, but I don't know if they exist
3502 * XXX on chips before the i82544.
3503 */
3504
3505 #ifdef WM_EVENT_COUNTERS
3506 if (status & WTX_ST_TU)
3507 WM_EVCNT_INCR(&sc->sc_ev_tu);
3508 #endif /* WM_EVENT_COUNTERS */
3509
3510 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3511 ifp->if_oerrors++;
3512 if (status & WTX_ST_LC)
3513 log(LOG_WARNING, "%s: late collision\n",
3514 device_xname(sc->sc_dev));
3515 else if (status & WTX_ST_EC) {
3516 ifp->if_collisions += 16;
3517 log(LOG_WARNING, "%s: excessive collisions\n",
3518 device_xname(sc->sc_dev));
3519 }
3520 } else
3521 ifp->if_opackets++;
3522
3523 sc->sc_txfree += txs->txs_ndesc;
3524 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3525 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3526 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3527 m_freem(txs->txs_mbuf);
3528 txs->txs_mbuf = NULL;
3529 }
3530
3531 /* Update the dirty transmit buffer pointer. */
3532 sc->sc_txsdirty = i;
3533 DPRINTF(WM_DEBUG_TX,
3534 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3535
3536 /*
3537 * If there are no more pending transmissions, cancel the watchdog
3538 * timer.
3539 */
3540 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3541 ifp->if_timer = 0;
3542 }
3543
3544 /*
3545 * wm_rxintr:
3546 *
3547 * Helper; handle receive interrupts.
3548 */
3549 static void
3550 wm_rxintr(struct wm_softc *sc)
3551 {
3552 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3553 struct wm_rxsoft *rxs;
3554 struct mbuf *m;
3555 int i, len;
3556 uint8_t status, errors;
3557 uint16_t vlantag;
3558
3559 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3560 rxs = &sc->sc_rxsoft[i];
3561
3562 DPRINTF(WM_DEBUG_RX,
3563 ("%s: RX: checking descriptor %d\n",
3564 device_xname(sc->sc_dev), i));
3565
3566 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3567
3568 status = sc->sc_rxdescs[i].wrx_status;
3569 errors = sc->sc_rxdescs[i].wrx_errors;
3570 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3571 vlantag = sc->sc_rxdescs[i].wrx_special;
3572
3573 if ((status & WRX_ST_DD) == 0) {
3574 /*
3575 * We have processed all of the receive descriptors.
3576 */
3577 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3578 break;
3579 }
3580
3581 if (__predict_false(sc->sc_rxdiscard)) {
3582 DPRINTF(WM_DEBUG_RX,
3583 ("%s: RX: discarding contents of descriptor %d\n",
3584 device_xname(sc->sc_dev), i));
3585 WM_INIT_RXDESC(sc, i);
3586 if (status & WRX_ST_EOP) {
3587 /* Reset our state. */
3588 DPRINTF(WM_DEBUG_RX,
3589 ("%s: RX: resetting rxdiscard -> 0\n",
3590 device_xname(sc->sc_dev)));
3591 sc->sc_rxdiscard = 0;
3592 }
3593 continue;
3594 }
3595
3596 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3597 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3598
3599 m = rxs->rxs_mbuf;
3600
3601 /*
3602 * Add a new receive buffer to the ring, unless of
3603 * course the length is zero. Treat the latter as a
3604 * failed mapping.
3605 */
3606 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3607 /*
3608 * Failed, throw away what we've done so
3609 * far, and discard the rest of the packet.
3610 */
3611 ifp->if_ierrors++;
3612 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3613 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3614 WM_INIT_RXDESC(sc, i);
3615 if ((status & WRX_ST_EOP) == 0)
3616 sc->sc_rxdiscard = 1;
3617 if (sc->sc_rxhead != NULL)
3618 m_freem(sc->sc_rxhead);
3619 WM_RXCHAIN_RESET(sc);
3620 DPRINTF(WM_DEBUG_RX,
3621 ("%s: RX: Rx buffer allocation failed, "
3622 "dropping packet%s\n", device_xname(sc->sc_dev),
3623 sc->sc_rxdiscard ? " (discard)" : ""));
3624 continue;
3625 }
3626
3627 m->m_len = len;
3628 sc->sc_rxlen += len;
3629 DPRINTF(WM_DEBUG_RX,
3630 ("%s: RX: buffer at %p len %d\n",
3631 device_xname(sc->sc_dev), m->m_data, len));
3632
3633 /*
3634 * If this is not the end of the packet, keep
3635 * looking.
3636 */
3637 if ((status & WRX_ST_EOP) == 0) {
3638 WM_RXCHAIN_LINK(sc, m);
3639 DPRINTF(WM_DEBUG_RX,
3640 ("%s: RX: not yet EOP, rxlen -> %d\n",
3641 device_xname(sc->sc_dev), sc->sc_rxlen));
3642 continue;
3643 }
3644
3645 /*
3646 * Okay, we have the entire packet now. The chip is
3647 * configured to include the FCS except on the I350
3648 * (not all chips can be configured to strip it), so
3649 * we need to trim it. If the final mbuf holds fewer
3650 * than ETHER_CRC_LEN bytes, the remainder of the FCS
3651 * is trimmed from the previous mbuf in the chain.
3652 * Due to an erratum, the RCTL_SECRC bit in the RCTL
3653 * register is always set on the I350, so we don't trim there.
3654 */
3655 if (sc->sc_type != WM_T_I350) {
3656 if (m->m_len < ETHER_CRC_LEN) {
3657 sc->sc_rxtail->m_len
3658 -= (ETHER_CRC_LEN - m->m_len);
3659 m->m_len = 0;
3660 } else
3661 m->m_len -= ETHER_CRC_LEN;
3662 len = sc->sc_rxlen - ETHER_CRC_LEN;
3663 } else
3664 len = sc->sc_rxlen;
3665
3666 WM_RXCHAIN_LINK(sc, m);
3667
3668 *sc->sc_rxtailp = NULL;
3669 m = sc->sc_rxhead;
3670
3671 WM_RXCHAIN_RESET(sc);
3672
3673 DPRINTF(WM_DEBUG_RX,
3674 ("%s: RX: have entire packet, len -> %d\n",
3675 device_xname(sc->sc_dev), len));
3676
3677 /*
3678 * If an error occurred, update stats and drop the packet.
3679 */
3680 if (errors &
3681 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3682 if (errors & WRX_ER_SE)
3683 log(LOG_WARNING, "%s: symbol error\n",
3684 device_xname(sc->sc_dev));
3685 else if (errors & WRX_ER_SEQ)
3686 log(LOG_WARNING, "%s: receive sequence error\n",
3687 device_xname(sc->sc_dev));
3688 else if (errors & WRX_ER_CE)
3689 log(LOG_WARNING, "%s: CRC error\n",
3690 device_xname(sc->sc_dev));
3691 m_freem(m);
3692 continue;
3693 }
3694
3695 /*
3696 * No errors. Receive the packet.
3697 */
3698 m->m_pkthdr.rcvif = ifp;
3699 m->m_pkthdr.len = len;
3700
3701 /*
3702 * If VLANs are enabled, VLAN packets have been unwrapped
3703 * for us. Associate the tag with the packet.
3704 */
3705 if ((status & WRX_ST_VP) != 0) {
3706 VLAN_INPUT_TAG(ifp, m,
3707 le16toh(vlantag),
3708 continue);
3709 }
3710
3711 /*
3712 * Set up checksum info for this packet.
3713 */
3714 if ((status & WRX_ST_IXSM) == 0) {
3715 if (status & WRX_ST_IPCS) {
3716 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3717 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3718 if (errors & WRX_ER_IPE)
3719 m->m_pkthdr.csum_flags |=
3720 M_CSUM_IPv4_BAD;
3721 }
3722 if (status & WRX_ST_TCPCS) {
3723 /*
3724 * Note: we don't know if this was TCP or UDP,
3725 * so we just set both bits, and expect the
3726 * upper layers to deal.
3727 */
3728 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3729 m->m_pkthdr.csum_flags |=
3730 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3731 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3732 if (errors & WRX_ER_TCPE)
3733 m->m_pkthdr.csum_flags |=
3734 M_CSUM_TCP_UDP_BAD;
3735 }
3736 }
3737
3738 ifp->if_ipackets++;
3739
3740 /* Pass this up to any BPF listeners. */
3741 bpf_mtap(ifp, m);
3742
3743 /* Pass it on. */
3744 (*ifp->if_input)(ifp, m);
3745 }
3746
3747 /* Update the receive pointer. */
3748 sc->sc_rxptr = i;
3749
3750 DPRINTF(WM_DEBUG_RX,
3751 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3752 }
3753
3754 /*
3755 * wm_linkintr_gmii:
3756 *
3757 * Helper; handle link interrupts for GMII.
3758 */
3759 static void
3760 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3761 {
3762
3763 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3764 __func__));
3765
3766 if (icr & ICR_LSC) {
3767 DPRINTF(WM_DEBUG_LINK,
3768 ("%s: LINK: LSC -> mii_tick\n",
3769 device_xname(sc->sc_dev)));
3770 mii_tick(&sc->sc_mii);
3771 if (sc->sc_type == WM_T_82543) {
3772 int miistatus, active;
3773
3774 /*
3775 * With 82543, we need to force speed and
3776 * duplex on the MAC equal to what the PHY
3777 * speed and duplex configuration is.
3778 */
3779 miistatus = sc->sc_mii.mii_media_status;
3780
3781 if (miistatus & IFM_ACTIVE) {
3782 active = sc->sc_mii.mii_media_active;
3783 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3784 switch (IFM_SUBTYPE(active)) {
3785 case IFM_10_T:
3786 sc->sc_ctrl |= CTRL_SPEED_10;
3787 break;
3788 case IFM_100_TX:
3789 sc->sc_ctrl |= CTRL_SPEED_100;
3790 break;
3791 case IFM_1000_T:
3792 sc->sc_ctrl |= CTRL_SPEED_1000;
3793 break;
3794 default:
3795 /*
3796 * Fiber?
3797 * Should not enter here.
3798 */
3799 printf("unknown media (%x)\n",
3800 active);
3801 break;
3802 }
3803 if (active & IFM_FDX)
3804 sc->sc_ctrl |= CTRL_FD;
3805 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3806 }
3807 } else if ((sc->sc_type == WM_T_ICH8)
3808 && (sc->sc_phytype == WMPHY_IGP_3)) {
3809 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3810 } else if (sc->sc_type == WM_T_PCH) {
3811 wm_k1_gig_workaround_hv(sc,
3812 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3813 }
3814
3815 if ((sc->sc_phytype == WMPHY_82578)
3816 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3817 == IFM_1000_T)) {
3818
3819 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3820 delay(200*1000); /* XXX too big */
3821
3822 /* Link stall fix for link up */
3823 wm_gmii_hv_writereg(sc->sc_dev, 1,
3824 HV_MUX_DATA_CTRL,
3825 HV_MUX_DATA_CTRL_GEN_TO_MAC
3826 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3827 wm_gmii_hv_writereg(sc->sc_dev, 1,
3828 HV_MUX_DATA_CTRL,
3829 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3830 }
3831 }
3832 } else if (icr & ICR_RXSEQ) {
3833 DPRINTF(WM_DEBUG_LINK,
3834 ("%s: LINK Receive sequence error\n",
3835 device_xname(sc->sc_dev)));
3836 }
3837 }
3838
3839 /*
3840 * wm_linkintr_tbi:
3841 *
3842 * Helper; handle link interrupts for TBI mode.
3843 */
3844 static void
3845 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3846 {
3847 uint32_t status;
3848
3849 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3850 __func__));
3851
3852 status = CSR_READ(sc, WMREG_STATUS);
3853 if (icr & ICR_LSC) {
3854 if (status & STATUS_LU) {
3855 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3856 device_xname(sc->sc_dev),
3857 (status & STATUS_FD) ? "FDX" : "HDX"));
3858 /*
3859 * NOTE: the hardware updates TFCE and RFCE in CTRL
3860 * automatically, so refresh the cached sc->sc_ctrl here.
3861 */
3862
3863 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3864 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3865 sc->sc_fcrtl &= ~FCRTL_XONE;
3866 if (status & STATUS_FD)
3867 sc->sc_tctl |=
3868 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3869 else
3870 sc->sc_tctl |=
3871 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3872 if (sc->sc_ctrl & CTRL_TFCE)
3873 sc->sc_fcrtl |= FCRTL_XONE;
3874 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3875 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3876 WMREG_OLD_FCRTL : WMREG_FCRTL,
3877 sc->sc_fcrtl);
3878 sc->sc_tbi_linkup = 1;
3879 } else {
3880 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3881 device_xname(sc->sc_dev)));
3882 sc->sc_tbi_linkup = 0;
3883 }
3884 wm_tbi_set_linkled(sc);
3885 } else if (icr & ICR_RXCFG) {
3886 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3887 device_xname(sc->sc_dev)));
3888 sc->sc_tbi_nrxcfg++;
3889 wm_check_for_link(sc);
3890 } else if (icr & ICR_RXSEQ) {
3891 DPRINTF(WM_DEBUG_LINK,
3892 ("%s: LINK: Receive sequence error\n",
3893 device_xname(sc->sc_dev)));
3894 }
3895 }
3896
3897 /*
3898 * wm_linkintr:
3899 *
3900 * Helper; handle link interrupts.
3901 */
3902 static void
3903 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3904 {
3905
3906 if (sc->sc_flags & WM_F_HAS_MII)
3907 wm_linkintr_gmii(sc, icr);
3908 else
3909 wm_linkintr_tbi(sc, icr);
3910 }
3911
3912 /*
3913 * wm_tick:
3914 *
3915 * One second timer, used to check link status, sweep up
3916 * completed transmit jobs, etc.
3917 */
3918 static void
3919 wm_tick(void *arg)
3920 {
3921 struct wm_softc *sc = arg;
3922 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3923 int s;
3924
3925 s = splnet();
3926
3927 if (sc->sc_type >= WM_T_82542_2_1) {
3928 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3929 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3930 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3931 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3932 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3933 }
3934
3935 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3936 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3937 + CSR_READ(sc, WMREG_CRCERRS)
3938 + CSR_READ(sc, WMREG_ALGNERRC)
3939 + CSR_READ(sc, WMREG_SYMERRC)
3940 + CSR_READ(sc, WMREG_RXERRC)
3941 + CSR_READ(sc, WMREG_SEC)
3942 + CSR_READ(sc, WMREG_CEXTERR)
3943 + CSR_READ(sc, WMREG_RLEC);
3944 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3945
3946 if (sc->sc_flags & WM_F_HAS_MII)
3947 mii_tick(&sc->sc_mii);
3948 else
3949 wm_tbi_check_link(sc);
3950
3951 splx(s);
3952
3953 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3954 }
3955
3956 /*
3957 * wm_reset:
3958 *
3959 * Reset the i82542 chip.
3960 */
3961 static void
3962 wm_reset(struct wm_softc *sc)
3963 {
3964 int phy_reset = 0;
3965 uint32_t reg, mask;
3966 int i;
3967
3968 /*
3969 * Allocate on-chip memory according to the MTU size.
3970 * The Packet Buffer Allocation register must be written
3971 * before the chip is reset.
3972 */
3973 switch (sc->sc_type) {
3974 case WM_T_82547:
3975 case WM_T_82547_2:
3976 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3977 PBA_22K : PBA_30K;
3978 sc->sc_txfifo_head = 0;
3979 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3980 sc->sc_txfifo_size =
3981 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3982 sc->sc_txfifo_stall = 0;
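/*
 * Example: with a jumbo MTU the 82547 uses PBA_22K, leaving
 * (PBA_40K - PBA_22K) = 18KB of the 40KB packet buffer for the
 * Tx FIFO computed above.
 */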
3983 break;
3984 case WM_T_82571:
3985 case WM_T_82572:
3986 case WM_T_82575: /* XXX need special handling for jumbo frames */
3987 case WM_T_I350:
3988 case WM_T_80003:
3989 sc->sc_pba = PBA_32K;
3990 break;
3991 case WM_T_82580:
3992 case WM_T_82580ER:
3993 sc->sc_pba = PBA_35K;
3994 break;
3995 case WM_T_82576:
3996 sc->sc_pba = PBA_64K;
3997 break;
3998 case WM_T_82573:
3999 sc->sc_pba = PBA_12K;
4000 break;
4001 case WM_T_82574:
4002 case WM_T_82583:
4003 sc->sc_pba = PBA_20K;
4004 break;
4005 case WM_T_ICH8:
4006 sc->sc_pba = PBA_8K;
4007 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4008 break;
4009 case WM_T_ICH9:
4010 case WM_T_ICH10:
4011 sc->sc_pba = PBA_10K;
4012 break;
4013 case WM_T_PCH:
4014 case WM_T_PCH2:
4015 sc->sc_pba = PBA_26K;
4016 break;
4017 default:
4018 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4019 PBA_40K : PBA_48K;
4020 break;
4021 }
4022 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4023
4024 /* Prevent the PCI-E bus from sticking */
4025 if (sc->sc_flags & WM_F_PCIE) {
4026 int timeout = 800;
4027
4028 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4029 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4030
4031 while (timeout--) {
4032 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4033 == 0)
4034 break;
4035 delay(100);
4036 }
4037 }
4038
4039 /* Set the completion timeout for interface */
4040 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4041 || (sc->sc_type == WM_T_I350))
4042 wm_set_pcie_completion_timeout(sc);
4043
4044 /* Clear interrupt */
4045 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4046
4047 /* Stop the transmit and receive processes. */
4048 CSR_WRITE(sc, WMREG_RCTL, 0);
4049 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4050 sc->sc_rctl &= ~RCTL_EN;
4051
4052 /* XXX set_tbi_sbp_82543() */
4053
4054 delay(10*1000);
4055
4056 /* Must acquire the MDIO ownership before MAC reset */
4057 switch (sc->sc_type) {
4058 case WM_T_82573:
4059 case WM_T_82574:
4060 case WM_T_82583:
4061 i = 0;
4062 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4063 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4064 do {
4065 CSR_WRITE(sc, WMREG_EXTCNFCTR,
4066 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4067 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4068 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4069 break;
4070 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4071 delay(2*1000);
4072 i++;
4073 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4074 break;
4075 default:
4076 break;
4077 }
4078
4079 /*
4080 * 82541 Errata 29? & 82547 Errata 28?
4081 * See also the description about PHY_RST bit in CTRL register
4082 * in 8254x_GBe_SDM.pdf.
4083 */
4084 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4085 CSR_WRITE(sc, WMREG_CTRL,
4086 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4087 delay(5000);
4088 }
4089
4090 switch (sc->sc_type) {
4091 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4092 case WM_T_82541:
4093 case WM_T_82541_2:
4094 case WM_T_82547:
4095 case WM_T_82547_2:
4096 /*
4097 * On some chipsets, a reset through a memory-mapped write
4098 * cycle can cause the chip to reset before completing the
4099 * write cycle. This causes major headaches that can be
4100 * avoided by issuing the reset via indirect register writes
4101 * through I/O space.
4102 *
4103 * So, if we successfully mapped the I/O BAR at attach time,
4104 * use that. Otherwise, try our luck with a memory-mapped
4105 * reset.
4106 */
4107 if (sc->sc_flags & WM_F_IOH_VALID)
4108 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4109 else
4110 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4111 break;
4112 case WM_T_82545_3:
4113 case WM_T_82546_3:
4114 /* Use the shadow control register on these chips. */
4115 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4116 break;
4117 case WM_T_80003:
4118 mask = swfwphysem[sc->sc_funcid];
4119 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4120 wm_get_swfw_semaphore(sc, mask);
4121 CSR_WRITE(sc, WMREG_CTRL, reg);
4122 wm_put_swfw_semaphore(sc, mask);
4123 break;
4124 case WM_T_ICH8:
4125 case WM_T_ICH9:
4126 case WM_T_ICH10:
4127 case WM_T_PCH:
4128 case WM_T_PCH2:
4129 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4130 if (wm_check_reset_block(sc) == 0) {
4131 /*
4132 * Gate automatic PHY configuration by hardware on
4133 * non-managed 82579
4134 */
4135 if ((sc->sc_type == WM_T_PCH2)
4136 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4137 != 0))
4138 wm_gate_hw_phy_config_ich8lan(sc, 1);
4139
4141 reg |= CTRL_PHY_RESET;
4142 phy_reset = 1;
4143 }
4144 wm_get_swfwhw_semaphore(sc);
4145 CSR_WRITE(sc, WMREG_CTRL, reg);
4146 delay(20*1000);
4147 wm_put_swfwhw_semaphore(sc);
4148 break;
4149 case WM_T_82542_2_0:
4150 case WM_T_82542_2_1:
4151 case WM_T_82543:
4152 case WM_T_82540:
4153 case WM_T_82545:
4154 case WM_T_82546:
4155 case WM_T_82571:
4156 case WM_T_82572:
4157 case WM_T_82573:
4158 case WM_T_82574:
4159 case WM_T_82575:
4160 case WM_T_82576:
4161 case WM_T_82580:
4162 case WM_T_82580ER:
4163 case WM_T_82583:
4164 case WM_T_I350:
4165 default:
4166 /* Everything else can safely use the documented method. */
4167 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4168 break;
4169 }
4170
4171 if (phy_reset != 0)
4172 wm_get_cfg_done(sc);
4173
4174 /* reload EEPROM */
4175 switch (sc->sc_type) {
4176 case WM_T_82542_2_0:
4177 case WM_T_82542_2_1:
4178 case WM_T_82543:
4179 case WM_T_82544:
4180 delay(10);
4181 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4182 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4183 delay(2000);
4184 break;
4185 case WM_T_82540:
4186 case WM_T_82545:
4187 case WM_T_82545_3:
4188 case WM_T_82546:
4189 case WM_T_82546_3:
4190 delay(5*1000);
4191 /* XXX Disable HW ARPs on ASF enabled adapters */
4192 break;
4193 case WM_T_82541:
4194 case WM_T_82541_2:
4195 case WM_T_82547:
4196 case WM_T_82547_2:
4197 delay(20000);
4198 /* XXX Disable HW ARPs on ASF enabled adapters */
4199 break;
4200 case WM_T_82571:
4201 case WM_T_82572:
4202 case WM_T_82573:
4203 case WM_T_82574:
4204 case WM_T_82583:
4205 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4206 delay(10);
4207 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4208 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4209 }
4210 /* check EECD_EE_AUTORD */
4211 wm_get_auto_rd_done(sc);
4212 /*
4213 * PHY configuration from the NVM starts only after
4214 * EECD_AUTO_RD is set.
4215 */
4216 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4217 || (sc->sc_type == WM_T_82583))
4218 delay(25*1000);
4219 break;
4220 case WM_T_82575:
4221 case WM_T_82576:
4222 case WM_T_82580:
4223 case WM_T_82580ER:
4224 case WM_T_I350:
4225 case WM_T_80003:
4226 case WM_T_ICH8:
4227 case WM_T_ICH9:
4228 /* check EECD_EE_AUTORD */
4229 wm_get_auto_rd_done(sc);
4230 break;
4231 case WM_T_ICH10:
4232 case WM_T_PCH:
4233 case WM_T_PCH2:
4234 wm_lan_init_done(sc);
4235 break;
4236 default:
4237 panic("%s: unknown type\n", __func__);
4238 }
4239
4240 /* Check whether EEPROM is present or not */
4241 switch (sc->sc_type) {
4242 case WM_T_82575:
4243 case WM_T_82576:
4244 #if 0 /* XXX */
4245 case WM_T_82580:
4246 case WM_T_82580ER:
4247 #endif
4248 case WM_T_I350:
4249 case WM_T_ICH8:
4250 case WM_T_ICH9:
4251 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4252 /* Not found */
4253 sc->sc_flags |= WM_F_EEPROM_INVALID;
4254 if ((sc->sc_type == WM_T_82575)
4255 || (sc->sc_type == WM_T_82576)
4256 || (sc->sc_type == WM_T_82580)
4257 || (sc->sc_type == WM_T_82580ER)
4258 || (sc->sc_type == WM_T_I350))
4259 wm_reset_init_script_82575(sc);
4260 }
4261 break;
4262 default:
4263 break;
4264 }
4265
4266 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4267 || (sc->sc_type == WM_T_I350)) {
4268 /* clear global device reset status bit */
4269 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4270 }
4271
4272 /* Clear any pending interrupt events. */
4273 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4274 reg = CSR_READ(sc, WMREG_ICR);
4275
4276 /* reload sc_ctrl */
4277 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4278
4279 if (sc->sc_type == WM_T_I350)
4280 wm_set_eee_i350(sc);
4281
4282 /* dummy read from WUC */
4283 if (sc->sc_type == WM_T_PCH)
4284 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4285 /*
4286 * For PCH, this write ensures that any noise is detected as a CRC
4287 * error and dropped, rather than presented to the DMA engine as a
4288 * bad packet.
4289 */
4290 if (sc->sc_type == WM_T_PCH)
4291 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4292
4293 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4294 CSR_WRITE(sc, WMREG_WUC, 0);
4295
4296 /* XXX need special handling for 82580 */
4297 }
4298
4299 static void
4300 wm_set_vlan(struct wm_softc *sc)
4301 {
4302 /* Deal with VLAN enables. */
4303 if (VLAN_ATTACHED(&sc->sc_ethercom))
4304 sc->sc_ctrl |= CTRL_VME;
4305 else
4306 sc->sc_ctrl &= ~CTRL_VME;
4307
4308 /* Write the control registers. */
4309 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4310 }
4311
4312 /*
4313 * wm_init: [ifnet interface function]
4314 *
4315 * Initialize the interface. Must be called at splnet().
4316 */
4317 static int
4318 wm_init(struct ifnet *ifp)
4319 {
4320 struct wm_softc *sc = ifp->if_softc;
4321 struct wm_rxsoft *rxs;
4322 int i, j, trynum, error = 0;
4323 uint32_t reg;
4324
4325 /*
4326 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4327 * There is a small but measurable benefit to avoiding the adjustment
4328 * of the descriptor so that the headers are aligned, for normal mtu,
4329 * on such platforms. One possibility is that the DMA itself is
4330 * slightly more efficient if the front of the entire packet (instead
4331 * of the front of the headers) is aligned.
4332 *
4333 * Note we must always set align_tweak to 0 if we are using
4334 * jumbo frames.
4335 */
4336 #ifdef __NO_STRICT_ALIGNMENT
4337 sc->sc_align_tweak = 0;
4338 #else
4339 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4340 sc->sc_align_tweak = 0;
4341 else
4342 sc->sc_align_tweak = 2;
4343 #endif /* __NO_STRICT_ALIGNMENT */
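/*
 * With align_tweak = 2, the 14-byte Ethernet header ends on a
 * 4-byte boundary (2 + 14 = 16), so the IP header that follows
 * is 4-byte aligned on strict-alignment platforms.
 */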
4344
4345 /* Cancel any pending I/O. */
4346 wm_stop(ifp, 0);
4347
4348 /* update statistics before reset */
4349 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4350 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4351
4352 /* Reset the chip to a known state. */
4353 wm_reset(sc);
4354
4355 switch (sc->sc_type) {
4356 case WM_T_82571:
4357 case WM_T_82572:
4358 case WM_T_82573:
4359 case WM_T_82574:
4360 case WM_T_82583:
4361 case WM_T_80003:
4362 case WM_T_ICH8:
4363 case WM_T_ICH9:
4364 case WM_T_ICH10:
4365 case WM_T_PCH:
4366 case WM_T_PCH2:
4367 if (wm_check_mng_mode(sc) != 0)
4368 wm_get_hw_control(sc);
4369 break;
4370 default:
4371 break;
4372 }
4373
4374 /* Reset the PHY. */
4375 if (sc->sc_flags & WM_F_HAS_MII)
4376 wm_gmii_reset(sc);
4377
4378 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4379 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4380 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
4381 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4382
4383 /* Initialize the transmit descriptor ring. */
4384 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4385 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4386 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4387 sc->sc_txfree = WM_NTXDESC(sc);
4388 sc->sc_txnext = 0;
4389
4390 if (sc->sc_type < WM_T_82543) {
4391 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4392 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4393 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4394 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4395 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4396 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4397 } else {
4398 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4399 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4400 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4401 CSR_WRITE(sc, WMREG_TDH, 0);
4402 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4403 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4404
4405 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4406 /*
4407 * Don't write TDT before TCTL.EN is set.
4408 * See the documentation.
4409 */
4410 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4411 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4412 | TXDCTL_WTHRESH(0));
4413 else {
4414 CSR_WRITE(sc, WMREG_TDT, 0);
4415 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4416 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4417 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4418 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4419 }
4420 }
4421 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4422 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4423
4424 /* Initialize the transmit job descriptors. */
4425 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4426 sc->sc_txsoft[i].txs_mbuf = NULL;
4427 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4428 sc->sc_txsnext = 0;
4429 sc->sc_txsdirty = 0;
4430
4431 /*
4432 * Initialize the receive descriptor and receive job
4433 * descriptor rings.
4434 */
4435 if (sc->sc_type < WM_T_82543) {
4436 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4437 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4438 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4439 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4440 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4441 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4442
4443 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4444 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4445 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4446 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4447 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4448 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4449 } else {
4450 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4451 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4452 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4453 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4454 CSR_WRITE(sc, WMREG_EITR(0), 450);
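/*
 * SRRCTL.BSIZEPKT is expressed in units of
 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES must be a
 * multiple of that unit; the programmed buffer size is then
 * MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT.
 */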
4455 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4456 panic("%s: MCLBYTES %d unsupported for 82575 or newer\n", __func__, MCLBYTES);
4457 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4458 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4459 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4460 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4461 | RXDCTL_WTHRESH(1));
4462 } else {
4463 CSR_WRITE(sc, WMREG_RDH, 0);
4464 CSR_WRITE(sc, WMREG_RDT, 0);
4465 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4466 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4467 }
4468 }
4469 for (i = 0; i < WM_NRXDESC; i++) {
4470 rxs = &sc->sc_rxsoft[i];
4471 if (rxs->rxs_mbuf == NULL) {
4472 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4473 log(LOG_ERR, "%s: unable to allocate or map "
4474 "rx buffer %d, error = %d\n",
4475 device_xname(sc->sc_dev), i, error);
4476 /*
4477 * XXX Should attempt to run with fewer receive
4478 * XXX buffers instead of just failing.
4479 */
4480 wm_rxdrain(sc);
4481 goto out;
4482 }
4483 } else {
4484 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4485 WM_INIT_RXDESC(sc, i);
4486 /*
4487 * For 82575 and newer devices, the RX descriptors
4488 * must be initialized after the setting of RCTL.EN in
4489 * wm_set_filter()
4490 */
4491 }
4492 }
4493 sc->sc_rxptr = 0;
4494 sc->sc_rxdiscard = 0;
4495 WM_RXCHAIN_RESET(sc);
4496
4497 /*
4498 * Clear out the VLAN table -- we don't use it (yet).
4499 */
4500 CSR_WRITE(sc, WMREG_VET, 0);
4501 if (sc->sc_type == WM_T_I350)
4502 trynum = 10; /* Due to hw errata */
4503 else
4504 trynum = 1;
4505 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4506 for (j = 0; j < trynum; j++)
4507 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4508
4509 /*
4510 * Set up flow-control parameters.
4511 *
4512 * XXX Values could probably stand some tuning.
4513 */
4514 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4515 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4516 && (sc->sc_type != WM_T_PCH2)) {
4517 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4518 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4519 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4520 }
4521
4522 sc->sc_fcrtl = FCRTL_DFLT;
4523 if (sc->sc_type < WM_T_82543) {
4524 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4525 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4526 } else {
4527 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4528 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4529 }
4530
4531 if (sc->sc_type == WM_T_80003)
4532 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4533 else
4534 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4535
4536 /* Write the control register. */
4537 wm_set_vlan(sc);
4538
4539 if (sc->sc_flags & WM_F_HAS_MII) {
4540 int val;
4541
4542 switch (sc->sc_type) {
4543 case WM_T_80003:
4544 case WM_T_ICH8:
4545 case WM_T_ICH9:
4546 case WM_T_ICH10:
4547 case WM_T_PCH:
4548 case WM_T_PCH2:
4549 /*
4550 * Set the MAC to wait the maximum time between each
4551 * iteration and increase the maximum iteration count when
4552 * polling the PHY; this fixes erroneous timeouts at
4553 * 10Mbps.
4554 */
4555 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4556 0xFFFF);
4557 val = wm_kmrn_readreg(sc,
4558 KUMCTRLSTA_OFFSET_INB_PARAM);
4559 val |= 0x3F;
4560 wm_kmrn_writereg(sc,
4561 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4562 break;
4563 default:
4564 break;
4565 }
4566
4567 if (sc->sc_type == WM_T_80003) {
4568 val = CSR_READ(sc, WMREG_CTRL_EXT);
4569 val &= ~CTRL_EXT_LINK_MODE_MASK;
4570 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4571
4572 /* Bypass RX and TX FIFO's */
4573 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4574 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4575 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4576 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4577 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4578 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4579 }
4580 }
4581 #if 0
4582 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4583 #endif
4584
4585 /*
4586 * Set up checksum offload parameters.
4587 */
4588 reg = CSR_READ(sc, WMREG_RXCSUM);
4589 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4590 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4591 reg |= RXCSUM_IPOFL;
4592 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4593 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4594 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4595 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4596 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4597
4598 /* Reset TBI's RXCFG count */
4599 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4600
4601 /*
4602 * Set up the interrupt registers.
4603 */
4604 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4605 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4606 ICR_RXO | ICR_RXT0;
4607 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4608 sc->sc_icr |= ICR_RXCFG;
4609 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4610
4611 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4612 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4613 || (sc->sc_type == WM_T_PCH2)) {
4614 reg = CSR_READ(sc, WMREG_KABGTXD);
4615 reg |= KABGTXD_BGSQLBIAS;
4616 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4617 }
4618
4619 /* Set up the inter-packet gap. */
4620 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4621
4622 if (sc->sc_type >= WM_T_82543) {
4623 /*
4624 * Set up the interrupt throttling register (units of 256ns)
4625 * Note that a footnote in Intel's documentation says this
4626 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4627 * or 10Mbit mode. Empirically, it appears to be the case
4628 * that that is also true for the 1024ns units of the other
4629 * interrupt-related timer registers -- so, really, we ought
4630 * to divide this value by 4 when the link speed is low.
4631 *
4632 * XXX implement this division at link speed change!
4633 */
4634
4635 /*
4636 * For N interrupts/sec, set this value to:
4637 * 1000000000 / (N * 256). Note that we set the
4638 * absolute and packet timer values to this value
4639 * divided by 4 to get "simple timer" behavior.
4640 */
4641
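/*
 * For example, the value 1500 programmed below yields
 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, while a
 * 4000 ints/sec target would use 1000000000 / (4000 * 256) ~= 976.
 */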
4642 sc->sc_itr = 1500; /* 2604 ints/sec */
4643 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4644 }
4645
4646 /* Set the VLAN ethernetype. */
4647 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4648
4649 /*
4650 * Set up the transmit control register; we start out with
4651 * a collision distance suitable for FDX, but update it when
4652 * we resolve the media type.
4653 */
4654 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4655 | TCTL_CT(TX_COLLISION_THRESHOLD)
4656 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4657 if (sc->sc_type >= WM_T_82571)
4658 sc->sc_tctl |= TCTL_MULR;
4659 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4660
4661 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4662 /*
4663 * Write TDT after TCTL.EN is set.
4664 * See the documentation.
4665 */
4666 CSR_WRITE(sc, WMREG_TDT, 0);
4667 }
4668
4669 if (sc->sc_type == WM_T_80003) {
4670 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4671 reg &= ~TCTL_EXT_GCEX_MASK;
4672 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4673 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4674 }
4675
4676 /* Set the media. */
4677 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4678 goto out;
4679
4680 /* Configure for OS presence */
4681 wm_init_manageability(sc);
4682
4683 /*
4684 * Set up the receive control register; we actually program
4685 * the register when we set the receive filter. Use multicast
4686 * address offset type 0.
4687 *
4688 * Only the i82544 has the ability to strip the incoming
4689 * CRC, so we don't enable that feature.
4690 */
4691 sc->sc_mchash_type = 0;
4692 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4693 | RCTL_MO(sc->sc_mchash_type);
4694
4695 /*
4696 * The I350 has a bug where it always strips the CRC whether
4697 * asked to or not, so ask for stripped CRC here and cope in wm_rxintr().
4698 */
4699 if (sc->sc_type == WM_T_I350)
4700 sc->sc_rctl |= RCTL_SECRC;
4701
4702 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4703 && (ifp->if_mtu > ETHERMTU)) {
4704 sc->sc_rctl |= RCTL_LPE;
4705 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4706 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4707 }
4708
4709 if (MCLBYTES == 2048) {
4710 sc->sc_rctl |= RCTL_2k;
4711 } else {
4712 if (sc->sc_type >= WM_T_82543) {
4713 switch (MCLBYTES) {
4714 case 4096:
4715 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4716 break;
4717 case 8192:
4718 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4719 break;
4720 case 16384:
4721 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4722 break;
4723 default:
4724 panic("wm_init: MCLBYTES %d unsupported",
4725 MCLBYTES);
4726 break;
4727 }
4728 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4729 }
4730
4731 /* Set the receive filter. */
4732 wm_set_filter(sc);
4733
4734 /* On 575 and later set RDT only if RX enabled */
4735 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4736 for (i = 0; i < WM_NRXDESC; i++)
4737 WM_INIT_RXDESC(sc, i);
4738
4739 /* Start the one second link check clock. */
4740 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4741
4742 /* ...all done! */
4743 ifp->if_flags |= IFF_RUNNING;
4744 ifp->if_flags &= ~IFF_OACTIVE;
4745
4746 out:
4747 sc->sc_if_flags = ifp->if_flags;
4748 if (error)
4749 log(LOG_ERR, "%s: interface not running\n",
4750 device_xname(sc->sc_dev));
4751 return error;
4752 }
4753
4754 /*
4755 * wm_rxdrain:
4756 *
4757 * Drain the receive queue.
4758 */
4759 static void
4760 wm_rxdrain(struct wm_softc *sc)
4761 {
4762 struct wm_rxsoft *rxs;
4763 int i;
4764
4765 for (i = 0; i < WM_NRXDESC; i++) {
4766 rxs = &sc->sc_rxsoft[i];
4767 if (rxs->rxs_mbuf != NULL) {
4768 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4769 m_freem(rxs->rxs_mbuf);
4770 rxs->rxs_mbuf = NULL;
4771 }
4772 }
4773 }
4774
4775 /*
4776 * wm_stop: [ifnet interface function]
4777 *
4778 * Stop transmission on the interface.
4779 */
4780 static void
4781 wm_stop(struct ifnet *ifp, int disable)
4782 {
4783 struct wm_softc *sc = ifp->if_softc;
4784 struct wm_txsoft *txs;
4785 int i;
4786
4787 /* Stop the one second clock. */
4788 callout_stop(&sc->sc_tick_ch);
4789
4790 /* Stop the 82547 Tx FIFO stall check timer. */
4791 if (sc->sc_type == WM_T_82547)
4792 callout_stop(&sc->sc_txfifo_ch);
4793
4794 if (sc->sc_flags & WM_F_HAS_MII) {
4795 /* Down the MII. */
4796 mii_down(&sc->sc_mii);
4797 } else {
4798 #if 0
4799 /* Should we clear PHY's status properly? */
4800 wm_reset(sc);
4801 #endif
4802 }
4803
4804 /* Stop the transmit and receive processes. */
4805 CSR_WRITE(sc, WMREG_TCTL, 0);
4806 CSR_WRITE(sc, WMREG_RCTL, 0);
4807 sc->sc_rctl &= ~RCTL_EN;
4808
4809 /*
4810 * Clear the interrupt mask to ensure the device cannot assert its
4811 * interrupt line.
4812 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4813 * any currently pending or shared interrupt.
4814 */
4815 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4816 sc->sc_icr = 0;
4817
4818 /* Release any queued transmit buffers. */
4819 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4820 txs = &sc->sc_txsoft[i];
4821 if (txs->txs_mbuf != NULL) {
4822 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4823 m_freem(txs->txs_mbuf);
4824 txs->txs_mbuf = NULL;
4825 }
4826 }
4827
4828 /* Mark the interface as down and cancel the watchdog timer. */
4829 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4830 ifp->if_timer = 0;
4831
4832 if (disable)
4833 wm_rxdrain(sc);
4834
4835 #if 0 /* notyet */
4836 if (sc->sc_type >= WM_T_82544)
4837 CSR_WRITE(sc, WMREG_WUC, 0);
4838 #endif
4839 }
4840
4841 void
4842 wm_get_auto_rd_done(struct wm_softc *sc)
4843 {
4844 int i;
4845
4846 /* wait for eeprom to reload */
4847 switch (sc->sc_type) {
4848 case WM_T_82571:
4849 case WM_T_82572:
4850 case WM_T_82573:
4851 case WM_T_82574:
4852 case WM_T_82583:
4853 case WM_T_82575:
4854 case WM_T_82576:
4855 case WM_T_82580:
4856 case WM_T_82580ER:
4857 case WM_T_I350:
4858 case WM_T_80003:
4859 case WM_T_ICH8:
4860 case WM_T_ICH9:
4861 for (i = 0; i < 10; i++) {
4862 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4863 break;
4864 delay(1000);
4865 }
4866 if (i == 10) {
4867 log(LOG_ERR, "%s: auto read from eeprom failed to "
4868 "complete\n", device_xname(sc->sc_dev));
4869 }
4870 break;
4871 default:
4872 break;
4873 }
4874 }
4875
4876 void
4877 wm_lan_init_done(struct wm_softc *sc)
4878 {
4879 uint32_t reg = 0;
4880 int i;
4881
4882 /* wait for eeprom to reload */
4883 switch (sc->sc_type) {
4884 case WM_T_ICH10:
4885 case WM_T_PCH:
4886 case WM_T_PCH2:
4887 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4888 reg = CSR_READ(sc, WMREG_STATUS);
4889 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4890 break;
4891 delay(100);
4892 }
4893 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4894 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4895 "complete\n", device_xname(sc->sc_dev), __func__);
4896 }
4897 break;
4898 default:
4899 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4900 __func__);
4901 break;
4902 }
4903
4904 reg &= ~STATUS_LAN_INIT_DONE;
4905 CSR_WRITE(sc, WMREG_STATUS, reg);
4906 }
4907
4908 void
4909 wm_get_cfg_done(struct wm_softc *sc)
4910 {
4911 int mask;
4912 uint32_t reg;
4913 int i;
4914
4915 /* wait for eeprom to reload */
4916 switch (sc->sc_type) {
4917 case WM_T_82542_2_0:
4918 case WM_T_82542_2_1:
4919 /* null */
4920 break;
4921 case WM_T_82543:
4922 case WM_T_82544:
4923 case WM_T_82540:
4924 case WM_T_82545:
4925 case WM_T_82545_3:
4926 case WM_T_82546:
4927 case WM_T_82546_3:
4928 case WM_T_82541:
4929 case WM_T_82541_2:
4930 case WM_T_82547:
4931 case WM_T_82547_2:
4932 case WM_T_82573:
4933 case WM_T_82574:
4934 case WM_T_82583:
4935 /* generic */
4936 delay(10*1000);
4937 break;
4938 case WM_T_80003:
4939 case WM_T_82571:
4940 case WM_T_82572:
4941 case WM_T_82575:
4942 case WM_T_82576:
4943 case WM_T_82580:
4944 case WM_T_82580ER:
4945 case WM_T_I350:
4946 if (sc->sc_type == WM_T_82571) {
4947 /* Only 82571 shares port 0 */
4948 mask = EEMNGCTL_CFGDONE_0;
4949 } else
4950 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4951 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4952 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4953 break;
4954 delay(1000);
4955 }
4956 if (i >= WM_PHY_CFG_TIMEOUT) {
4957 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4958 device_xname(sc->sc_dev), __func__));
4959 }
4960 break;
4961 case WM_T_ICH8:
4962 case WM_T_ICH9:
4963 case WM_T_ICH10:
4964 case WM_T_PCH:
4965 case WM_T_PCH2:
4966 if (sc->sc_type >= WM_T_PCH) {
4967 reg = CSR_READ(sc, WMREG_STATUS);
4968 if ((reg & STATUS_PHYRA) != 0)
4969 CSR_WRITE(sc, WMREG_STATUS,
4970 reg & ~STATUS_PHYRA);
4971 }
4972 delay(10*1000);
4973 break;
4974 default:
4975 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4976 __func__);
4977 break;
4978 }
4979 }
4980
4981 /*
4982 * wm_acquire_eeprom:
4983 *
4984 * Perform the EEPROM handshake required on some chips.
4985 */
4986 static int
4987 wm_acquire_eeprom(struct wm_softc *sc)
4988 {
4989 uint32_t reg;
4990 int x;
4991 int ret = 0;
4992
4993 /* Flash-based NVM needs no handshake; always succeeds */
4994 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4995 return 0;
4996
4997 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4998 ret = wm_get_swfwhw_semaphore(sc);
4999 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5000 /* this will also do wm_get_swsm_semaphore() if needed */
5001 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5002 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5003 ret = wm_get_swsm_semaphore(sc);
5004 }
5005
5006 if (ret) {
5007 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5008 __func__);
5009 return 1;
5010 }
5011
5012 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5013 reg = CSR_READ(sc, WMREG_EECD);
5014
5015 /* Request EEPROM access. */
5016 reg |= EECD_EE_REQ;
5017 CSR_WRITE(sc, WMREG_EECD, reg);
5018
5019 /* ...and wait for it to be granted. */
5020 for (x = 0; x < 1000; x++) {
5021 reg = CSR_READ(sc, WMREG_EECD);
5022 if (reg & EECD_EE_GNT)
5023 break;
5024 delay(5);
5025 }
5026 if ((reg & EECD_EE_GNT) == 0) {
5027 aprint_error_dev(sc->sc_dev,
5028 "could not acquire EEPROM GNT\n");
5029 reg &= ~EECD_EE_REQ;
5030 CSR_WRITE(sc, WMREG_EECD, reg);
5031 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5032 wm_put_swfwhw_semaphore(sc);
5033 if (sc->sc_flags & WM_F_SWFW_SYNC)
5034 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5035 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5036 wm_put_swsm_semaphore(sc);
5037 return 1;
5038 }
5039 }
5040
5041 return 0;
5042 }
5043
5044 /*
5045 * wm_release_eeprom:
5046 *
5047 * Release the EEPROM mutex.
5048 */
5049 static void
5050 wm_release_eeprom(struct wm_softc *sc)
5051 {
5052 uint32_t reg;
5053
5054 /* Flash-based NVM needs no handshake; nothing to release */
5055 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5056 return;
5057
5058 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5059 reg = CSR_READ(sc, WMREG_EECD);
5060 reg &= ~EECD_EE_REQ;
5061 CSR_WRITE(sc, WMREG_EECD, reg);
5062 }
5063
5064 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5065 wm_put_swfwhw_semaphore(sc);
5066 if (sc->sc_flags & WM_F_SWFW_SYNC)
5067 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5068 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5069 wm_put_swsm_semaphore(sc);
5070 }
5071
5072 /*
5073 * wm_eeprom_sendbits:
5074 *
5075 * Send a series of bits to the EEPROM.
5076 */
5077 static void
5078 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5079 {
5080 uint32_t reg;
5081 int x;
5082
5083 reg = CSR_READ(sc, WMREG_EECD);
5084
5085 for (x = nbits; x > 0; x--) {
5086 if (bits & (1U << (x - 1)))
5087 reg |= EECD_DI;
5088 else
5089 reg &= ~EECD_DI;
5090 CSR_WRITE(sc, WMREG_EECD, reg);
5091 delay(2);
5092 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5093 delay(2);
5094 CSR_WRITE(sc, WMREG_EECD, reg);
5095 delay(2);
5096 }
5097 }
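/*
 * Each iteration above drives DI with one bit (MSB first) and
 * pulses SK high and then low, with ~2us of setup/hold around
 * each edge: a software bit-bang of the EEPROM's serial clock.
 */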
5098
5099 /*
5100 * wm_eeprom_recvbits:
5101 *
5102 * Receive a series of bits from the EEPROM.
5103 */
5104 static void
5105 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5106 {
5107 uint32_t reg, val;
5108 int x;
5109
5110 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5111
5112 val = 0;
5113 for (x = nbits; x > 0; x--) {
5114 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5115 delay(2);
5116 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5117 val |= (1U << (x - 1));
5118 CSR_WRITE(sc, WMREG_EECD, reg);
5119 delay(2);
5120 }
5121 *valp = val;
5122 }
5123
5124 /*
5125 * wm_read_eeprom_uwire:
5126 *
5127 * Read a word from the EEPROM using the MicroWire protocol.
5128 */
5129 static int
5130 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5131 {
5132 uint32_t reg, val;
5133 int i;
5134
5135 for (i = 0; i < wordcnt; i++) {
5136 /* Clear SK and DI. */
5137 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5138 CSR_WRITE(sc, WMREG_EECD, reg);
5139
5140 /*
5141 * XXX: workaround for a bug in qemu-0.12.x and prior
5142 * and Xen.
5143 *
5144 * We use this workaround only for the 82540 because qemu's
5145 * e1000 acts as an 82540.
5146 */
5147 if (sc->sc_type == WM_T_82540) {
5148 reg |= EECD_SK;
5149 CSR_WRITE(sc, WMREG_EECD, reg);
5150 reg &= ~EECD_SK;
5151 CSR_WRITE(sc, WMREG_EECD, reg);
5152 delay(2);
5153 }
5154 /* XXX: end of workaround */
5155
5156 /* Set CHIP SELECT. */
5157 reg |= EECD_CS;
5158 CSR_WRITE(sc, WMREG_EECD, reg);
5159 delay(2);
5160
5161 /* Shift in the READ command. */
5162 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5163
5164 /* Shift in address. */
5165 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5166
5167 /* Shift out the data. */
5168 wm_eeprom_recvbits(sc, &val, 16);
5169 data[i] = val & 0xffff;
5170
5171 /* Clear CHIP SELECT. */
5172 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5173 CSR_WRITE(sc, WMREG_EECD, reg);
5174 delay(2);
5175 }
5176
5177 return 0;
5178 }
5179
5180 /*
5181 * wm_spi_eeprom_ready:
5182 *
5183 * Wait for a SPI EEPROM to be ready for commands.
5184 */
5185 static int
5186 wm_spi_eeprom_ready(struct wm_softc *sc)
5187 {
5188 uint32_t val;
5189 int usec;
5190
5191 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5192 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5193 wm_eeprom_recvbits(sc, &val, 8);
5194 if ((val & SPI_SR_RDY) == 0)
5195 break;
5196 }
5197 if (usec >= SPI_MAX_RETRIES) {
5198 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5199 return 1;
5200 }
5201 return 0;
5202 }
5203
5204 /*
5205 * wm_read_eeprom_spi:
5206 *
5207 * Read a word from the EEPROM using the SPI protocol.
5208 */
5209 static int
5210 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5211 {
5212 uint32_t reg, val;
5213 int i;
5214 uint8_t opc;
5215
5216 /* Clear SK and CS. */
5217 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5218 CSR_WRITE(sc, WMREG_EECD, reg);
5219 delay(2);
5220
5221 if (wm_spi_eeprom_ready(sc))
5222 return 1;
5223
5224 /* Toggle CS to flush commands. */
5225 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5226 delay(2);
5227 CSR_WRITE(sc, WMREG_EECD, reg);
5228 delay(2);
5229
5230 opc = SPI_OPC_READ;
5231 if (sc->sc_ee_addrbits == 8 && word >= 128)
5232 opc |= SPI_OPC_A8;
5233
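/*
 * The EEPROM is byte addressed, so the word offset is shifted
 * left by one below; parts with only 8 address bits carry the
 * ninth address bit (A8) in the opcode, folded in above for
 * word offsets >= 128.
 */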
5234 wm_eeprom_sendbits(sc, opc, 8);
5235 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5236
5237 for (i = 0; i < wordcnt; i++) {
5238 wm_eeprom_recvbits(sc, &val, 16);
5239 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5240 }
5241
5242 /* Raise CS and clear SK. */
5243 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5244 CSR_WRITE(sc, WMREG_EECD, reg);
5245 delay(2);
5246
5247 return 0;
5248 }
5249
5250 #define EEPROM_CHECKSUM 0xBABA
5251 #define EEPROM_SIZE 0x0040
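
/*
 * The checksum word itself is conventionally stored as the last of
 * the 64 words (offset EEPROM_SIZE - 1) and is chosen so that the
 * 64-word sum comes out to EEPROM_CHECKSUM; validation below
 * therefore only has to re-add the words and compare.
 */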
5252
5253 /*
5254 * wm_validate_eeprom_checksum
5255 *
5256 * The checksum is defined as the sum of the first 64 (16 bit) words, which must equal EEPROM_CHECKSUM (0xBABA).
5257 */
5258 static int
5259 wm_validate_eeprom_checksum(struct wm_softc *sc)
5260 {
5261 uint16_t checksum;
5262 uint16_t eeprom_data;
5263 int i;
5264
5265 checksum = 0;
5266
5267 #ifdef WM_DEBUG
5268 /* Dump EEPROM image for debug */
5269 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5270 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5271 || (sc->sc_type == WM_T_PCH2)) {
5272 wm_read_eeprom(sc, 0x19, 1, &eeprom_data);
5273 if ((eeprom_data & 0x40) == 0) {
5274 DPRINTF(WM_DEBUG_NVM,("%s: NVM need to be updated\n",
5275 device_xname(sc->sc_dev)));
5276 }
5277 }
5278
5279 if ((wm_debug & WM_DEBUG_NVM) != 0) {
5280 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5281 for (i = 0; i < EEPROM_SIZE; i++) {
5282 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5283 printf("XX ");
5284 else
5285 printf("%04x ", eeprom_data);
5286 if (i % 8 == 7)
5287 printf("\n");
5288 }
5289 }
5290
5291 #endif /* WM_DEBUG */
5292
5293 for (i = 0; i < EEPROM_SIZE; i++) {
5294 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5295 return 1;
5296 checksum += eeprom_data;
5297 }
5298
5299 if (checksum != (uint16_t) EEPROM_CHECKSUM)
5300 return 1;
5301
5302 return 0;
5303 }
5304
5305 /*
5306 * wm_read_eeprom:
5307 *
5308 * Read data from the serial EEPROM.
5309 */
5310 static int
5311 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5312 {
5313 int rv;
5314
5315 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5316 return 1;
5317
5318 if (wm_acquire_eeprom(sc))
5319 return 1;
5320
5321 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5322 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5323 || (sc->sc_type == WM_T_PCH2))
5324 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5325 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5326 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5327 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5328 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5329 else
5330 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5331
5332 wm_release_eeprom(sc);
5333 return rv;
5334 }
5335
5336 static int
5337 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5338 uint16_t *data)
5339 {
5340 int i, eerd = 0;
5341 int error = 0;
5342
5343 for (i = 0; i < wordcnt; i++) {
5344 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5345
5346 CSR_WRITE(sc, WMREG_EERD, eerd);
5347 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5348 if (error != 0)
5349 break;
5350
5351 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5352 }
5353
5354 return error;
5355 }
5356
5357 static int
5358 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5359 {
5360 uint32_t attempts = 100000;
5361 uint32_t i, reg = 0;
5362 int32_t done = -1;
5363
5364 for (i = 0; i < attempts; i++) {
5365 reg = CSR_READ(sc, rw);
5366
5367 if (reg & EERD_DONE) {
5368 done = 0;
5369 break;
5370 }
5371 delay(5);
5372 }
5373
5374 return done;
5375 }
5376
5377 static int
5378 wm_check_alt_mac_addr(struct wm_softc *sc)
5379 {
5380 uint16_t myea[ETHER_ADDR_LEN / 2];
5381 uint16_t offset = EEPROM_OFF_MACADDR;
5382
5383 /* Try to read alternative MAC address pointer */
5384 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5385 return -1;
5386
5387 /* Check pointer */
5388 if (offset == 0xffff)
5389 return -1;
5390
5391 /*
5392 * Check whether the alternative MAC address is valid.
5393 * Some cards have a pointer other than 0xffff but do not
5394 * actually use an alternative MAC address.
5395 *
5396 * A valid address must have the multicast (group) bit clear.
5397 */
5398 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5399 if (((myea[0] & 0xff) & 0x01) == 0)
5400 return 0; /* found! */
5401
5402 /* not found */
5403 return -1;
5404 }
5405
5406 static int
5407 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5408 {
5409 uint16_t myea[ETHER_ADDR_LEN / 2];
5410 uint16_t offset = EEPROM_OFF_MACADDR;
5411 int do_invert = 0;
5412
5413 switch (sc->sc_type) {
5414 case WM_T_82580:
5415 case WM_T_82580ER:
5416 case WM_T_I350:
5417 switch (sc->sc_funcid) {
5418 case 0:
5419 /* default value (== EEPROM_OFF_MACADDR) */
5420 break;
5421 case 1:
5422 offset = EEPROM_OFF_LAN1;
5423 break;
5424 case 2:
5425 offset = EEPROM_OFF_LAN2;
5426 break;
5427 case 3:
5428 offset = EEPROM_OFF_LAN3;
5429 break;
5430 default:
5431 goto bad;
5432 /* NOTREACHED */
5433 break;
5434 }
5435 break;
5436 case WM_T_82571:
5437 case WM_T_82575:
5438 case WM_T_82576:
5439 case WM_T_80003:
5440 if (wm_check_alt_mac_addr(sc) != 0) {
5441 /* reset the offset to LAN0 */
5442 offset = EEPROM_OFF_MACADDR;
5443 if ((sc->sc_funcid & 0x01) == 1)
5444 do_invert = 1;
5445 goto do_read;
5446 }
5447 switch (sc->sc_funcid) {
5448 case 0:
5449 /*
5450 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5451 * itself.
5452 */
5453 break;
5454 case 1:
5455 offset += EEPROM_OFF_MACADDR_LAN1;
5456 break;
5457 case 2:
5458 offset += EEPROM_OFF_MACADDR_LAN2;
5459 break;
5460 case 3:
5461 offset += EEPROM_OFF_MACADDR_LAN3;
5462 break;
5463 default:
5464 goto bad;
5465 /* NOTREACHED */
5466 break;
5467 }
5468 break;
5469 default:
5470 if ((sc->sc_funcid & 0x01) == 1)
5471 do_invert = 1;
5472 break;
5473 }
5474
5475 do_read:
5476 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5477 myea) != 0) {
5478 goto bad;
5479 }
5480
5481 enaddr[0] = myea[0] & 0xff;
5482 enaddr[1] = myea[0] >> 8;
5483 enaddr[2] = myea[1] & 0xff;
5484 enaddr[3] = myea[1] >> 8;
5485 enaddr[4] = myea[2] & 0xff;
5486 enaddr[5] = myea[2] >> 8;
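/*
 * EEPROM words hold the address low byte first: e.g. a word
 * 0xAABB at the base offset yields enaddr[0] = 0xBB and
 * enaddr[1] = 0xAA.
 */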
5487
5488 /*
5489 * Toggle the LSB of the MAC address on the second port
5490 * of some dual port cards.
5491 */
5492 if (do_invert != 0)
5493 enaddr[5] ^= 1;
5494
5495 return 0;
5496
5497 bad:
5498 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5499
5500 return -1;
5501 }
5502
5503 /*
5504 * wm_add_rxbuf:
5505 *
5506 * Add a receive buffer to the indicated descriptor.
5507 */
5508 static int
5509 wm_add_rxbuf(struct wm_softc *sc, int idx)
5510 {
5511 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5512 struct mbuf *m;
5513 int error;
5514
5515 MGETHDR(m, M_DONTWAIT, MT_DATA);
5516 if (m == NULL)
5517 return ENOBUFS;
5518
5519 MCLGET(m, M_DONTWAIT);
5520 if ((m->m_flags & M_EXT) == 0) {
5521 m_freem(m);
5522 return ENOBUFS;
5523 }
5524
5525 if (rxs->rxs_mbuf != NULL)
5526 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5527
5528 rxs->rxs_mbuf = m;
5529
5530 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5531 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5532 BUS_DMA_READ|BUS_DMA_NOWAIT);
5533 if (error) {
5534 /* XXX XXX XXX */
5535 aprint_error_dev(sc->sc_dev,
5536 "unable to load rx DMA map %d, error = %d\n",
5537 idx, error);
5538 panic("wm_add_rxbuf");
5539 }
5540
5541 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5542 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5543
5544 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5545 if ((sc->sc_rctl & RCTL_EN) != 0)
5546 WM_INIT_RXDESC(sc, idx);
5547 } else
5548 WM_INIT_RXDESC(sc, idx);
5549
5550 return 0;
5551 }
5552
5553 /*
5554 * wm_set_ral:
5555 *
5556 * Set an entry in the receive address list.
5557 */
5558 static void
5559 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5560 {
5561 uint32_t ral_lo, ral_hi;
5562
5563 if (enaddr != NULL) {
5564 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5565 (enaddr[3] << 24);
5566 ral_hi = enaddr[4] | (enaddr[5] << 8);
5567 ral_hi |= RAL_AV;
5568 } else {
5569 ral_lo = 0;
5570 ral_hi = 0;
5571 }
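/*
 * E.g. 00:11:22:33:44:55 packs as ral_lo = 0x33221100 and
 * ral_hi = RAL_AV | 0x5544: the low register holds the first
 * four octets and the high register the last two plus the
 * Address Valid bit.
 */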
5572
5573 if (sc->sc_type >= WM_T_82544) {
5574 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5575 ral_lo);
5576 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5577 ral_hi);
5578 } else {
5579 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5580 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5581 }
5582 }
5583
5584 /*
5585 * wm_mchash:
5586 *
5587 * Compute the hash of the multicast address for the 4096-bit
5588 * multicast filter.
5589 */
5590 static uint32_t
5591 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5592 {
5593 static const int lo_shift[4] = { 4, 3, 2, 0 };
5594 static const int hi_shift[4] = { 4, 5, 6, 8 };
5595 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5596 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5597 uint32_t hash;
5598
5599 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5600 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5601 || (sc->sc_type == WM_T_PCH2)) {
5602 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5603 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5604 return (hash & 0x3ff);
5605 }
5606 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5607 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5608
5609 return (hash & 0xfff);
5610 }
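/*
 * Worked example for the default sc_mchash_type == 0 on a
 * non-ICH part: the all-hosts group 01:00:5e:00:00:01 gives
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010, so wm_set_filter()
 * sets bit (0x010 & 0x1f) = 16 in MTA register (0x010 >> 5) = 0.
 */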
5611
5612 /*
5613 * wm_set_filter:
5614 *
5615 * Set up the receive filter.
5616 */
5617 static void
5618 wm_set_filter(struct wm_softc *sc)
5619 {
5620 struct ethercom *ec = &sc->sc_ethercom;
5621 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5622 struct ether_multi *enm;
5623 struct ether_multistep step;
5624 bus_addr_t mta_reg;
5625 uint32_t hash, reg, bit;
5626 int i, size;
5627
5628 if (sc->sc_type >= WM_T_82544)
5629 mta_reg = WMREG_CORDOVA_MTA;
5630 else
5631 mta_reg = WMREG_MTA;
5632
5633 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5634
5635 if (ifp->if_flags & IFF_BROADCAST)
5636 sc->sc_rctl |= RCTL_BAM;
5637 if (ifp->if_flags & IFF_PROMISC) {
5638 sc->sc_rctl |= RCTL_UPE;
5639 goto allmulti;
5640 }
5641
5642 /*
5643 * Set the station address in the first RAL slot, and
5644 * clear the remaining slots.
5645 */
5646 if (sc->sc_type == WM_T_ICH8)
5647 		size = WM_RAL_TABSIZE_ICH8 - 1;
5648 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5649 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
5650 size = WM_RAL_TABSIZE_ICH8;
5651 else if (sc->sc_type == WM_T_82575)
5652 size = WM_RAL_TABSIZE_82575;
5653 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5654 size = WM_RAL_TABSIZE_82576;
5655 else if (sc->sc_type == WM_T_I350)
5656 size = WM_RAL_TABSIZE_I350;
5657 else
5658 size = WM_RAL_TABSIZE;
5659 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5660 for (i = 1; i < size; i++)
5661 wm_set_ral(sc, NULL, i);
5662
5663 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5664 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5665 || (sc->sc_type == WM_T_PCH2))
5666 size = WM_ICH8_MC_TABSIZE;
5667 else
5668 size = WM_MC_TABSIZE;
5669 /* Clear out the multicast table. */
5670 for (i = 0; i < size; i++)
5671 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5672
5673 ETHER_FIRST_MULTI(step, ec, enm);
5674 while (enm != NULL) {
5675 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5676 /*
5677 * We must listen to a range of multicast addresses.
5678 * For now, just accept all multicasts, rather than
5679 * trying to set only those filter bits needed to match
5680 * the range. (At this time, the only use of address
5681 * ranges is for IP multicast routing, for which the
5682 * range is big enough to require all bits set.)
5683 */
5684 goto allmulti;
5685 }
5686
5687 hash = wm_mchash(sc, enm->enm_addrlo);
5688
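		/*
		 * The multicast table is an array of 32-bit words: the
		 * upper bits of the hash select a word and the low 5
		 * bits select a bit within that word.
		 */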
5689 reg = (hash >> 5);
5690 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5691 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5692 || (sc->sc_type == WM_T_PCH2))
5693 reg &= 0x1f;
5694 else
5695 reg &= 0x7f;
5696 bit = hash & 0x1f;
5697
5698 hash = CSR_READ(sc, mta_reg + (reg << 2));
5699 hash |= 1U << bit;
5700
5701 		/* XXX Hardware bug? Writing this word may clobber the previous one. */
5702 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5703 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5704 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5705 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5706 } else
5707 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5708
5709 ETHER_NEXT_MULTI(step, enm);
5710 }
5711
5712 ifp->if_flags &= ~IFF_ALLMULTI;
5713 goto setit;
5714
5715 allmulti:
5716 ifp->if_flags |= IFF_ALLMULTI;
5717 sc->sc_rctl |= RCTL_MPE;
5718
5719 setit:
5720 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5721 }
5722
5723 /*
5724 * wm_tbi_mediainit:
5725 *
5726 * Initialize media for use on 1000BASE-X devices.
5727 */
5728 static void
5729 wm_tbi_mediainit(struct wm_softc *sc)
5730 {
5731 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5732 const char *sep = "";
5733
5734 if (sc->sc_type < WM_T_82543)
5735 sc->sc_tipg = TIPG_WM_DFLT;
5736 else
5737 sc->sc_tipg = TIPG_LG_DFLT;
5738
5739 sc->sc_tbi_anegticks = 5;
5740
5741 /* Initialize our media structures */
5742 sc->sc_mii.mii_ifp = ifp;
5743
5744 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5745 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5746 wm_tbi_mediastatus);
5747
5748 /*
5749 * SWD Pins:
5750 *
5751 * 0 = Link LED (output)
5752 * 1 = Loss Of Signal (input)
5753 */
5754 sc->sc_ctrl |= CTRL_SWDPIO(0);
5755 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5756
5757 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5758
5759 #define ADD(ss, mm, dd) \
5760 do { \
5761 aprint_normal("%s%s", sep, ss); \
5762 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5763 sep = ", "; \
5764 } while (/*CONSTCOND*/0)
5765
5766 aprint_normal_dev(sc->sc_dev, "");
5767 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5768 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5769 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5770 aprint_normal("\n");
5771
5772 #undef ADD
5773
5774 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5775 }
5776
5777 /*
5778 * wm_tbi_mediastatus: [ifmedia interface function]
5779 *
5780 * Get the current interface media status on a 1000BASE-X device.
5781 */
5782 static void
5783 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5784 {
5785 struct wm_softc *sc = ifp->if_softc;
5786 uint32_t ctrl, status;
5787
5788 ifmr->ifm_status = IFM_AVALID;
5789 ifmr->ifm_active = IFM_ETHER;
5790
5791 status = CSR_READ(sc, WMREG_STATUS);
5792 if ((status & STATUS_LU) == 0) {
5793 ifmr->ifm_active |= IFM_NONE;
5794 return;
5795 }
5796
5797 ifmr->ifm_status |= IFM_ACTIVE;
5798 ifmr->ifm_active |= IFM_1000_SX;
5799 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5800 ifmr->ifm_active |= IFM_FDX;
5801 ctrl = CSR_READ(sc, WMREG_CTRL);
5802 if (ctrl & CTRL_RFCE)
5803 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5804 if (ctrl & CTRL_TFCE)
5805 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5806 }
5807
5808 /*
5809 * wm_tbi_mediachange: [ifmedia interface function]
5810 *
5811 * Set hardware to newly-selected media on a 1000BASE-X device.
5812 */
5813 static int
5814 wm_tbi_mediachange(struct ifnet *ifp)
5815 {
5816 struct wm_softc *sc = ifp->if_softc;
5817 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5818 uint32_t status;
5819 int i;
5820
5821 sc->sc_txcw = 0;
5822 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5823 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5824 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5825 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5826 sc->sc_txcw |= TXCW_ANE;
5827 } else {
5828 /*
5829 * If autonegotiation is turned off, force link up and turn on
5830 * full duplex
5831 */
5832 sc->sc_txcw &= ~TXCW_ANE;
5833 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5834 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5835 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5836 delay(1000);
5837 }
5838
5839 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5840 device_xname(sc->sc_dev),sc->sc_txcw));
5841 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5842 delay(10000);
5843
5844 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5845 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5846
5847 /*
5848 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
5849 	 * if the optics detect a signal, and clear if they don't.
5850 */
5851 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5852 /* Have signal; wait for the link to come up. */
5853
5854 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5855 /*
5856 * Reset the link, and let autonegotiation do its thing
5857 */
5858 sc->sc_ctrl |= CTRL_LRST;
5859 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5860 delay(1000);
5861 sc->sc_ctrl &= ~CTRL_LRST;
5862 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5863 delay(1000);
5864 }
5865
5866 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5867 delay(10000);
5868 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5869 break;
5870 }
5871
5872 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5873 device_xname(sc->sc_dev),i));
5874
5875 status = CSR_READ(sc, WMREG_STATUS);
5876 DPRINTF(WM_DEBUG_LINK,
5877 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5878 device_xname(sc->sc_dev),status, STATUS_LU));
5879 if (status & STATUS_LU) {
5880 /* Link is up. */
5881 DPRINTF(WM_DEBUG_LINK,
5882 ("%s: LINK: set media -> link up %s\n",
5883 device_xname(sc->sc_dev),
5884 (status & STATUS_FD) ? "FDX" : "HDX"));
5885
5886 /*
5887 * NOTE: CTRL will update TFCE and RFCE automatically,
5888 * so we should update sc->sc_ctrl
5889 */
5890 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5891 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5892 sc->sc_fcrtl &= ~FCRTL_XONE;
5893 if (status & STATUS_FD)
5894 sc->sc_tctl |=
5895 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5896 else
5897 sc->sc_tctl |=
5898 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5899 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5900 sc->sc_fcrtl |= FCRTL_XONE;
5901 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5902 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5903 WMREG_OLD_FCRTL : WMREG_FCRTL,
5904 sc->sc_fcrtl);
5905 sc->sc_tbi_linkup = 1;
5906 } else {
5907 if (i == WM_LINKUP_TIMEOUT)
5908 wm_check_for_link(sc);
5909 /* Link is down. */
5910 DPRINTF(WM_DEBUG_LINK,
5911 ("%s: LINK: set media -> link down\n",
5912 device_xname(sc->sc_dev)));
5913 sc->sc_tbi_linkup = 0;
5914 }
5915 } else {
5916 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5917 device_xname(sc->sc_dev)));
5918 sc->sc_tbi_linkup = 0;
5919 }
5920
5921 wm_tbi_set_linkled(sc);
5922
5923 return 0;
5924 }
5925
5926 /*
5927 * wm_tbi_set_linkled:
5928 *
5929 * Update the link LED on 1000BASE-X devices.
5930 */
5931 static void
5932 wm_tbi_set_linkled(struct wm_softc *sc)
5933 {
5934
5935 if (sc->sc_tbi_linkup)
5936 sc->sc_ctrl |= CTRL_SWDPIN(0);
5937 else
5938 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5939
5940 	/* On 82540 and newer devices the LED is active-low. */
5941 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5942
5943 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5944 }
5945
5946 /*
5947 * wm_tbi_check_link:
5948 *
5949 * Check the link on 1000BASE-X devices.
5950 */
5951 static void
5952 wm_tbi_check_link(struct wm_softc *sc)
5953 {
5954 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5955 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5956 uint32_t rxcw, ctrl, status;
5957
5958 status = CSR_READ(sc, WMREG_STATUS);
5959
5960 rxcw = CSR_READ(sc, WMREG_RXCW);
5961 ctrl = CSR_READ(sc, WMREG_CTRL);
5962
5963 /* set link status */
5964 if ((status & STATUS_LU) == 0) {
5965 DPRINTF(WM_DEBUG_LINK,
5966 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5967 sc->sc_tbi_linkup = 0;
5968 } else if (sc->sc_tbi_linkup == 0) {
5969 DPRINTF(WM_DEBUG_LINK,
5970 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5971 (status & STATUS_FD) ? "FDX" : "HDX"));
5972 sc->sc_tbi_linkup = 1;
5973 }
5974
5975 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5976 && ((status & STATUS_LU) == 0)) {
5977 sc->sc_tbi_linkup = 0;
5978 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5979 /* RXCFG storm! */
5980 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5981 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5982 wm_init(ifp);
5983 ifp->if_start(ifp);
5984 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5985 /* If the timer expired, retry autonegotiation */
5986 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5987 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5988 sc->sc_tbi_ticks = 0;
5989 /*
5990 * Reset the link, and let autonegotiation do
5991 * its thing
5992 */
5993 sc->sc_ctrl |= CTRL_LRST;
5994 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5995 delay(1000);
5996 sc->sc_ctrl &= ~CTRL_LRST;
5997 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5998 delay(1000);
5999 CSR_WRITE(sc, WMREG_TXCW,
6000 sc->sc_txcw & ~TXCW_ANE);
6001 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6002 }
6003 }
6004 }
6005
6006 wm_tbi_set_linkled(sc);
6007 }
6008
6009 /*
6010 * wm_gmii_reset:
6011 *
6012 * Reset the PHY.
6013 */
6014 static void
6015 wm_gmii_reset(struct wm_softc *sc)
6016 {
6017 uint32_t reg;
6018 int rv;
6019
6020 /* get phy semaphore */
6021 switch (sc->sc_type) {
6022 case WM_T_82571:
6023 case WM_T_82572:
6024 case WM_T_82573:
6025 case WM_T_82574:
6026 case WM_T_82583:
6027 /* XXX should get sw semaphore, too */
6028 rv = wm_get_swsm_semaphore(sc);
6029 break;
6030 case WM_T_82575:
6031 case WM_T_82576:
6032 case WM_T_82580:
6033 case WM_T_82580ER:
6034 case WM_T_I350:
6035 case WM_T_80003:
6036 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6037 break;
6038 case WM_T_ICH8:
6039 case WM_T_ICH9:
6040 case WM_T_ICH10:
6041 case WM_T_PCH:
6042 case WM_T_PCH2:
6043 rv = wm_get_swfwhw_semaphore(sc);
6044 break;
6045 default:
6046 		/* nothing to do */
6047 rv = 0;
6048 break;
6049 }
6050 if (rv != 0) {
6051 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6052 __func__);
6053 return;
6054 }
6055
6056 switch (sc->sc_type) {
6057 case WM_T_82542_2_0:
6058 case WM_T_82542_2_1:
6059 /* null */
6060 break;
6061 case WM_T_82543:
6062 /*
6063 		 * With the 82543, we need to force the MAC's speed and duplex
6064 		 * settings to match the PHY's configuration.
6065 * In addition, we need to perform a hardware reset on the PHY
6066 * to take it out of reset.
6067 */
6068 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6069 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6070
6071 /* The PHY reset pin is active-low. */
6072 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6073 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6074 CTRL_EXT_SWDPIN(4));
6075 reg |= CTRL_EXT_SWDPIO(4);
6076
6077 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6078 delay(10*1000);
6079
6080 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6081 delay(150);
6082 #if 0
6083 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6084 #endif
6085 delay(20*1000); /* XXX extra delay to get PHY ID? */
6086 break;
6087 case WM_T_82544: /* reset 10000us */
6088 case WM_T_82540:
6089 case WM_T_82545:
6090 case WM_T_82545_3:
6091 case WM_T_82546:
6092 case WM_T_82546_3:
6093 case WM_T_82541:
6094 case WM_T_82541_2:
6095 case WM_T_82547:
6096 case WM_T_82547_2:
6097 case WM_T_82571: /* reset 100us */
6098 case WM_T_82572:
6099 case WM_T_82573:
6100 case WM_T_82574:
6101 case WM_T_82575:
6102 case WM_T_82576:
6103 case WM_T_82580:
6104 case WM_T_82580ER:
6105 case WM_T_I350:
6106 case WM_T_82583:
6107 case WM_T_80003:
6108 /* generic reset */
6109 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6110 delay(20000);
6111 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6112 delay(20000);
6113
6114 if ((sc->sc_type == WM_T_82541)
6115 || (sc->sc_type == WM_T_82541_2)
6116 || (sc->sc_type == WM_T_82547)
6117 || (sc->sc_type == WM_T_82547_2)) {
6118 			/* workarounds for igp are done in igp_reset() */
6119 /* XXX add code to set LED after phy reset */
6120 }
6121 break;
6122 case WM_T_ICH8:
6123 case WM_T_ICH9:
6124 case WM_T_ICH10:
6125 case WM_T_PCH:
6126 case WM_T_PCH2:
6127 /* generic reset */
6128 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6129 delay(100);
6130 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6131 delay(150);
6132 break;
6133 default:
6134 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6135 __func__);
6136 break;
6137 }
6138
6139 /* release PHY semaphore */
6140 switch (sc->sc_type) {
6141 case WM_T_82571:
6142 case WM_T_82572:
6143 case WM_T_82573:
6144 case WM_T_82574:
6145 case WM_T_82583:
6146 /* XXX should put sw semaphore, too */
6147 wm_put_swsm_semaphore(sc);
6148 break;
6149 case WM_T_82575:
6150 case WM_T_82576:
6151 case WM_T_82580:
6152 case WM_T_82580ER:
6153 case WM_T_I350:
6154 case WM_T_80003:
6155 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6156 break;
6157 case WM_T_ICH8:
6158 case WM_T_ICH9:
6159 case WM_T_ICH10:
6160 case WM_T_PCH:
6161 case WM_T_PCH2:
6162 wm_put_swfwhw_semaphore(sc);
6163 break;
6164 default:
6165 		/* nothing to do */
6167 break;
6168 }
6169
6170 /* get_cfg_done */
6171 wm_get_cfg_done(sc);
6172
6173 /* extra setup */
6174 switch (sc->sc_type) {
6175 case WM_T_82542_2_0:
6176 case WM_T_82542_2_1:
6177 case WM_T_82543:
6178 case WM_T_82544:
6179 case WM_T_82540:
6180 case WM_T_82545:
6181 case WM_T_82545_3:
6182 case WM_T_82546:
6183 case WM_T_82546_3:
6184 case WM_T_82541_2:
6185 case WM_T_82547_2:
6186 case WM_T_82571:
6187 case WM_T_82572:
6188 case WM_T_82573:
6189 case WM_T_82574:
6190 case WM_T_82575:
6191 case WM_T_82576:
6192 case WM_T_82580:
6193 case WM_T_82580ER:
6194 case WM_T_I350:
6195 case WM_T_82583:
6196 case WM_T_80003:
6197 /* null */
6198 break;
6199 case WM_T_82541:
6200 case WM_T_82547:
6201 		/* XXX Actively configure the LED after PHY reset */
6202 break;
6203 case WM_T_ICH8:
6204 case WM_T_ICH9:
6205 case WM_T_ICH10:
6206 case WM_T_PCH:
6207 case WM_T_PCH2:
6208 		/* Allow time for h/w to get to a quiescent state after reset */
6209 delay(10*1000);
6210
6211 if (sc->sc_type == WM_T_PCH)
6212 wm_hv_phy_workaround_ich8lan(sc);
6213
6214 if (sc->sc_type == WM_T_PCH2)
6215 wm_lv_phy_workaround_ich8lan(sc);
6216
6217 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6218 /*
6219 * dummy read to clear the phy wakeup bit after lcd
6220 * reset
6221 */
6222 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6223 }
6224
6225 /*
6226 		 * XXX Configure the LCD with the extended configuration region
6227 * in NVM
6228 */
6229
6230 /* Configure the LCD with the OEM bits in NVM */
6231 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6232 /*
6233 * Disable LPLU.
6234 * XXX It seems that 82567 has LPLU, too.
6235 */
6236 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6237 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6238 reg |= HV_OEM_BITS_ANEGNOW;
6239 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6240 }
6241 break;
6242 default:
6243 panic("%s: unknown type\n", __func__);
6244 break;
6245 }
6246 }
6247
6248 /*
6249 * wm_gmii_mediainit:
6250 *
6251 * Initialize media for use on 1000BASE-T devices.
6252 */
6253 static void
6254 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6255 {
6256 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6257 struct mii_data *mii = &sc->sc_mii;
6258
6259 /* We have MII. */
6260 sc->sc_flags |= WM_F_HAS_MII;
6261
6262 if (sc->sc_type == WM_T_80003)
6263 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6264 else
6265 sc->sc_tipg = TIPG_1000T_DFLT;
6266
6267 /*
6268 * Let the chip set speed/duplex on its own based on
6269 * signals from the PHY.
6270 * XXXbouyer - I'm not sure this is right for the 80003,
6271 * the em driver only sets CTRL_SLU here - but it seems to work.
6272 */
6273 sc->sc_ctrl |= CTRL_SLU;
6274 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6275
6276 /* Initialize our media structures and probe the GMII. */
6277 mii->mii_ifp = ifp;
6278
6279 /*
6280 * Determine the PHY access method.
6281 *
6282 * For SGMII, use SGMII specific method.
6283 *
6284 * For some devices, we can determine the PHY access method
6285 * from sc_type.
6286 *
6287 	 * For ICH8 variants, it's difficult to determine the PHY access
6288 	 * method by sc_type, so use the PCI product ID for some devices.
6289 	 * For other ICH8 variants, try to use igp's method.  If the PHY
6290 	 * can't be detected, then fall back to bm's method.
6291 */
6292 switch (prodid) {
6293 case PCI_PRODUCT_INTEL_PCH_M_LM:
6294 case PCI_PRODUCT_INTEL_PCH_M_LC:
6295 /* 82577 */
6296 sc->sc_phytype = WMPHY_82577;
6297 mii->mii_readreg = wm_gmii_hv_readreg;
6298 mii->mii_writereg = wm_gmii_hv_writereg;
6299 break;
6300 case PCI_PRODUCT_INTEL_PCH_D_DM:
6301 case PCI_PRODUCT_INTEL_PCH_D_DC:
6302 /* 82578 */
6303 sc->sc_phytype = WMPHY_82578;
6304 mii->mii_readreg = wm_gmii_hv_readreg;
6305 mii->mii_writereg = wm_gmii_hv_writereg;
6306 break;
6307 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6308 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6309 /* 82579 */
6310 sc->sc_phytype = WMPHY_82579;
6311 mii->mii_readreg = wm_gmii_hv_readreg;
6312 mii->mii_writereg = wm_gmii_hv_writereg;
6313 break;
6314 case PCI_PRODUCT_INTEL_82801I_BM:
6315 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6316 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6317 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6318 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6319 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6320 /* 82567 */
6321 sc->sc_phytype = WMPHY_BM;
6322 mii->mii_readreg = wm_gmii_bm_readreg;
6323 mii->mii_writereg = wm_gmii_bm_writereg;
6324 break;
6325 default:
6326 if ((sc->sc_flags & WM_F_SGMII) != 0) {
6327 mii->mii_readreg = wm_sgmii_readreg;
6328 mii->mii_writereg = wm_sgmii_writereg;
6329 } else if (sc->sc_type >= WM_T_80003) {
6330 mii->mii_readreg = wm_gmii_i80003_readreg;
6331 mii->mii_writereg = wm_gmii_i80003_writereg;
6332 } else if (sc->sc_type >= WM_T_82580) {
6333 sc->sc_phytype = WMPHY_82580;
6334 mii->mii_readreg = wm_gmii_82580_readreg;
6335 mii->mii_writereg = wm_gmii_82580_writereg;
6336 } else if (sc->sc_type >= WM_T_82544) {
6337 mii->mii_readreg = wm_gmii_i82544_readreg;
6338 mii->mii_writereg = wm_gmii_i82544_writereg;
6339 } else {
6340 mii->mii_readreg = wm_gmii_i82543_readreg;
6341 mii->mii_writereg = wm_gmii_i82543_writereg;
6342 }
6343 break;
6344 }
6345 mii->mii_statchg = wm_gmii_statchg;
6346
6347 wm_gmii_reset(sc);
6348
6349 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6350 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6351 wm_gmii_mediastatus);
6352
6353 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6354 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6355 || (sc->sc_type == WM_T_I350)) {
6356 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6357 /* Attach only one port */
6358 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6359 MII_OFFSET_ANY, MIIF_DOPAUSE);
6360 } else {
6361 int i;
6362 uint32_t ctrl_ext;
6363
6364 /* Power on sgmii phy if it is disabled */
6365 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6366 CSR_WRITE(sc, WMREG_CTRL_EXT,
6367 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6368 CSR_WRITE_FLUSH(sc);
6369 delay(300*1000); /* XXX too long */
6370
6371 			/* Try PHY addresses 1 through 7 */
6372 for (i = 1; i < 8; i++)
6373 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6374 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6375
6376 /* restore previous sfp cage power state */
6377 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6378 }
6379 } else {
6380 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6381 MII_OFFSET_ANY, MIIF_DOPAUSE);
6382 }
6383
6384 /*
6385 * If the MAC is PCH2 and failed to detect MII PHY, call
6386 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6387 */
6388 if ((sc->sc_type == WM_T_PCH2) &&
6389 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6390 wm_set_mdio_slow_mode_hv(sc);
6391 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6392 MII_OFFSET_ANY, MIIF_DOPAUSE);
6393 }
6394
6395 /*
6396 * (For ICH8 variants)
6397 * If PHY detection failed, use BM's r/w function and retry.
6398 */
6399 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6400 /* if failed, retry with *_bm_* */
6401 mii->mii_readreg = wm_gmii_bm_readreg;
6402 mii->mii_writereg = wm_gmii_bm_writereg;
6403
6404 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6405 MII_OFFSET_ANY, MIIF_DOPAUSE);
6406 }
6407
6408 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6409 		/* No PHY was found */
6410 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6411 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6412 sc->sc_phytype = WMPHY_NONE;
6413 } else {
6414 /*
6415 * PHY Found!
6416 * Check PHY type.
6417 */
6418 uint32_t model;
6419 struct mii_softc *child;
6420
6421 child = LIST_FIRST(&mii->mii_phys);
6422 if (device_is_a(child->mii_dev, "igphy")) {
6423 struct igphy_softc *isc = (struct igphy_softc *)child;
6424
6425 model = isc->sc_mii.mii_mpd_model;
6426 if (model == MII_MODEL_yyINTEL_I82566)
6427 sc->sc_phytype = WMPHY_IGP_3;
6428 }
6429
6430 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6431 }
6432 }
6433
6434 /*
6435 * wm_gmii_mediastatus: [ifmedia interface function]
6436 *
6437 * Get the current interface media status on a 1000BASE-T device.
6438 */
6439 static void
6440 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6441 {
6442 struct wm_softc *sc = ifp->if_softc;
6443
6444 ether_mediastatus(ifp, ifmr);
6445 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6446 | sc->sc_flowflags;
6447 }
6448
6449 /*
6450 * wm_gmii_mediachange: [ifmedia interface function]
6451 *
6452 * Set hardware to newly-selected media on a 1000BASE-T device.
6453 */
6454 static int
6455 wm_gmii_mediachange(struct ifnet *ifp)
6456 {
6457 struct wm_softc *sc = ifp->if_softc;
6458 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6459 int rc;
6460
6461 if ((ifp->if_flags & IFF_UP) == 0)
6462 return 0;
6463
6464 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6465 sc->sc_ctrl |= CTRL_SLU;
6466 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6467 || (sc->sc_type > WM_T_82543)) {
6468 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6469 } else {
6470 sc->sc_ctrl &= ~CTRL_ASDE;
6471 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6472 if (ife->ifm_media & IFM_FDX)
6473 sc->sc_ctrl |= CTRL_FD;
6474 switch (IFM_SUBTYPE(ife->ifm_media)) {
6475 case IFM_10_T:
6476 sc->sc_ctrl |= CTRL_SPEED_10;
6477 break;
6478 case IFM_100_TX:
6479 sc->sc_ctrl |= CTRL_SPEED_100;
6480 break;
6481 case IFM_1000_T:
6482 sc->sc_ctrl |= CTRL_SPEED_1000;
6483 break;
6484 default:
6485 panic("wm_gmii_mediachange: bad media 0x%x",
6486 ife->ifm_media);
6487 }
6488 }
6489 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6490 if (sc->sc_type <= WM_T_82543)
6491 wm_gmii_reset(sc);
6492
6493 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6494 return 0;
6495 return rc;
6496 }
6497
6498 #define MDI_IO CTRL_SWDPIN(2)
6499 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6500 #define MDI_CLK CTRL_SWDPIN(3)
6501
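/*
 * The 82543 has no MDIC register, so MII management frames are
 * bit-banged through two software-definable pins: each bit is driven
 * onto MDI_IO, MSB first, and latched by a low-high-low pulse on
 * MDI_CLK.
 */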
6502 static void
6503 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6504 {
6505 uint32_t i, v;
6506
6507 v = CSR_READ(sc, WMREG_CTRL);
6508 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6509 v |= MDI_DIR | CTRL_SWDPIO(3);
6510
6511 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6512 if (data & i)
6513 v |= MDI_IO;
6514 else
6515 v &= ~MDI_IO;
6516 CSR_WRITE(sc, WMREG_CTRL, v);
6517 delay(10);
6518 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6519 delay(10);
6520 CSR_WRITE(sc, WMREG_CTRL, v);
6521 delay(10);
6522 }
6523 }
6524
6525 static uint32_t
6526 i82543_mii_recvbits(struct wm_softc *sc)
6527 {
6528 uint32_t v, i, data = 0;
6529
6530 v = CSR_READ(sc, WMREG_CTRL);
6531 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6532 v |= CTRL_SWDPIO(3);
6533
6534 CSR_WRITE(sc, WMREG_CTRL, v);
6535 delay(10);
6536 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6537 delay(10);
6538 CSR_WRITE(sc, WMREG_CTRL, v);
6539 delay(10);
6540
6541 for (i = 0; i < 16; i++) {
6542 data <<= 1;
6543 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6544 delay(10);
6545 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6546 data |= 1;
6547 CSR_WRITE(sc, WMREG_CTRL, v);
6548 delay(10);
6549 }
6550
6551 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6552 delay(10);
6553 CSR_WRITE(sc, WMREG_CTRL, v);
6554 delay(10);
6555
6556 return data;
6557 }
6558
6559 #undef MDI_IO
6560 #undef MDI_DIR
6561 #undef MDI_CLK
6562
6563 /*
6564 * wm_gmii_i82543_readreg: [mii interface function]
6565 *
6566 * Read a PHY register on the GMII (i82543 version).
6567 */
6568 static int
6569 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6570 {
6571 struct wm_softc *sc = device_private(self);
6572 int rv;
6573
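	/*
	 * 32 bits of preamble, then the 14-bit start/opcode/phy/reg
	 * frame; the PHY answers with 16 data bits.
	 */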
6574 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6575 i82543_mii_sendbits(sc, reg | (phy << 5) |
6576 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6577 rv = i82543_mii_recvbits(sc) & 0xffff;
6578
6579 DPRINTF(WM_DEBUG_GMII,
6580 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6581 device_xname(sc->sc_dev), phy, reg, rv));
6582
6583 return rv;
6584 }
6585
6586 /*
6587 * wm_gmii_i82543_writereg: [mii interface function]
6588 *
6589 * Write a PHY register on the GMII (i82543 version).
6590 */
6591 static void
6592 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6593 {
6594 struct wm_softc *sc = device_private(self);
6595
6596 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6597 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6598 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6599 (MII_COMMAND_START << 30), 32);
6600 }
6601
6602 /*
6603 * wm_gmii_i82544_readreg: [mii interface function]
6604 *
6605 * Read a PHY register on the GMII.
6606 */
6607 static int
6608 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6609 {
6610 struct wm_softc *sc = device_private(self);
6611 uint32_t mdic = 0;
6612 int i, rv;
6613
6614 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6615 MDIC_REGADD(reg));
6616
6617 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6618 mdic = CSR_READ(sc, WMREG_MDIC);
6619 if (mdic & MDIC_READY)
6620 break;
6621 delay(50);
6622 }
6623
6624 if ((mdic & MDIC_READY) == 0) {
6625 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6626 device_xname(sc->sc_dev), phy, reg);
6627 rv = 0;
6628 } else if (mdic & MDIC_E) {
6629 #if 0 /* This is normal if no PHY is present. */
6630 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6631 device_xname(sc->sc_dev), phy, reg);
6632 #endif
6633 rv = 0;
6634 } else {
6635 rv = MDIC_DATA(mdic);
6636 if (rv == 0xffff)
6637 rv = 0;
6638 }
6639
6640 return rv;
6641 }
6642
6643 /*
6644 * wm_gmii_i82544_writereg: [mii interface function]
6645 *
6646 * Write a PHY register on the GMII.
6647 */
6648 static void
6649 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6650 {
6651 struct wm_softc *sc = device_private(self);
6652 uint32_t mdic = 0;
6653 int i;
6654
6655 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6656 MDIC_REGADD(reg) | MDIC_DATA(val));
6657
6658 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6659 mdic = CSR_READ(sc, WMREG_MDIC);
6660 if (mdic & MDIC_READY)
6661 break;
6662 delay(50);
6663 }
6664
6665 if ((mdic & MDIC_READY) == 0)
6666 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6667 device_xname(sc->sc_dev), phy, reg);
6668 else if (mdic & MDIC_E)
6669 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6670 device_xname(sc->sc_dev), phy, reg);
6671 }
6672
6673 /*
6674 * wm_gmii_i80003_readreg: [mii interface function]
6675 *
6676  *	Read a PHY register on the Kumeran bus (80003).
6677  * This could be handled by the PHY layer if we didn't have to lock the
6678  * resource ...
6679 */
6680 static int
6681 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6682 {
6683 struct wm_softc *sc = device_private(self);
6684 int sem;
6685 int rv;
6686
6687 if (phy != 1) /* only one PHY on kumeran bus */
6688 return 0;
6689
6690 sem = swfwphysem[sc->sc_funcid];
6691 if (wm_get_swfw_semaphore(sc, sem)) {
6692 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6693 __func__);
6694 return 0;
6695 }
6696
6697 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6698 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6699 reg >> GG82563_PAGE_SHIFT);
6700 } else {
6701 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6702 reg >> GG82563_PAGE_SHIFT);
6703 }
6704 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6705 delay(200);
6706 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6707 delay(200);
6708
6709 wm_put_swfw_semaphore(sc, sem);
6710 return rv;
6711 }
6712
6713 /*
6714 * wm_gmii_i80003_writereg: [mii interface function]
6715 *
6716  *	Write a PHY register on the Kumeran bus (80003).
6717  * This could be handled by the PHY layer if we didn't have to lock the
6718  * resource ...
6719 */
6720 static void
6721 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6722 {
6723 struct wm_softc *sc = device_private(self);
6724 int sem;
6725
6726 if (phy != 1) /* only one PHY on kumeran bus */
6727 return;
6728
6729 sem = swfwphysem[sc->sc_funcid];
6730 if (wm_get_swfw_semaphore(sc, sem)) {
6731 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6732 __func__);
6733 return;
6734 }
6735
6736 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6737 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6738 reg >> GG82563_PAGE_SHIFT);
6739 } else {
6740 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6741 reg >> GG82563_PAGE_SHIFT);
6742 }
6743 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6744 delay(200);
6745 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6746 delay(200);
6747
6748 wm_put_swfw_semaphore(sc, sem);
6749 }
6750
6751 /*
6752 * wm_gmii_bm_readreg: [mii interface function]
6753 *
6754  *	Read a PHY register on the BM PHY (82567).
6755  * This could be handled by the PHY layer if we didn't have to lock the
6756  * resource ...
6757 */
6758 static int
6759 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6760 {
6761 struct wm_softc *sc = device_private(self);
6762 int sem;
6763 int rv;
6764
6765 sem = swfwphysem[sc->sc_funcid];
6766 if (wm_get_swfw_semaphore(sc, sem)) {
6767 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6768 __func__);
6769 return 0;
6770 }
6771
6772 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6773 if (phy == 1)
6774 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6775 reg);
6776 else
6777 wm_gmii_i82544_writereg(self, phy,
6778 GG82563_PHY_PAGE_SELECT,
6779 reg >> GG82563_PAGE_SHIFT);
6780 }
6781
6782 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6783 wm_put_swfw_semaphore(sc, sem);
6784 return rv;
6785 }
6786
6787 /*
6788 * wm_gmii_bm_writereg: [mii interface function]
6789 *
6790  *	Write a PHY register on the BM PHY (82567).
6791  * This could be handled by the PHY layer if we didn't have to lock the
6792  * resource ...
6793 */
6794 static void
6795 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6796 {
6797 struct wm_softc *sc = device_private(self);
6798 int sem;
6799
6800 sem = swfwphysem[sc->sc_funcid];
6801 if (wm_get_swfw_semaphore(sc, sem)) {
6802 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6803 __func__);
6804 return;
6805 }
6806
6807 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6808 if (phy == 1)
6809 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6810 reg);
6811 else
6812 wm_gmii_i82544_writereg(self, phy,
6813 GG82563_PHY_PAGE_SELECT,
6814 reg >> GG82563_PAGE_SHIFT);
6815 }
6816
6817 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6818 wm_put_swfw_semaphore(sc, sem);
6819 }
6820
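/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  Access to
 *	the wakeup registers has to be enabled first via the enable
 *	register on page 769; the register number is then written
 *	through the address opcode and the datum transferred through
 *	the data opcode, after which the original page 769 enable
 *	value is restored.
 */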
6821 static void
6822 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6823 {
6824 struct wm_softc *sc = device_private(self);
6825 uint16_t regnum = BM_PHY_REG_NUM(offset);
6826 uint16_t wuce;
6827
6828 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6829 if (sc->sc_type == WM_T_PCH) {
6830 		/* XXX e1000 driver does nothing... why? */
6831 }
6832
6833 /* Set page 769 */
6834 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6835 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6836
6837 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6838
6839 wuce &= ~BM_WUC_HOST_WU_BIT;
6840 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6841 wuce | BM_WUC_ENABLE_BIT);
6842
6843 /* Select page 800 */
6844 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6845 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6846
6847 /* Write page 800 */
6848 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6849
6850 if (rd)
6851 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6852 else
6853 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6854
6855 /* Set page 769 */
6856 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6857 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6858
6859 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6860 }
6861
6862 /*
6863 * wm_gmii_hv_readreg: [mii interface function]
6864 *
6865  *	Read a PHY register on the HV (PCH) PHY.
6866  * This could be handled by the PHY layer if we didn't have to lock the
6867  * resource ...
6868 */
6869 static int
6870 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6871 {
6872 struct wm_softc *sc = device_private(self);
6873 uint16_t page = BM_PHY_REG_PAGE(reg);
6874 uint16_t regnum = BM_PHY_REG_NUM(reg);
6875 uint16_t val;
6876 int rv;
6877
6878 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6879 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6880 __func__);
6881 return 0;
6882 }
6883
6884 /* XXX Workaround failure in MDIO access while cable is disconnected */
6885 if (sc->sc_phytype == WMPHY_82577) {
6886 /* XXX must write */
6887 }
6888
6889 /* Page 800 works differently than the rest so it has its own func */
6890 if (page == BM_WUC_PAGE) {
6891 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6892 		return val;
6893 }
6894
6895 /*
6896 	 * Pages below 768 work differently from the rest, so they have
6897 	 * their own function
6898 */
6899 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6900 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6901 		return 0;
6902 }
6903
6904 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6905 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6906 page << BME1000_PAGE_SHIFT);
6907 }
6908
6909 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6910 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6911 return rv;
6912 }
6913
6914 /*
6915 * wm_gmii_hv_writereg: [mii interface function]
6916 *
6917  *	Write a PHY register on the HV (PCH) PHY.
6918  * This could be handled by the PHY layer if we didn't have to lock the
6919  * resource ...
6920 */
6921 static void
6922 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6923 {
6924 struct wm_softc *sc = device_private(self);
6925 uint16_t page = BM_PHY_REG_PAGE(reg);
6926 uint16_t regnum = BM_PHY_REG_NUM(reg);
6927
6928 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6929 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6930 __func__);
6931 return;
6932 }
6933
6934 /* XXX Workaround failure in MDIO access while cable is disconnected */
6935
6936 /* Page 800 works differently than the rest so it has its own func */
6937 if (page == BM_WUC_PAGE) {
6938 uint16_t tmp;
6939
6940 tmp = val;
6941 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6942 		return;
6943 }
6944
6945 /*
6946 	 * Pages below 768 work differently from the rest, so they have
6947 	 * their own function
6948 */
6949 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6950 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6951 		return;
6952 }
6953
6954 /*
6955 * XXX Workaround MDIO accesses being disabled after entering IEEE
6956 * Power Down (whenever bit 11 of the PHY control register is set)
6957 */
6958
6959 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6960 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6961 page << BME1000_PAGE_SHIFT);
6962 }
6963
6964 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6965 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6966 }
6967
6968 /*
6969 * wm_sgmii_readreg: [mii interface function]
6970 *
6971  *	Read a PHY register on the SGMII.
6972  * This could be handled by the PHY layer if we didn't have to lock the
6973  * resource ...
6974 */
6975 static int
6976 wm_sgmii_readreg(device_t self, int phy, int reg)
6977 {
6978 struct wm_softc *sc = device_private(self);
6979 uint32_t i2ccmd;
6980 int i, rv;
6981
6982 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6983 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6984 __func__);
6985 return 0;
6986 }
6987
6988 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6989 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6990 | I2CCMD_OPCODE_READ;
6991 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6992
6993 /* Poll the ready bit */
6994 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6995 delay(50);
6996 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6997 if (i2ccmd & I2CCMD_READY)
6998 break;
6999 }
7000 if ((i2ccmd & I2CCMD_READY) == 0)
7001 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7002 if ((i2ccmd & I2CCMD_ERROR) != 0)
7003 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7004
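	/* The two data bytes in I2CCMD come back swapped; restore host order. */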
7005 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7006
7007 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7008 return rv;
7009 }
7010
7011 /*
7012 * wm_sgmii_writereg: [mii interface function]
7013 *
7014 * Write a PHY register on the SGMII.
7015 * This could be handled by the PHY layer if we didn't have to lock the
7016  * resource ...
7017 */
7018 static void
7019 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7020 {
7021 struct wm_softc *sc = device_private(self);
7022 uint32_t i2ccmd;
7023 int i;
7024
7025 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7026 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7027 __func__);
7028 return;
7029 }
7030
7031 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7032 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7033 | I2CCMD_OPCODE_WRITE;
7034 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7035
7036 /* Poll the ready bit */
7037 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7038 delay(50);
7039 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7040 if (i2ccmd & I2CCMD_READY)
7041 break;
7042 }
7043 if ((i2ccmd & I2CCMD_READY) == 0)
7044 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7045 if ((i2ccmd & I2CCMD_ERROR) != 0)
7046 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7047
7048 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7049 }
7050
7051 /*
7052 * wm_gmii_82580_readreg: [mii interface function]
7053 *
7054 * Read a PHY register on the 82580 and I350.
7055 * This could be handled by the PHY layer if we didn't have to lock the
7056  * resource ...
7057 */
7058 static int
7059 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7060 {
7061 struct wm_softc *sc = device_private(self);
7062 int sem;
7063 int rv;
7064
7065 sem = swfwphysem[sc->sc_funcid];
7066 if (wm_get_swfw_semaphore(sc, sem)) {
7067 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7068 __func__);
7069 return 0;
7070 }
7071
7072 rv = wm_gmii_i82544_readreg(self, phy, reg);
7073
7074 wm_put_swfw_semaphore(sc, sem);
7075 return rv;
7076 }
7077
7078 /*
7079 * wm_gmii_82580_writereg: [mii interface function]
7080 *
7081 * Write a PHY register on the 82580 and I350.
7082 * This could be handled by the PHY layer if we didn't have to lock the
7083  * resource ...
7084 */
7085 static void
7086 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7087 {
7088 struct wm_softc *sc = device_private(self);
7089 int sem;
7090
7091 sem = swfwphysem[sc->sc_funcid];
7092 if (wm_get_swfw_semaphore(sc, sem)) {
7093 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7094 __func__);
7095 return;
7096 }
7097
7098 wm_gmii_i82544_writereg(self, phy, reg, val);
7099
7100 wm_put_swfw_semaphore(sc, sem);
7101 }
7102
7103 /*
7104 * wm_gmii_statchg: [mii interface function]
7105 *
7106 * Callback from MII layer when media changes.
7107 */
7108 static void
7109 wm_gmii_statchg(struct ifnet *ifp)
7110 {
7111 struct wm_softc *sc = ifp->if_softc;
7112 struct mii_data *mii = &sc->sc_mii;
7113
7114 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7115 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7116 sc->sc_fcrtl &= ~FCRTL_XONE;
7117
7118 /*
7119 * Get flow control negotiation result.
7120 */
7121 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7122 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7123 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7124 mii->mii_media_active &= ~IFM_ETH_FMASK;
7125 }
7126
7127 if (sc->sc_flowflags & IFM_FLOW) {
7128 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7129 sc->sc_ctrl |= CTRL_TFCE;
7130 sc->sc_fcrtl |= FCRTL_XONE;
7131 }
7132 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7133 sc->sc_ctrl |= CTRL_RFCE;
7134 }
7135
7136 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7137 DPRINTF(WM_DEBUG_LINK,
7138 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7139 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7140 } else {
7141 DPRINTF(WM_DEBUG_LINK,
7142 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7143 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7144 }
7145
7146 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7147 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7148 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7149 : WMREG_FCRTL, sc->sc_fcrtl);
7150 if (sc->sc_type == WM_T_80003) {
7151 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7152 case IFM_1000_T:
7153 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7154 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7155 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7156 break;
7157 default:
7158 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7159 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7160 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7161 break;
7162 }
7163 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7164 }
7165 }
7166
7167 /*
7168 * wm_kmrn_readreg:
7169 *
7170 * Read a kumeran register
7171 */
7172 static int
7173 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7174 {
7175 int rv;
7176
7177 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7178 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7179 			aprint_error_dev(sc->sc_dev,
7180 			    "%s: failed to get semaphore\n", __func__);
7181 			return 0;
7182 		}
7183 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7184 		if (wm_get_swfwhw_semaphore(sc)) {
7185 			aprint_error_dev(sc->sc_dev,
7186 			    "%s: failed to get semaphore\n", __func__);
7187 			return 0;
7188 		}
7189 	}
7190
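	/*
	 * A Kumeran read is started by writing the register offset with
	 * the read-enable bit set; the result is then picked up from
	 * the data bits of the same register.
	 */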
7191 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7192 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7193 KUMCTRLSTA_REN);
7194 delay(2);
7195
7196 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7197
7198 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7199 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7200 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7201 wm_put_swfwhw_semaphore(sc);
7202
7203 return rv;
7204 }
7205
7206 /*
7207 * wm_kmrn_writereg:
7208 *
7209 * Write a kumeran register
7210 */
7211 static void
7212 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7213 {
7214
7215 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7216 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7217 			aprint_error_dev(sc->sc_dev,
7218 			    "%s: failed to get semaphore\n", __func__);
7219 			return;
7220 		}
7221 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7222 		if (wm_get_swfwhw_semaphore(sc)) {
7223 			aprint_error_dev(sc->sc_dev,
7224 			    "%s: failed to get semaphore\n", __func__);
7225 			return;
7226 		}
7227 	}
7228
7229 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7230 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7231 (val & KUMCTRLSTA_MASK));
7232
7233 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7234 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7235 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7236 wm_put_swfwhw_semaphore(sc);
7237 }
7238
7239 static int
7240 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7241 {
7242 uint32_t eecd = 0;
7243
7244 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7245 || sc->sc_type == WM_T_82583) {
7246 eecd = CSR_READ(sc, WMREG_EECD);
7247
7248 /* Isolate bits 15 & 16 */
7249 eecd = ((eecd >> 15) & 0x03);
7250
7251 /* If both bits are set, device is Flash type */
7252 if (eecd == 0x03)
7253 return 0;
7254 }
7255 return 1;
7256 }
7257
7258 static int
7259 wm_get_swsm_semaphore(struct wm_softc *sc)
7260 {
7261 int32_t timeout;
7262 uint32_t swsm;
7263
7264 /* Get the FW semaphore. */
7265 timeout = 1000 + 1; /* XXX */
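	/*
	 * Try to set SWESMBI and read it back: only one agent can hold
	 * the bit, so seeing it set after our write means the semaphore
	 * is ours.
	 */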
7266 while (timeout) {
7267 swsm = CSR_READ(sc, WMREG_SWSM);
7268 swsm |= SWSM_SWESMBI;
7269 CSR_WRITE(sc, WMREG_SWSM, swsm);
7270 /* if we managed to set the bit we got the semaphore. */
7271 swsm = CSR_READ(sc, WMREG_SWSM);
7272 if (swsm & SWSM_SWESMBI)
7273 break;
7274
7275 delay(50);
7276 timeout--;
7277 }
7278
7279 if (timeout == 0) {
7280 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7281 /* Release semaphores */
7282 wm_put_swsm_semaphore(sc);
7283 return 1;
7284 }
7285 return 0;
7286 }
7287
7288 static void
7289 wm_put_swsm_semaphore(struct wm_softc *sc)
7290 {
7291 uint32_t swsm;
7292
7293 swsm = CSR_READ(sc, WMREG_SWSM);
7294 swsm &= ~(SWSM_SWESMBI);
7295 CSR_WRITE(sc, WMREG_SWSM, swsm);
7296 }
7297
7298 static int
7299 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7300 {
7301 uint32_t swfw_sync;
7302 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7303 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7304 	int timeout;
7305
7306 for (timeout = 0; timeout < 200; timeout++) {
7307 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7308 if (wm_get_swsm_semaphore(sc)) {
7309 aprint_error_dev(sc->sc_dev,
7310 "%s: failed to get semaphore\n",
7311 __func__);
7312 return 1;
7313 }
7314 }
7315 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7316 if ((swfw_sync & (swmask | fwmask)) == 0) {
7317 swfw_sync |= swmask;
7318 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7319 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7320 wm_put_swsm_semaphore(sc);
7321 return 0;
7322 }
7323 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7324 wm_put_swsm_semaphore(sc);
7325 delay(5000);
7326 }
7327 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7328 device_xname(sc->sc_dev), mask, swfw_sync);
7329 return 1;
7330 }
7331
7332 static void
7333 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7334 {
7335 uint32_t swfw_sync;
7336
7337 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7338 while (wm_get_swsm_semaphore(sc) != 0)
7339 continue;
7340 }
7341 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7342 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7343 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7344 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7345 wm_put_swsm_semaphore(sc);
7346 }
7347
7348 static int
7349 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7350 {
7351 uint32_t ext_ctrl;
7352 	int timeout;
7353
7354 for (timeout = 0; timeout < 200; timeout++) {
7355 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7356 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7357 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7358
7359 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7360 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7361 return 0;
7362 delay(5000);
7363 }
7364 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7365 device_xname(sc->sc_dev), ext_ctrl);
7366 return 1;
7367 }
7368
7369 static void
7370 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7371 {
7372 uint32_t ext_ctrl;
7373 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7374 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7375 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7376 }
7377
7378 static int
7379 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7380 {
7381 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7382 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7383
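	/*
	 * act_offset is the byte offset of the bank-0 signature byte
	 * (the high byte of the NVM signature word); bank 1's copy sits
	 * one flash-bank size (in bytes) further on.
	 */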
7384 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
7385 /* Value of bit 22 corresponds to the flash bank we're on. */
7386 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
7387 } else {
7388 uint8_t sig_byte;
7389 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7390 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE)
7391 *bank = 0;
7392 else {
7393 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7394 &sig_byte);
7395 if ((sig_byte & ICH_NVM_VALID_SIG_MASK)
7396 == ICH_NVM_SIG_VALUE)
7397 *bank = 1;
7398 else {
7399 aprint_error_dev(sc->sc_dev,
7400 "EEPROM not present\n");
7401 return -1;
7402 }
7403 }
7404 }
7405
7406 return 0;
7407 }
7408
7409 /******************************************************************************
7410 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7411 * register.
7412 *
7413 * sc - Struct containing variables accessed by shared code
7414 * offset - offset of word in the EEPROM to read
7415 * data - word read from the EEPROM
7416 * words - number of words to read
7417 *****************************************************************************/
7418 static int
7419 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7420 {
7421 int32_t error = 0;
7422 uint32_t flash_bank = 0;
7423 uint32_t act_offset = 0;
7424 uint32_t bank_offset = 0;
7425 uint16_t word = 0;
7426 uint16_t i = 0;
7427
7428 /* We need to know which is the valid flash bank. In the event
7429 * that we didn't allocate eeprom_shadow_ram, we may not be
7430 * managing flash_bank. So it cannot be trusted and needs
7431 * to be updated with each read.
7432 */
7433 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7434 if (error) {
7435 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7436 __func__);
7437 return error;
7438 }
7439
7440 /*
7441 * Adjust offset appropriately if we're on bank 1 - adjust for word
7442 * size
7443 */
7444 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7445
7446 error = wm_get_swfwhw_semaphore(sc);
7447 if (error) {
7448 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7449 __func__);
7450 return error;
7451 }
7452
7453 for (i = 0; i < words; i++) {
7454 /* The NVM part needs a byte offset, hence * 2 */
7455 act_offset = bank_offset + ((offset + i) * 2);
7456 error = wm_read_ich8_word(sc, act_offset, &word);
7457 if (error) {
7458 aprint_error_dev(sc->sc_dev,
7459 "%s: failed to read NVM\n", __func__);
7460 break;
7461 }
7462 data[i] = word;
7463 }
7464
7465 wm_put_swfwhw_semaphore(sc);
7466 return error;
7467 }
7468
7469 /******************************************************************************
7470 * This function does initial flash setup so that a new read/write/erase cycle
7471 * can be started.
7472 *
7473 * sc - The pointer to the hw structure
7474 ****************************************************************************/
7475 static int32_t
7476 wm_ich8_cycle_init(struct wm_softc *sc)
7477 {
7478 uint16_t hsfsts;
7479 int32_t error = 1;
7480 int32_t i = 0;
7481
7482 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7483
7484 	/* Check the Flash Descriptor Valid bit in HW status */
7485 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7486 return error;
7487 }
7488
7489 	/* Clear FCERR and DAEL in HW status by writing 1s */
7491 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7492
7493 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7494
7495 /*
7496 	 * Either we should have a hardware SPI cycle-in-progress bit to
7497 	 * check against in order to start a new cycle, or the FDONE bit
7498 	 * should be changed in the hardware so that it reads 1 after a
7499 	 * hardware reset, which could then be used to tell whether a cycle
7500 	 * is in progress or has completed.  We should also have some
7501 	 * software semaphore mechanism to guard FDONE or the in-progress
7502 	 * bit so that accesses by two threads are serialized, or some way
7503 	 * to keep two threads from starting a cycle at the same time.
7504 */
7505
7506 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7507 /*
7508 * There is no cycle running at present, so we can start a
7509 * cycle
7510 */
7511
7512 /* Begin by setting Flash Cycle Done. */
7513 hsfsts |= HSFSTS_DONE;
7514 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7515 error = 0;
7516 } else {
7517 /*
7518 		 * otherwise poll for some time so the current cycle has a
7519 		 * chance to end before giving up.
7520 */
7521 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7522 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7523 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7524 error = 0;
7525 break;
7526 }
7527 delay(1);
7528 }
7529 if (error == 0) {
7530 /*
7531 			 * The previous cycle ended before we timed out;
7532 			 * now set the Flash Cycle Done bit.
7533 */
7534 hsfsts |= HSFSTS_DONE;
7535 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7536 }
7537 }
7538 return error;
7539 }
7540
7541 /******************************************************************************
7542 * This function starts a flash cycle and waits for its completion
7543 *
7544 * sc - The pointer to the hw structure
7545 ****************************************************************************/
7546 static int32_t
7547 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7548 {
7549 uint16_t hsflctl;
7550 uint16_t hsfsts;
7551 int32_t error = 1;
7552 uint32_t i = 0;
7553
7554 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7555 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7556 hsflctl |= HSFCTL_GO;
7557 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7558
7559 /* wait till FDONE bit is set to 1 */
7560 do {
7561 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7562 if (hsfsts & HSFSTS_DONE)
7563 break;
7564 delay(1);
7565 i++;
7566 } while (i < timeout);
7567 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7568 error = 0;
7569
7570 return error;
7571 }
7572
7573 /******************************************************************************
7574 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7575 *
7576 * sc - The pointer to the hw structure
7577 * index - The index of the byte or word to read.
7578 * size - Size of data to read, 1=byte 2=word
7579 * data - Pointer to the word to store the value read.
7580 *****************************************************************************/
7581 static int32_t
7582 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7583 uint32_t size, uint16_t* data)
7584 {
7585 uint16_t hsfsts;
7586 uint16_t hsflctl;
7587 uint32_t flash_linear_address;
7588 uint32_t flash_data = 0;
7589 int32_t error = 1;
7590 int32_t count = 0;
7591
7592 	if (size < 1 || size > 2 || data == NULL ||
7593 index > ICH_FLASH_LINEAR_ADDR_MASK)
7594 return error;
7595
7596 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7597 sc->sc_ich8_flash_base;
7598
7599 do {
7600 delay(1);
7601 /* Steps */
7602 error = wm_ich8_cycle_init(sc);
7603 if (error)
7604 break;
7605
7606 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7607 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7608 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7609 & HSFCTL_BCOUNT_MASK;
7610 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7611 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7612
7613 /*
7614 * Write the last 24 bits of index into Flash Linear address
7615 * field in Flash Address
7616 */
7617 /* TODO: TBD maybe check the index against the size of flash */
7618
7619 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7620
7621 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7622
7623		/*
7624		 * Check whether FCERR is set; if it is, clear it and retry
7625		 * the whole sequence a few more times.  Otherwise read the
7626		 * result in from Flash Data0, least significant byte
7627		 * first.
7628		 */
7629 if (error == 0) {
7630 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7631 if (size == 1)
7632 *data = (uint8_t)(flash_data & 0x000000FF);
7633 else if (size == 2)
7634 *data = (uint16_t)(flash_data & 0x0000FFFF);
7635 break;
7636 } else {
7637			/*
7638			 * If we've gotten here, things are probably completely
7639			 * hosed, but if the error condition was detected it
7640			 * won't hurt to retry the sequence, up to
7641			 * ICH_FLASH_CYCLE_REPEAT_COUNT times in all.
7642 */
7643 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7644 if (hsfsts & HSFSTS_ERR) {
7645				/* Retry the sequence before giving up. */
7646 continue;
7647 } else if ((hsfsts & HSFSTS_DONE) == 0)
7648 break;
7649 }
7650 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7651
7652 return error;
7653 }
7654
7655 /******************************************************************************
7656 * Reads a single byte from the NVM using the ICH8 flash access registers.
7657 *
7658 * sc - pointer to the softc structure
7659 * index - The index of the byte to read.
7660 * data - Pointer to a byte to store the value read.
7661 *****************************************************************************/
7662 static int32_t
7663 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7664 {
7665 int32_t status;
7666 uint16_t word = 0;
7667
7668 status = wm_read_ich8_data(sc, index, 1, &word);
7669 if (status == 0)
7670 *data = (uint8_t)word;
7671 else
7672 *data = 0;
7673
7674 return status;
7675 }
7676
7677 /******************************************************************************
7678 * Reads a word from the NVM using the ICH8 flash access registers.
7679 *
7680 * sc - pointer to the softc structure
7681 * index - The starting byte index of the word to read.
7682 * data - Pointer to a word to store the value read.
7683 *****************************************************************************/
7684 static int32_t
7685 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7686 {
7687 int32_t status;
7688
7689 status = wm_read_ich8_data(sc, index, 2, data);
7690 return status;
7691 }
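
/*
 * Illustrative sketch (not part of the driver): reading a run of words
 * from the NVM with the accessor above.  Note that the index is a byte
 * offset, so consecutive words are two bytes apart; the helper name and
 * its parameters are hypothetical.
 */
#if 0
static int32_t
wm_read_ich8_words(struct wm_softc *sc, uint32_t byte_off, int nwords,
    uint16_t *buf)
{
	int32_t status = 0;
	int i;

	for (i = 0; i < nwords; i++) {
		status = wm_read_ich8_word(sc, byte_off + (i * 2), &buf[i]);
		if (status != 0)
			break;
	}

	return status;
}
#endif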
7692
7693 static int
7694 wm_check_mng_mode(struct wm_softc *sc)
7695 {
7696 int rv;
7697
7698 switch (sc->sc_type) {
7699 case WM_T_ICH8:
7700 case WM_T_ICH9:
7701 case WM_T_ICH10:
7702 case WM_T_PCH:
7703 case WM_T_PCH2:
7704 rv = wm_check_mng_mode_ich8lan(sc);
7705 break;
7706 case WM_T_82574:
7707 case WM_T_82583:
7708 rv = wm_check_mng_mode_82574(sc);
7709 break;
7710 case WM_T_82571:
7711 case WM_T_82572:
7712 case WM_T_82573:
7713 case WM_T_80003:
7714 rv = wm_check_mng_mode_generic(sc);
7715 break;
7716 default:
7717		/* nothing to do */
7718 rv = 0;
7719 break;
7720 }
7721
7722 return rv;
7723 }
7724
7725 static int
7726 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7727 {
7728 uint32_t fwsm;
7729
7730 fwsm = CSR_READ(sc, WMREG_FWSM);
7731
7732 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7733 return 1;
7734
7735 return 0;
7736 }
7737
7738 static int
7739 wm_check_mng_mode_82574(struct wm_softc *sc)
7740 {
7741 uint16_t data;
7742
7743 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7744
7745 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7746 return 1;
7747
7748 return 0;
7749 }
7750
7751 static int
7752 wm_check_mng_mode_generic(struct wm_softc *sc)
7753 {
7754 uint32_t fwsm;
7755
7756 fwsm = CSR_READ(sc, WMREG_FWSM);
7757
7758 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7759 return 1;
7760
7761 return 0;
7762 }
7763
7764 static int
7765 wm_enable_mng_pass_thru(struct wm_softc *sc)
7766 {
7767 uint32_t manc, fwsm, factps;
7768
7769 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7770 return 0;
7771
7772 manc = CSR_READ(sc, WMREG_MANC);
7773
7774 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7775 device_xname(sc->sc_dev), manc));
7776 if (((manc & MANC_RECV_TCO_EN) == 0)
7777 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7778 return 0;
7779
7780 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7781 fwsm = CSR_READ(sc, WMREG_FWSM);
7782 factps = CSR_READ(sc, WMREG_FACTPS);
7783 if (((factps & FACTPS_MNGCG) == 0)
7784 && ((fwsm & FWSM_MODE_MASK)
7785 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7786 return 1;
7787 } else if (((manc & MANC_SMBUS_EN) != 0)
7788 && ((manc & MANC_ASF_EN) == 0))
7789 return 1;
7790
7791 return 0;
7792 }
7793
7794 static int
7795 wm_check_reset_block(struct wm_softc *sc)
7796 {
7797 uint32_t reg;
7798
7799 switch (sc->sc_type) {
7800 case WM_T_ICH8:
7801 case WM_T_ICH9:
7802 case WM_T_ICH10:
7803 case WM_T_PCH:
7804 case WM_T_PCH2:
7805 reg = CSR_READ(sc, WMREG_FWSM);
7806 if ((reg & FWSM_RSPCIPHY) != 0)
7807 return 0;
7808 else
7809 return -1;
7810 break;
7811 case WM_T_82571:
7812 case WM_T_82572:
7813 case WM_T_82573:
7814 case WM_T_82574:
7815 case WM_T_82583:
7816 case WM_T_80003:
7817 reg = CSR_READ(sc, WMREG_MANC);
7818 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7819 return -1;
7820 else
7821 return 0;
7822 break;
7823 default:
7824 /* no problem */
7825 break;
7826 }
7827
7828 return 0;
7829 }
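
/*
 * Illustrative sketch (not part of the driver): callers that reset the
 * PHY are expected to honor the reset block, since manageability
 * firmware may own the PHY.  The guard below is a hypothetical example
 * of the intended usage; the delay value is illustrative only.
 */
#if 0
static void
wm_example_phy_reset(struct wm_softc *sc)
{

	/* Firmware blocks PHY resets; leave the PHY alone */
	if (wm_check_reset_block(sc) != 0)
		return;

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	delay(100);
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
}
#endif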
7830
7831 static void
7832 wm_get_hw_control(struct wm_softc *sc)
7833 {
7834 uint32_t reg;
7835
7836 switch (sc->sc_type) {
7837 case WM_T_82573:
7838 reg = CSR_READ(sc, WMREG_SWSM);
7839 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7840 break;
7841 case WM_T_82571:
7842 case WM_T_82572:
7843 case WM_T_82574:
7844 case WM_T_82583:
7845 case WM_T_80003:
7846 case WM_T_ICH8:
7847 case WM_T_ICH9:
7848 case WM_T_ICH10:
7849 case WM_T_PCH:
7850 case WM_T_PCH2:
7851 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7852 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7853 break;
7854 default:
7855 break;
7856 }
7857 }
7858
7859 static void
7860 wm_release_hw_control(struct wm_softc *sc)
7861 {
7862 uint32_t reg;
7863
7864 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7865 return;
7866
7867 if (sc->sc_type == WM_T_82573) {
7868 reg = CSR_READ(sc, WMREG_SWSM);
7869 reg &= ~SWSM_DRV_LOAD;
7870		CSR_WRITE(sc, WMREG_SWSM, reg);
7871 } else {
7872 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7873 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7874 }
7875 }
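
/*
 * Illustrative sketch (not part of the driver): wm_get_hw_control() and
 * wm_release_hw_control() bracket the period in which the driver, not
 * the management firmware, owns the hardware.  A typical (hypothetical)
 * pairing looks like this.
 */
#if 0
	/* On init: tell the firmware the driver has taken over */
	wm_get_hw_control(sc);

	/* ... normal operation ... */

	/* On detach/suspend: hand control back to the firmware */
	wm_release_hw_control(sc);
#endif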
7876
7877 /* XXX Currently TBI only */
7878 static int
7879 wm_check_for_link(struct wm_softc *sc)
7880 {
7881 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7882 uint32_t rxcw;
7883 uint32_t ctrl;
7884 uint32_t status;
7885 uint32_t sig;
7886
7887 rxcw = CSR_READ(sc, WMREG_RXCW);
7888 ctrl = CSR_READ(sc, WMREG_CTRL);
7889 status = CSR_READ(sc, WMREG_STATUS);
7890
7891 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7892
7893 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7894 device_xname(sc->sc_dev), __func__,
7895 ((ctrl & CTRL_SWDPIN(1)) == sig),
7896 ((status & STATUS_LU) != 0),
7897 ((rxcw & RXCW_C) != 0)
7898 ));
7899
7900 /*
7901 * SWDPIN LU RXCW
7902 * 0 0 0
7903 * 0 0 1 (should not happen)
7904 * 0 1 0 (should not happen)
7905 * 0 1 1 (should not happen)
7906 *	1    0	  0	Disable autonegotiation and force link up
7907 *	1    0	  1	got /C/ but link not up yet
7908 *	1    1	  0	(link up)
7909 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
7910 *
7911 */
7912 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7913 && ((status & STATUS_LU) == 0)
7914 && ((rxcw & RXCW_C) == 0)) {
7915		DPRINTF(WM_DEBUG_LINK, ("%s: force link up and full-duplex\n",
7916 __func__));
7917 sc->sc_tbi_linkup = 0;
7918 /* Disable auto-negotiation in the TXCW register */
7919 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7920
7921		/*
7922		 * Force link up and also force full duplex.
7923		 *
7924		 * NOTE: the hardware updates TFCE and RFCE in CTRL
7925		 * automatically, so we should keep sc->sc_ctrl in sync.
7926		 */
7927 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7928 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7929 } else if (((status & STATUS_LU) != 0)
7930 && ((rxcw & RXCW_C) != 0)
7931 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7932 sc->sc_tbi_linkup = 1;
7933		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonegotiation\n",
7934 __func__));
7935 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7936 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7937 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7938 && ((rxcw & RXCW_C) != 0)) {
7939 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7940 } else {
7941 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7942 status));
7943 }
7944
7945 return 0;
7946 }
7947
7948 /* Work-around for 82566 Kumeran PCS lock loss */
7949 static void
7950 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7951 {
7952 int miistatus, active, i;
7953 int reg;
7954
7955 miistatus = sc->sc_mii.mii_media_status;
7956
7957 /* If the link is not up, do nothing */
7958	if ((miistatus & IFM_ACTIVE) == 0)
7959 return;
7960
7961 active = sc->sc_mii.mii_media_active;
7962
7963 /* Nothing to do if the link is other than 1Gbps */
7964 if (IFM_SUBTYPE(active) != IFM_1000_T)
7965 return;
7966
7967 for (i = 0; i < 10; i++) {
7968 /* read twice */
7969 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7970 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7971		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7972 goto out; /* GOOD! */
7973
7974 /* Reset the PHY */
7975 wm_gmii_reset(sc);
7976 delay(5*1000);
7977 }
7978
7979 /* Disable GigE link negotiation */
7980 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7981 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7982 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7983
7984 /*
7985 * Call gig speed drop workaround on Gig disable before accessing
7986 * any PHY registers.
7987 */
7988 wm_gig_downshift_workaround_ich8lan(sc);
7989
7990 out:
7991 return;
7992 }
7993
7994 /* Workaround for the problem where WOL from S5 stops working */
7995 static void
7996 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7997 {
7998 uint16_t kmrn_reg;
7999
8000 /* Only for igp3 */
8001 if (sc->sc_phytype == WMPHY_IGP_3) {
8002 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8003 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8004 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8005 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8006 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8007 }
8008 }
8009
8010 #ifdef WM_WOL
8011 /* Power down workaround on D3 */
8012 static void
8013 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8014 {
8015 uint32_t reg;
8016 int i;
8017
8018 for (i = 0; i < 2; i++) {
8019 /* Disable link */
8020 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8021 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8022 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8023
8024 /*
8025 * Call gig speed drop workaround on Gig disable before
8026 * accessing any PHY registers
8027 */
8028 if (sc->sc_type == WM_T_ICH8)
8029 wm_gig_downshift_workaround_ich8lan(sc);
8030
8031 /* Write VR power-down enable */
8032 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8033 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8034 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8035 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8036
8037 /* Read it back and test */
8038 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8039 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8040 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8041 break;
8042
8043 /* Issue PHY reset and repeat at most one more time */
8044 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8045 }
8046 }
8047 #endif /* WM_WOL */
8048
8049 /*
8050 * Workaround for PCH PHYs
8051 * XXX should be moved to new PHY driver?
8052 */
8053 static void
8054 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8055 {
8056 if (sc->sc_phytype == WMPHY_82577)
8057 wm_set_mdio_slow_mode_hv(sc);
8058
8059 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8060
8061 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8062
8063 /* 82578 */
8064 if (sc->sc_phytype == WMPHY_82578) {
8065 /* PCH rev. < 3 */
8066 if (sc->sc_rev < 3) {
8067 /* XXX 6 bit shift? Why? Is it page2? */
8068 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8069 0x66c0);
8070 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8071 0xffff);
8072 }
8073
8074 /* XXX phy rev. < 2 */
8075 }
8076
8077 /* Select page 0 */
8078
8079 /* XXX acquire semaphore */
8080 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8081 /* XXX release semaphore */
8082
8083 /*
8084	 * Configure the K1 Si workaround during PHY reset, assuming there
8085	 * is a link, so that K1 is disabled when the link runs at 1Gbps.
8086 */
8087 wm_k1_gig_workaround_hv(sc, 1);
8088 }
8089
8090 static void
8091 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8092 {
8093
8094 wm_set_mdio_slow_mode_hv(sc);
8095 }
8096
8097 static void
8098 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8099 {
8100 int k1_enable = sc->sc_nvm_k1_enabled;
8101
8102 /* XXX acquire semaphore */
8103
8104 if (link) {
8105 k1_enable = 0;
8106
8107 /* Link stall fix for link up */
8108 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8109 } else {
8110 /* Link stall fix for link down */
8111 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8112 }
8113
8114 wm_configure_k1_ich8lan(sc, k1_enable);
8115
8116 /* XXX release semaphore */
8117 }
8118
8119 static void
8120 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8121 {
8122 uint32_t reg;
8123
8124 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8125 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8126 reg | HV_KMRN_MDIO_SLOW);
8127 }
8128
8129 static void
8130 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8131 {
8132 uint32_t ctrl, ctrl_ext, tmp;
8133 uint16_t kmrn_reg;
8134
8135 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8136
8137 if (k1_enable)
8138 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8139 else
8140 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8141
8142 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8143
8144 delay(20);
8145
8146 ctrl = CSR_READ(sc, WMREG_CTRL);
8147 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8148
8149 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8150 tmp |= CTRL_FRCSPD;
8151
8152 CSR_WRITE(sc, WMREG_CTRL, tmp);
8153 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8154 delay(20);
8155
8156 CSR_WRITE(sc, WMREG_CTRL, ctrl);
8157 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8158 delay(20);
8159 }
8160
8161 static void
8162 wm_smbustopci(struct wm_softc *sc)
8163 {
8164 uint32_t fwsm;
8165
8166 fwsm = CSR_READ(sc, WMREG_FWSM);
8167 if (((fwsm & FWSM_FW_VALID) == 0)
8168 && ((wm_check_reset_block(sc) == 0))) {
8169 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8170 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8171 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8172 delay(10);
8173 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8174 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8175 delay(50*1000);
8176
8177 /*
8178 * Gate automatic PHY configuration by hardware on non-managed
8179 * 82579
8180 */
8181 if (sc->sc_type == WM_T_PCH2)
8182 wm_gate_hw_phy_config_ich8lan(sc, 1);
8183 }
8184 }
8185
8186 static void
8187 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8188 {
8189 uint32_t gcr;
8190 pcireg_t ctrl2;
8191
8192 gcr = CSR_READ(sc, WMREG_GCR);
8193
8194 /* Only take action if timeout value is defaulted to 0 */
8195 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8196 goto out;
8197
8198 if ((gcr & GCR_CAP_VER2) == 0) {
8199 gcr |= GCR_CMPL_TMOUT_10MS;
8200 goto out;
8201 }
8202
8203 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8204 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
8205 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
8206 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8207 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
8208
8209 out:
8210 /* Disable completion timeout resend */
8211 gcr &= ~GCR_CMPL_TMOUT_RESEND;
8212
8213 CSR_WRITE(sc, WMREG_GCR, gcr);
8214 }
8215
8216 /* special case - for 82575 - need to do manual init ... */
8217 static void
8218 wm_reset_init_script_82575(struct wm_softc *sc)
8219 {
8220 /*
8221	 * Remark: this is untested code - we have no board without an EEPROM.
8222	 * Same setup as mentioned in the FreeBSD driver for the i82575.
8223 */
8224
8225 /* SerDes configuration via SERDESCTRL */
8226 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8227 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8228 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8229 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8230
8231 /* CCM configuration via CCMCTL register */
8232 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8233 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8234
8235 /* PCIe lanes configuration */
8236 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8237 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8238 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8239 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8240
8241 /* PCIe PLL Configuration */
8242 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8243 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8244 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8245 }
8246
8247 static void
8248 wm_init_manageability(struct wm_softc *sc)
8249 {
8250
8251 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8252 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8253 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8254
8255		/* disable hardware interception of ARP */
8256 manc &= ~MANC_ARP_EN;
8257
8258 /* enable receiving management packets to the host */
8259 if (sc->sc_type >= WM_T_82571) {
8260 manc |= MANC_EN_MNG2HOST;
8261			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8262			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8263		}
8265
8266 CSR_WRITE(sc, WMREG_MANC, manc);
8267 }
8268 }
8269
8270 static void
8271 wm_release_manageability(struct wm_softc *sc)
8272 {
8273
8274 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8275 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8276
8277 if (sc->sc_type >= WM_T_82571)
8278 manc &= ~MANC_EN_MNG2HOST;
8279
8280 CSR_WRITE(sc, WMREG_MANC, manc);
8281 }
8282 }
8283
8284 static void
8285 wm_get_wakeup(struct wm_softc *sc)
8286 {
8287
8288	/* Step 0: set the HAS_AMT, ARC_SUBSYS_VALID and ASF_FIRMWARE_PRES flags */
8289 switch (sc->sc_type) {
8290 case WM_T_82573:
8291 case WM_T_82583:
8292 sc->sc_flags |= WM_F_HAS_AMT;
8293 /* FALLTHROUGH */
8294 case WM_T_80003:
8295 case WM_T_82541:
8296 case WM_T_82547:
8297 case WM_T_82571:
8298 case WM_T_82572:
8299 case WM_T_82574:
8300 case WM_T_82575:
8301 case WM_T_82576:
8302 #if 0 /* XXX */
8303 case WM_T_82580:
8304 case WM_T_82580ER:
8305 case WM_T_I350:
8306 #endif
8307 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8308 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8309 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8310 break;
8311 case WM_T_ICH8:
8312 case WM_T_ICH9:
8313 case WM_T_ICH10:
8314 case WM_T_PCH:
8315 case WM_T_PCH2:
8316 sc->sc_flags |= WM_F_HAS_AMT;
8317 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8318 break;
8319 default:
8320 break;
8321 }
8322
8323	/* Step 1: set HAS_MANAGE if management pass-through can be enabled */
8324 if (wm_enable_mng_pass_thru(sc) != 0)
8325 sc->sc_flags |= WM_F_HAS_MANAGE;
8326
8327 #ifdef WM_DEBUG
8328 printf("\n");
8329 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8330 printf("HAS_AMT,");
8331 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8332 printf("ARC_SUBSYS_VALID,");
8333 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8334 printf("ASF_FIRMWARE_PRES,");
8335 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8336 printf("HAS_MANAGE,");
8337 printf("\n");
8338 #endif
8339 /*
8340	 * Note that the WOL flag is set after the EEPROM-related reset
8341	 * code has run.
8341 * stuff
8342 */
8343 }
8344
8345 #ifdef WM_WOL
8346 /* WOL in the newer chipset interfaces (pchlan) */
8347 static void
8348 wm_enable_phy_wakeup(struct wm_softc *sc)
8349 {
8350 #if 0
8351 uint16_t preg;
8352
8353 /* Copy MAC RARs to PHY RARs */
8354
8355 /* Copy MAC MTA to PHY MTA */
8356
8357 /* Configure PHY Rx Control register */
8358
8359 /* Enable PHY wakeup in MAC register */
8360
8361 /* Configure and enable PHY wakeup in PHY registers */
8362
8363 /* Activate PHY wakeup */
8364
8365 /* XXX */
8366 #endif
8367 }
8368
8369 static void
8370 wm_enable_wakeup(struct wm_softc *sc)
8371 {
8372	uint32_t reg;
	int pmreg;		/* pci_get_capability() takes an int * */
8373 pcireg_t pmode;
8374
8375 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8376 &pmreg, NULL) == 0)
8377 return;
8378
8379 /* Advertise the wakeup capability */
8380 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8381 | CTRL_SWDPIN(3));
8382 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8383
8384 /* ICH workaround */
8385 switch (sc->sc_type) {
8386 case WM_T_ICH8:
8387 case WM_T_ICH9:
8388 case WM_T_ICH10:
8389 case WM_T_PCH:
8390 case WM_T_PCH2:
8391 /* Disable gig during WOL */
8392 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8393 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8394 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8395 if (sc->sc_type == WM_T_PCH)
8396 wm_gmii_reset(sc);
8397
8398 /* Power down workaround */
8399 if (sc->sc_phytype == WMPHY_82577) {
8400 struct mii_softc *child;
8401
8402 /* Assume that the PHY is copper */
8403 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8404 if (child->mii_mpd_rev <= 2)
8405 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8406 (768 << 5) | 25, 0x0444); /* magic num */
8407 }
8408 break;
8409 default:
8410 break;
8411 }
8412
8413 /* Keep the laser running on fiber adapters */
8414	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8415	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
8416 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8417 reg |= CTRL_EXT_SWDPIN(3);
8418 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8419 }
8420
8421 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8422 #if 0 /* for the multicast packet */
8423 reg |= WUFC_MC;
8424 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8425 #endif
8426
8427 if (sc->sc_type == WM_T_PCH) {
8428 wm_enable_phy_wakeup(sc);
8429 } else {
8430 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8431 CSR_WRITE(sc, WMREG_WUFC, reg);
8432 }
8433
8434 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8435 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8436 || (sc->sc_type == WM_T_PCH2))
8437 && (sc->sc_phytype == WMPHY_IGP_3))
8438 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8439
8440 /* Request PME */
8441 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8442 #if 0
8443 /* Disable WOL */
8444 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8445 #else
8446 /* For WOL */
8447 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8448 #endif
8449 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8450 }
8451 #endif /* WM_WOL */
8452
8453 static bool
8454 wm_suspend(device_t self, const pmf_qual_t *qual)
8455 {
8456 struct wm_softc *sc = device_private(self);
8457
8458 wm_release_manageability(sc);
8459 wm_release_hw_control(sc);
8460 #ifdef WM_WOL
8461 wm_enable_wakeup(sc);
8462 #endif
8463
8464 return true;
8465 }
8466
8467 static bool
8468 wm_resume(device_t self, const pmf_qual_t *qual)
8469 {
8470 struct wm_softc *sc = device_private(self);
8471
8472 wm_init_manageability(sc);
8473
8474 return true;
8475 }
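
/*
 * Illustrative sketch (not part of the driver): the suspend/resume
 * handlers above are meant to be hooked up through pmf(9) at attach
 * time, roughly as below; "self" and "ifp" are the usual attach-time
 * variables.
 */
#if 0
	if (pmf_device_register(self, wm_suspend, wm_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
#endif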
8476
8477 static void
8478 wm_set_eee_i350(struct wm_softc *sc)
8479 {
8480 uint32_t ipcnfg, eeer;
8481
8482 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8483 eeer = CSR_READ(sc, WMREG_EEER);
8484
8485 if ((sc->sc_flags & WM_F_EEE) != 0) {
8486 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8487 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8488 | EEER_LPI_FC);
8489 } else {
8490 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8491 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8492 | EEER_LPI_FC);
8493 }
8494
8495 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8496 CSR_WRITE(sc, WMREG_EEER, eeer);
8497	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush posted write? */
8498	CSR_READ(sc, WMREG_EEER); /* XXX flush posted write? */
8499 }
8500