/*	$NetBSD: if_wm.c,v 1.234 2012/09/01 02:08:28 matt Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.234 2012/09/01 02:08:28 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
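
/*
 * Illustrative sketch (not compiled into the driver): because the ring
 * and job-queue sizes are powers of two, the WM_NEXT* macros above wrap
 * indices with a mask instead of a modulo.  The numbers below assume
 * the 82544 ring size of 4096 descriptors.
 */
#if 0
	int x = WM_NTXDESC(sc) - 1;	/* 4095, the last descriptor */
	x = WM_NEXTTX(sc, x);		/* (4095 + 1) & 4095 == 0: wrapped */
#endif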

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
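
/*
 * Illustrative note: the WM_CD*OFF macros yield byte offsets into the
 * single control-data clump, so the bus address of, say, Tx descriptor
 * 5 is the clump's DMA address plus WM_CDTXOFF(5); the WM_CDTXADDR()
 * macro further below computes exactly that.
 */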

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
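
/*
 * Illustrative sketch (not compiled into the driver): chaining the
 * buffers of a multi-buffer packet with the macros above; m1 and m2
 * are hypothetical mbufs taken from the Rx ring.
 */
#if 0
	WM_RXCHAIN_RESET(sc);		/* sc_rxhead == NULL, sc_rxlen == 0 */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead == sc_rxtail == m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next == m2 */
#endif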

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
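
/*
 * Illustrative note: syncing 4 descriptors starting at index
 * WM_NTXDESC(sc) - 2 with WM_CDTXSYNC() issues two bus_dmamap_sync()
 * calls, one for the 2 descriptors at the end of the ring and one for
 * the 2 descriptors at the beginning.
 */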

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
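
/*
 * Illustrative arithmetic: with the 2-byte scoot above, the 14-byte
 * Ethernet header ends at offset 16, so the IP header that follows is
 * 4-byte aligned, which strict-alignment platforms require.
 */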

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
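
/*
 * Illustrative usage (hypothetical, not called here): wm_io_write()
 * implements the I/O mapped indirect register access mentioned in
 * wm_attach() below -- offset 0 of the I/O BAR is the address window
 * and offset 4 the data window.
 */
#if 0
	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
#endif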

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
	uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
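
/*
 * Illustrative note: wm_set_dma_addr() splits a bus address into the
 * little-endian low/high words a descriptor expects; when bus_addr_t
 * is only 32 bits wide the high word is simply 0.
 */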

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i != PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the
			 * 82580 (and newer?) chips have no
			 * PCI_MAPREG_TYPE_IO; that is not a problem,
			 * because the newer chips don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when
			 * the I/O BAR is 0, which looks as if it
			 * hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
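				/*
				 * Both fields encode the byte count as
				 * a power of two: 512 << n bytes, as
				 * the message below also shows.
				 */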
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
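	/*
	 * (The 4G constraint is what the 4GB boundary argument passed
	 * to bus_dmamem_alloc() just below enforces.)
	 */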
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/* get PHY control from SMBus to PCIe */
	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
		wm_smbustopci(sc);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
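		/*
		 * GFPREG gives the flash region's base and limit in
		 * sector units; the computation below takes the region
		 * size in bytes, halves it for the two NVM banks, and
		 * converts bytes to 16-bit words.
		 */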
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the
	 * checksum.  This allows the EEPROM type to be printed correctly
	 * in the case that no EEPROM is attached.
	 */
1615 /*
1616 * Validate the EEPROM checksum. If the checksum fails, flag
1617 * this for later, so we can fail future reads from the EEPROM.
1618 */
1619 if (wm_validate_eeprom_checksum(sc)) {
1620 /*
1621 * Try the read once more, because some PCI-e parts fail the
1622 * first check while the link is still in a sleep state.
1623 */
1624 if (wm_validate_eeprom_checksum(sc))
1625 sc->sc_flags |= WM_F_EEPROM_INVALID;
1626 }
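/*
 * For reference, the checksum itself is simple: per Intel's NVM
 * map, the first 0x40 16-bit words of the EEPROM must sum
 * (mod 0x10000) to 0xbaba; wm_validate_eeprom_checksum() performs
 * that sum.
 */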
1627
1628 /* Set device properties (macflags) */
1629 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1630
1631 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1632 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1633 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1634 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1635 } else {
1636 if (sc->sc_flags & WM_F_EEPROM_SPI)
1637 eetype = "SPI";
1638 else
1639 eetype = "MicroWire";
1640 aprint_verbose_dev(sc->sc_dev,
1641 "%u word (%d address bits) %s EEPROM\n",
1642 1U << sc->sc_ee_addrbits,
1643 sc->sc_ee_addrbits, eetype);
1644 }
1645
1646 /*
1647 * Read the Ethernet address from the EEPROM, unless it was
1648 * found first in the device properties.
1649 */
1650 ea = prop_dictionary_get(dict, "mac-address");
1651 if (ea != NULL) {
1652 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1653 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1654 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1655 } else {
1656 if (wm_read_mac_addr(sc, enaddr) != 0) {
1657 aprint_error_dev(sc->sc_dev,
1658 "unable to read Ethernet address\n");
1659 return;
1660 }
1661 }
1662
1663 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1664 ether_sprintf(enaddr));
1665
1666 /*
1667 * Read the config info from the EEPROM, and set up various
1668 * bits in the control registers based on their contents.
1669 */
1670 pn = prop_dictionary_get(dict, "i82543-cfg1");
1671 if (pn != NULL) {
1672 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1673 cfg1 = (uint16_t) prop_number_integer_value(pn);
1674 } else {
1675 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1676 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1677 return;
1678 }
1679 }
1680
1681 pn = prop_dictionary_get(dict, "i82543-cfg2");
1682 if (pn != NULL) {
1683 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1684 cfg2 = (uint16_t) prop_number_integer_value(pn);
1685 } else {
1686 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1687 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1688 return;
1689 }
1690 }
1691
1692 /* check for WM_F_WOL */
1693 switch (sc->sc_type) {
1694 case WM_T_82542_2_0:
1695 case WM_T_82542_2_1:
1696 case WM_T_82543:
1697 /* dummy? */
1698 eeprom_data = 0;
1699 apme_mask = EEPROM_CFG3_APME;
1700 break;
1701 case WM_T_82544:
1702 apme_mask = EEPROM_CFG2_82544_APM_EN;
1703 eeprom_data = cfg2;
1704 break;
1705 case WM_T_82546:
1706 case WM_T_82546_3:
1707 case WM_T_82571:
1708 case WM_T_82572:
1709 case WM_T_82573:
1710 case WM_T_82574:
1711 case WM_T_82583:
1712 case WM_T_80003:
1713 default:
1714 apme_mask = EEPROM_CFG3_APME;
1715 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1716 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1717 break;
1718 case WM_T_82575:
1719 case WM_T_82576:
1720 case WM_T_82580:
1721 case WM_T_82580ER:
1722 case WM_T_I350:
1723 case WM_T_ICH8:
1724 case WM_T_ICH9:
1725 case WM_T_ICH10:
1726 case WM_T_PCH:
1727 case WM_T_PCH2:
1728 /* XXX The funcid should be checked on some devices */
1729 apme_mask = WUC_APME;
1730 eeprom_data = CSR_READ(sc, WMREG_WUC);
1731 break;
1732 }
1733
1734 /* Set the WM_F_WOL flag now that the EEPROM/WUC data has been read */
1735 if ((eeprom_data & apme_mask) != 0)
1736 sc->sc_flags |= WM_F_WOL;
1737 #ifdef WM_DEBUG
1738 if ((sc->sc_flags & WM_F_WOL) != 0)
1739 printf("WOL\n");
1740 #endif
1741
1742 /*
1743 * XXX need special handling for some multiple-port cards
1744 * to disable a particular port.
1745 */
1746
1747 if (sc->sc_type >= WM_T_82544) {
1748 pn = prop_dictionary_get(dict, "i82543-swdpin");
1749 if (pn != NULL) {
1750 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1751 swdpin = (uint16_t) prop_number_integer_value(pn);
1752 } else {
1753 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1754 aprint_error_dev(sc->sc_dev,
1755 "unable to read SWDPIN\n");
1756 return;
1757 }
1758 }
1759 }
1760
1761 if (cfg1 & EEPROM_CFG1_ILOS)
1762 sc->sc_ctrl |= CTRL_ILOS;
1763 if (sc->sc_type >= WM_T_82544) {
1764 sc->sc_ctrl |=
1765 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1766 CTRL_SWDPIO_SHIFT;
1767 sc->sc_ctrl |=
1768 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1769 CTRL_SWDPINS_SHIFT;
1770 } else {
1771 sc->sc_ctrl |=
1772 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1773 CTRL_SWDPIO_SHIFT;
1774 }
1775
1776 #if 0
1777 if (sc->sc_type >= WM_T_82544) {
1778 if (cfg1 & EEPROM_CFG1_IPS0)
1779 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1780 if (cfg1 & EEPROM_CFG1_IPS1)
1781 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1782 sc->sc_ctrl_ext |=
1783 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1784 CTRL_EXT_SWDPIO_SHIFT;
1785 sc->sc_ctrl_ext |=
1786 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1787 CTRL_EXT_SWDPINS_SHIFT;
1788 } else {
1789 sc->sc_ctrl_ext |=
1790 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1791 CTRL_EXT_SWDPIO_SHIFT;
1792 }
1793 #endif
1794
1795 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1796 #if 0
1797 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1798 #endif
1799
1800 /*
1801 * Set up some register offsets that are different between
1802 * the i82542 and the i82543 and later chips.
1803 */
1804 if (sc->sc_type < WM_T_82543) {
1805 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1806 sc->sc_tdt_reg = WMREG_OLD_TDT;
1807 } else {
1808 sc->sc_rdt_reg = WMREG_RDT;
1809 sc->sc_tdt_reg = WMREG_TDT;
1810 }
1811
1812 if (sc->sc_type == WM_T_PCH) {
1813 uint16_t val;
1814
1815 /* Save the NVM K1 bit setting */
1816 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1817
1818 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1819 sc->sc_nvm_k1_enabled = 1;
1820 else
1821 sc->sc_nvm_k1_enabled = 0;
1822 }
1823
1824 /*
1825 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
1826 * media structures accordingly.
1827 */
1828 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1829 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1830 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1831 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1832 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1833 wm_gmii_mediainit(sc, wmp->wmp_product);
1834 } else if (sc->sc_type < WM_T_82543 ||
1835 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1836 if (wmp->wmp_flags & WMP_F_1000T)
1837 aprint_error_dev(sc->sc_dev,
1838 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1839 wm_tbi_mediainit(sc);
1840 } else {
1841 switch (sc->sc_type) {
1842 case WM_T_82575:
1843 case WM_T_82576:
1844 case WM_T_82580:
1845 case WM_T_82580ER:
1846 case WM_T_I350:
1847 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1848 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1849 case CTRL_EXT_LINK_MODE_SGMII:
1850 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1851 sc->sc_flags |= WM_F_SGMII;
1852 CSR_WRITE(sc, WMREG_CTRL_EXT,
1853 reg | CTRL_EXT_I2C_ENA);
1854 wm_gmii_mediainit(sc, wmp->wmp_product);
1855 break;
1856 case CTRL_EXT_LINK_MODE_1000KX:
1857 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1858 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1859 CSR_WRITE(sc, WMREG_CTRL_EXT,
1860 reg | CTRL_EXT_I2C_ENA);
1861 panic("not supported yet\n");
1862 break;
1863 case CTRL_EXT_LINK_MODE_GMII:
1864 default:
1865 CSR_WRITE(sc, WMREG_CTRL_EXT,
1866 reg & ~CTRL_EXT_I2C_ENA);
1867 wm_gmii_mediainit(sc, wmp->wmp_product);
1868 break;
1869 }
1870 break;
1871 default:
1872 if (wmp->wmp_flags & WMP_F_1000X)
1873 aprint_error_dev(sc->sc_dev,
1874 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1875 wm_gmii_mediainit(sc, wmp->wmp_product);
1876 }
1877 }
1878
1879 ifp = &sc->sc_ethercom.ec_if;
1880 xname = device_xname(sc->sc_dev);
1881 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1882 ifp->if_softc = sc;
1883 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1884 ifp->if_ioctl = wm_ioctl;
1885 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1886 ifp->if_start = wm_nq_start;
1887 else
1888 ifp->if_start = wm_start;
1889 ifp->if_watchdog = wm_watchdog;
1890 ifp->if_init = wm_init;
1891 ifp->if_stop = wm_stop;
1892 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1893 IFQ_SET_READY(&ifp->if_snd);
1894
1895 /* Check for jumbo frame */
1896 switch (sc->sc_type) {
1897 case WM_T_82573:
1898 /* XXX limited to 9234 if ASPM is disabled */
1899 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1900 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1901 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1902 break;
1903 case WM_T_82571:
1904 case WM_T_82572:
1905 case WM_T_82574:
1906 case WM_T_82575:
1907 case WM_T_82576:
1908 case WM_T_82580:
1909 case WM_T_82580ER:
1910 case WM_T_I350:
1911 case WM_T_80003:
1912 case WM_T_ICH9:
1913 case WM_T_ICH10:
1914 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1915 /* XXX limited to 9234 */
1916 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1917 break;
1918 case WM_T_PCH:
1919 /* XXX limited to 4096 */
1920 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1921 break;
1922 case WM_T_82542_2_0:
1923 case WM_T_82542_2_1:
1924 case WM_T_82583:
1925 case WM_T_ICH8:
1926 /* No support for jumbo frame */
1927 break;
1928 default:
1929 /* ETHER_MAX_LEN_JUMBO */
1930 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1931 break;
1932 }
1933
1934 /*
1935 * If we're an i82543 or greater, we can support VLANs.
1936 */
1937 if (sc->sc_type >= WM_T_82543)
1938 sc->sc_ethercom.ec_capabilities |=
1939 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1940
1941 /*
1942 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
1943 * on the i82543 and later.
1944 */
1945 if (sc->sc_type >= WM_T_82543) {
1946 ifp->if_capabilities |=
1947 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1948 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1949 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1950 IFCAP_CSUM_TCPv6_Tx |
1951 IFCAP_CSUM_UDPv6_Tx;
1952 }
1953
1954 /*
1955 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
1956 *
1957 * 82541GI (8086:1076) ... no
1958 * 82572EI (8086:10b9) ... yes
1959 */
1960 if (sc->sc_type >= WM_T_82571) {
1961 ifp->if_capabilities |=
1962 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1963 }
1964
1965 /*
1966 * If we're an i82544 or greater (except i82547), we can do
1967 * TCP segmentation offload.
1968 */
1969 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1970 ifp->if_capabilities |= IFCAP_TSOv4;
1971 }
1972
1973 if (sc->sc_type >= WM_T_82571) {
1974 ifp->if_capabilities |= IFCAP_TSOv6;
1975 }
1976
1977 /*
1978 * Attach the interface.
1979 */
1980 if_attach(ifp);
1981 ether_ifattach(ifp, enaddr);
1982 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1983 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1984
1985 #ifdef WM_EVENT_COUNTERS
1986 /* Attach event counters. */
1987 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1988 NULL, xname, "txsstall");
1989 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1990 NULL, xname, "txdstall");
1991 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1992 NULL, xname, "txfifo_stall");
1993 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1994 NULL, xname, "txdw");
1995 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1996 NULL, xname, "txqe");
1997 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1998 NULL, xname, "rxintr");
1999 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2000 NULL, xname, "linkintr");
2001
2002 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2003 NULL, xname, "rxipsum");
2004 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2005 NULL, xname, "rxtusum");
2006 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2007 NULL, xname, "txipsum");
2008 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2009 NULL, xname, "txtusum");
2010 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2011 NULL, xname, "txtusum6");
2012
2013 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2014 NULL, xname, "txtso");
2015 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2016 NULL, xname, "txtso6");
2017 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2018 NULL, xname, "txtsopain");
2019
2020 for (i = 0; i < WM_NTXSEGS; i++) {
2021 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2022 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2023 NULL, xname, wm_txseg_evcnt_names[i]);
2024 }
2025
2026 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2027 NULL, xname, "txdrop");
2028
2029 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2030 NULL, xname, "tu");
2031
2032 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2033 NULL, xname, "tx_xoff");
2034 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2035 NULL, xname, "tx_xon");
2036 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2037 NULL, xname, "rx_xoff");
2038 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2039 NULL, xname, "rx_xon");
2040 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2041 NULL, xname, "rx_macctl");
2042 #endif /* WM_EVENT_COUNTERS */
2043
2044 if (pmf_device_register(self, wm_suspend, wm_resume))
2045 pmf_class_network_register(self, ifp);
2046 else
2047 aprint_error_dev(self, "couldn't establish power handler\n");
2048
2049 return;
2050
2051 /*
2052 * Free any resources we've allocated during the failed attach
2053 * attempt. Do this in reverse order and fall through.
2054 */
2055 fail_5:
2056 for (i = 0; i < WM_NRXDESC; i++) {
2057 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2058 bus_dmamap_destroy(sc->sc_dmat,
2059 sc->sc_rxsoft[i].rxs_dmamap);
2060 }
2061 fail_4:
2062 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2063 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2064 bus_dmamap_destroy(sc->sc_dmat,
2065 sc->sc_txsoft[i].txs_dmamap);
2066 }
2067 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2068 fail_3:
2069 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2070 fail_2:
2071 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2072 sc->sc_cd_size);
2073 fail_1:
2074 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2075 fail_0:
2076 return;
2077 }
2078
2079 static int
2080 wm_detach(device_t self, int flags __unused)
2081 {
2082 struct wm_softc *sc = device_private(self);
2083 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2084 int i, s;
2085
2086 s = splnet();
2087 /* Stop the interface. Callouts are stopped in it. */
2088 wm_stop(ifp, 1);
2089 splx(s);
2090
2091 pmf_device_deregister(self);
2092
2093 /* Tell the firmware about the release */
2094 wm_release_manageability(sc);
2095 wm_release_hw_control(sc);
2096
2097 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2098
2099 /* Delete all remaining media. */
2100 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2101
2102 ether_ifdetach(ifp);
2103 if_detach(ifp);
2104
2106 /* Unload RX dmamaps and free mbufs */
2107 wm_rxdrain(sc);
2108
2109 /* Free DMA maps. This mirrors the failure path at the end of wm_attach() */
2110 for (i = 0; i < WM_NRXDESC; i++) {
2111 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2112 bus_dmamap_destroy(sc->sc_dmat,
2113 sc->sc_rxsoft[i].rxs_dmamap);
2114 }
2115 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2116 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2117 bus_dmamap_destroy(sc->sc_dmat,
2118 sc->sc_txsoft[i].txs_dmamap);
2119 }
2120 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2121 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2122 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2123 sc->sc_cd_size);
2124 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2125
2126 /* Disestablish the interrupt handler */
2127 if (sc->sc_ih != NULL) {
2128 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2129 sc->sc_ih = NULL;
2130 }
2131
2132 /* Unmap the registers */
2133 if (sc->sc_ss) {
2134 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2135 sc->sc_ss = 0;
2136 }
2137
2138 if (sc->sc_ios) {
2139 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2140 sc->sc_ios = 0;
2141 }
2142
2143 return 0;
2144 }
2145
2146 /*
2147 * wm_tx_offload:
2148 *
2149 * Set up TCP/IP checksumming parameters for the
2150 * specified packet.
2151 */
2152 static int
2153 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2154 uint8_t *fieldsp)
2155 {
2156 struct mbuf *m0 = txs->txs_mbuf;
2157 struct livengood_tcpip_ctxdesc *t;
2158 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2159 uint32_t ipcse;
2160 struct ether_header *eh;
2161 int offset, iphl;
2162 uint8_t fields;
2163
2164 /*
2165 * XXX It would be nice if the mbuf pkthdr had offset
2166 * fields for the protocol headers.
2167 */
2168
2169 eh = mtod(m0, struct ether_header *);
2170 switch (htons(eh->ether_type)) {
2171 case ETHERTYPE_IP:
2172 case ETHERTYPE_IPV6:
2173 offset = ETHER_HDR_LEN;
2174 break;
2175
2176 case ETHERTYPE_VLAN:
2177 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2178 break;
2179
2180 default:
2181 /*
2182 * Don't support this protocol or encapsulation.
2183 */
2184 *fieldsp = 0;
2185 *cmdp = 0;
2186 return 0;
2187 }
2188
2189 if ((m0->m_pkthdr.csum_flags &
2190 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2191 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2192 } else {
2193 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2194 }
2195 ipcse = offset + iphl - 1;
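	/*
	 * Per the i8254x context-descriptor format, IPCSE is the
	 * inclusive offset of the last byte covered by the IP checksum,
	 * hence the "- 1"; a value of 0 means "through the end of the
	 * packet", which is why the TSOv6 path below clears ipcse
	 * (IPv6 has no header checksum).
	 */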
2196
2197 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2198 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2199 seg = 0;
2200 fields = 0;
2201
2202 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2203 int hlen = offset + iphl;
2204 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2205
2206 if (__predict_false(m0->m_len <
2207 (hlen + sizeof(struct tcphdr)))) {
2208 /*
2209 * TCP/IP headers are not in the first mbuf; we need
2210 * to do this the slow and painful way. Let's just
2211 * hope this doesn't happen very often.
2212 */
2213 struct tcphdr th;
2214
2215 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2216
2217 m_copydata(m0, hlen, sizeof(th), &th);
2218 if (v4) {
2219 struct ip ip;
2220
2221 m_copydata(m0, offset, sizeof(ip), &ip);
2222 ip.ip_len = 0;
2223 m_copyback(m0,
2224 offset + offsetof(struct ip, ip_len),
2225 sizeof(ip.ip_len), &ip.ip_len);
2226 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2227 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2228 } else {
2229 struct ip6_hdr ip6;
2230
2231 m_copydata(m0, offset, sizeof(ip6), &ip6);
2232 ip6.ip6_plen = 0;
2233 m_copyback(m0,
2234 offset + offsetof(struct ip6_hdr, ip6_plen),
2235 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2236 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2237 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2238 }
2239 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2240 sizeof(th.th_sum), &th.th_sum);
2241
2242 hlen += th.th_off << 2;
2243 } else {
2244 /*
2245 * TCP/IP headers are in the first mbuf; we can do
2246 * this the easy way.
2247 */
2248 struct tcphdr *th;
2249
2250 if (v4) {
2251 struct ip *ip =
2252 (void *)(mtod(m0, char *) + offset);
2253 th = (void *)(mtod(m0, char *) + hlen);
2254
2255 ip->ip_len = 0;
2256 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2257 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2258 } else {
2259 struct ip6_hdr *ip6 =
2260 (void *)(mtod(m0, char *) + offset);
2261 th = (void *)(mtod(m0, char *) + hlen);
2262
2263 ip6->ip6_plen = 0;
2264 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2265 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2266 }
2267 hlen += th->th_off << 2;
2268 }
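		/*
		 * Both paths above zero the IP length field and seed
		 * th_sum with just the pseudo-header checksum (addresses
		 * plus protocol, no length).  During TSO the hardware
		 * rewrites the per-segment length fields and folds the
		 * payload checksum on top of this seed, so including the
		 * length here would count it twice.
		 */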
2269
2270 if (v4) {
2271 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2272 cmdlen |= WTX_TCPIP_CMD_IP;
2273 } else {
2274 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2275 ipcse = 0;
2276 }
2277 cmd |= WTX_TCPIP_CMD_TSE;
2278 cmdlen |= WTX_TCPIP_CMD_TSE |
2279 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2280 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2281 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
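		/*
		 * cmdlen now carries the TSO payload length (everything
		 * past the replicated headers) and seg packs the header
		 * length and MSS, giving the chip what it needs to carve
		 * the payload into MSS-sized segments.
		 */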
2282 }
2283
2284 /*
2285 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2286 * offload feature, if we load the context descriptor, we
2287 * MUST provide valid values for IPCSS and TUCSS fields.
2288 */
2289
2290 ipcs = WTX_TCPIP_IPCSS(offset) |
2291 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2292 WTX_TCPIP_IPCSE(ipcse);
2293 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2294 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2295 fields |= WTX_IXSM;
2296 }
2297
2298 offset += iphl;
2299
2300 if (m0->m_pkthdr.csum_flags &
2301 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2302 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2303 fields |= WTX_TXSM;
2304 tucs = WTX_TCPIP_TUCSS(offset) |
2305 WTX_TCPIP_TUCSO(offset +
2306 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2307 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2308 } else if ((m0->m_pkthdr.csum_flags &
2309 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2310 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2311 fields |= WTX_TXSM;
2312 tucs = WTX_TCPIP_TUCSS(offset) |
2313 WTX_TCPIP_TUCSO(offset +
2314 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2315 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2316 } else {
2317 /* Just initialize it to a valid TCP context. */
2318 tucs = WTX_TCPIP_TUCSS(offset) |
2319 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2320 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2321 }
2322
2323 /* Fill in the context descriptor. */
2324 t = (struct livengood_tcpip_ctxdesc *)
2325 &sc->sc_txdescs[sc->sc_txnext];
2326 t->tcpip_ipcs = htole32(ipcs);
2327 t->tcpip_tucs = htole32(tucs);
2328 t->tcpip_cmdlen = htole32(cmdlen);
2329 t->tcpip_seg = htole32(seg);
2330 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2331
2332 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2333 txs->txs_ndesc++;
2334
2335 *cmdp = cmd;
2336 *fieldsp = fields;
2337
2338 return 0;
2339 }
2340
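/*
 * wm_dump_mbuf_chain:
 *
 *	Dump an mbuf chain to the log; used when a packet is dropped
 *	for consuming too many DMA segments.
 */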
2341 static void
2342 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2343 {
2344 struct mbuf *m;
2345 int i;
2346
2347 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2348 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2349 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2350 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2351 m->m_data, m->m_len, m->m_flags);
2352 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2353 i, i == 1 ? "" : "s");
2354 }
2355
2356 /*
2357 * wm_82547_txfifo_stall:
2358 *
2359 * Callout used to wait for the 82547 Tx FIFO to drain,
2360 * reset the FIFO pointers, and restart packet transmission.
2361 */
2362 static void
2363 wm_82547_txfifo_stall(void *arg)
2364 {
2365 struct wm_softc *sc = arg;
2366 int s;
2367
2368 s = splnet();
2369
2370 if (sc->sc_txfifo_stall) {
2371 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2372 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2373 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2374 /*
2375 * Packets have drained. Stop transmitter, reset
2376 * FIFO pointers, restart transmitter, and kick
2377 * the packet queue.
2378 */
2379 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2380 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2381 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2382 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2383 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2384 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2385 CSR_WRITE(sc, WMREG_TCTL, tctl);
2386 CSR_WRITE_FLUSH(sc);
2387
2388 sc->sc_txfifo_head = 0;
2389 sc->sc_txfifo_stall = 0;
2390 wm_start(&sc->sc_ethercom.ec_if);
2391 } else {
2392 /*
2393 * Still waiting for packets to drain; try again in
2394 * another tick.
2395 */
2396 callout_schedule(&sc->sc_txfifo_ch, 1);
2397 }
2398 }
2399
2400 splx(s);
2401 }
2402
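/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 *	Gate (on != 0) or un-gate automatic hardware PHY configuration
 *	on ICH8-family parts, via the EXTCNFCTR gate bit.
 */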
2403 static void
2404 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2405 {
2406 uint32_t reg;
2407
2408 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2409
2410 if (on != 0)
2411 reg |= EXTCNFCTR_GATE_PHY_CFG;
2412 else
2413 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2414
2415 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2416 }
2417
2418 /*
2419 * wm_82547_txfifo_bugchk:
2420 *
2421 * Check for bug condition in the 82547 Tx FIFO. We need to
2422 * prevent enqueueing a packet that would wrap around the end
2423 * of the Tx FIFO ring buffer, otherwise the chip will croak.
2424 *
2425 * We do this by checking the amount of space before the end
2426 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2427 * the Tx FIFO, wait for all remaining packets to drain, reset
2428 * the internal FIFO pointers to the beginning, and restart
2429 * transmission on the interface.
2430 */
2431 #define WM_FIFO_HDR 0x10
2432 #define WM_82547_PAD_LEN 0x3e0
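/*
 * A note on the accounting below: len is the packet length plus the
 * 16-byte on-chip packet header (WM_FIFO_HDR), rounded up to a 16-byte
 * boundary, so e.g. a 60-byte runt consumes roundup(60 + 16, 16) = 80
 * bytes of FIFO space.  WM_82547_PAD_LEN (0x3e0 = 992 bytes) mirrors
 * the constant the Linux e1000 driver uses for the same workaround.
 */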
2433 static int
2434 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2435 {
2436 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2437 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2438
2439 /* Just return if already stalled. */
2440 if (sc->sc_txfifo_stall)
2441 return 1;
2442
2443 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2444 /* Stall only occurs in half-duplex mode. */
2445 goto send_packet;
2446 }
2447
2448 if (len >= WM_82547_PAD_LEN + space) {
2449 sc->sc_txfifo_stall = 1;
2450 callout_schedule(&sc->sc_txfifo_ch, 1);
2451 return 1;
2452 }
2453
2454 send_packet:
2455 sc->sc_txfifo_head += len;
2456 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2457 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2458
2459 return 0;
2460 }
2461
2462 /*
2463 * wm_start: [ifnet interface function]
2464 *
2465 * Start packet transmission on the interface.
2466 */
2467 static void
2468 wm_start(struct ifnet *ifp)
2469 {
2470 struct wm_softc *sc = ifp->if_softc;
2471 struct mbuf *m0;
2472 struct m_tag *mtag;
2473 struct wm_txsoft *txs;
2474 bus_dmamap_t dmamap;
2475 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2476 bus_addr_t curaddr;
2477 bus_size_t seglen, curlen;
2478 uint32_t cksumcmd;
2479 uint8_t cksumfields;
2480
2481 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2482 return;
2483
2484 /*
2485 * Remember the previous number of free descriptors.
2486 */
2487 ofree = sc->sc_txfree;
2488
2489 /*
2490 * Loop through the send queue, setting up transmit descriptors
2491 * until we drain the queue, or use up all available transmit
2492 * descriptors.
2493 */
2494 for (;;) {
2495 /* Grab a packet off the queue. */
2496 IFQ_POLL(&ifp->if_snd, m0);
2497 if (m0 == NULL)
2498 break;
2499
2500 DPRINTF(WM_DEBUG_TX,
2501 ("%s: TX: have packet to transmit: %p\n",
2502 device_xname(sc->sc_dev), m0));
2503
2504 /* Get a work queue entry. */
2505 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2506 wm_txintr(sc);
2507 if (sc->sc_txsfree == 0) {
2508 DPRINTF(WM_DEBUG_TX,
2509 ("%s: TX: no free job descriptors\n",
2510 device_xname(sc->sc_dev)));
2511 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2512 break;
2513 }
2514 }
2515
2516 txs = &sc->sc_txsoft[sc->sc_txsnext];
2517 dmamap = txs->txs_dmamap;
2518
2519 use_tso = (m0->m_pkthdr.csum_flags &
2520 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2521
2522 /*
2523 * So says the Linux driver:
2524 * The controller does a simple calculation to make sure
2525 * there is enough room in the FIFO before initiating the
2526 * DMA for each buffer. The calc is:
2527 * 4 = ceil(buffer len / MSS)
2528 * To make sure we don't overrun the FIFO, adjust the max
2529 * buffer len if the MSS drops.
2530 */
2531 dmamap->dm_maxsegsz =
2532 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2533 ? m0->m_pkthdr.segsz << 2
2534 : WTX_MAX_LEN;
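		/*
		 * Worked example (illustrative numbers): with a TSO MSS
		 * of 1448 bytes, the clamp is 4 * 1448 = 5792 bytes, so
		 * no single DMA segment spans more than four segments'
		 * worth of payload; larger products are capped at
		 * WTX_MAX_LEN.
		 */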
2535
2536 /*
2537 * Load the DMA map. If this fails, the packet either
2538 * didn't fit in the allotted number of segments, or we
2539 * were short on resources. For the too-many-segments
2540 * case, we simply report an error and drop the packet,
2541 * since we can't sanely copy a jumbo packet to a single
2542 * buffer.
2543 */
2544 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2545 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2546 if (error) {
2547 if (error == EFBIG) {
2548 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2549 log(LOG_ERR, "%s: Tx packet consumes too many "
2550 "DMA segments, dropping...\n",
2551 device_xname(sc->sc_dev));
2552 IFQ_DEQUEUE(&ifp->if_snd, m0);
2553 wm_dump_mbuf_chain(sc, m0);
2554 m_freem(m0);
2555 continue;
2556 }
2557 /*
2558 * Short on resources, just stop for now.
2559 */
2560 DPRINTF(WM_DEBUG_TX,
2561 ("%s: TX: dmamap load failed: %d\n",
2562 device_xname(sc->sc_dev), error));
2563 break;
2564 }
2565
2566 segs_needed = dmamap->dm_nsegs;
2567 if (use_tso) {
2568 /* For sentinel descriptor; see below. */
2569 segs_needed++;
2570 }
2571
2572 /*
2573 * Ensure we have enough descriptors free to describe
2574 * the packet. Note, we always reserve one descriptor
2575 * at the end of the ring due to the semantics of the
2576 * TDT register, plus one more in the event we need
2577 * to load offload context.
2578 */
2579 if (segs_needed > sc->sc_txfree - 2) {
2580 /*
2581 * Not enough free descriptors to transmit this
2582 * packet. We haven't committed anything yet,
2583 * so just unload the DMA map, put the packet
2584 * back on the queue, and punt. Notify the upper
2585 * layer that there are no more slots left.
2586 */
2587 DPRINTF(WM_DEBUG_TX,
2588 ("%s: TX: need %d (%d) descriptors, have %d\n",
2589 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2590 segs_needed, sc->sc_txfree - 1));
2591 ifp->if_flags |= IFF_OACTIVE;
2592 bus_dmamap_unload(sc->sc_dmat, dmamap);
2593 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2594 break;
2595 }
2596
2597 /*
2598 * Check for 82547 Tx FIFO bug. We need to do this
2599 * once we know we can transmit the packet, since we
2600 * do some internal FIFO space accounting here.
2601 */
2602 if (sc->sc_type == WM_T_82547 &&
2603 wm_82547_txfifo_bugchk(sc, m0)) {
2604 DPRINTF(WM_DEBUG_TX,
2605 ("%s: TX: 82547 Tx FIFO bug detected\n",
2606 device_xname(sc->sc_dev)));
2607 ifp->if_flags |= IFF_OACTIVE;
2608 bus_dmamap_unload(sc->sc_dmat, dmamap);
2609 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2610 break;
2611 }
2612
2613 IFQ_DEQUEUE(&ifp->if_snd, m0);
2614
2615 /*
2616 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2617 */
2618
2619 DPRINTF(WM_DEBUG_TX,
2620 ("%s: TX: packet has %d (%d) DMA segments\n",
2621 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2622
2623 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2624
2625 /*
2626 * Store a pointer to the packet so that we can free it
2627 * later.
2628 *
2629 * Initially, we consider the number of descriptors the
2630 * packet uses to be the number of DMA segments. This may be
2631 * incremented by 1 if we do checksum offload (a descriptor
2632 * is used to set the checksum context).
2633 */
2634 txs->txs_mbuf = m0;
2635 txs->txs_firstdesc = sc->sc_txnext;
2636 txs->txs_ndesc = segs_needed;
2637
2638 /* Set up offload parameters for this packet. */
2639 if (m0->m_pkthdr.csum_flags &
2640 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2641 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2642 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2643 if (wm_tx_offload(sc, txs, &cksumcmd,
2644 &cksumfields) != 0) {
2645 /* Error message already displayed. */
2646 bus_dmamap_unload(sc->sc_dmat, dmamap);
2647 continue;
2648 }
2649 } else {
2650 cksumcmd = 0;
2651 cksumfields = 0;
2652 }
2653
2654 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2655
2656 /* Sync the DMA map. */
2657 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2658 BUS_DMASYNC_PREWRITE);
2659
2660 /*
2661 * Initialize the transmit descriptor.
2662 */
2663 for (nexttx = sc->sc_txnext, seg = 0;
2664 seg < dmamap->dm_nsegs; seg++) {
2665 for (seglen = dmamap->dm_segs[seg].ds_len,
2666 curaddr = dmamap->dm_segs[seg].ds_addr;
2667 seglen != 0;
2668 curaddr += curlen, seglen -= curlen,
2669 nexttx = WM_NEXTTX(sc, nexttx)) {
2670 curlen = seglen;
2671
2672 /*
2673 * So says the Linux driver:
2674 * Work around for premature descriptor
2675 * write-backs in TSO mode. Append a
2676 * 4-byte sentinel descriptor.
2677 */
2678 if (use_tso &&
2679 seg == dmamap->dm_nsegs - 1 &&
2680 curlen > 8)
2681 curlen -= 4;
2682
2683 wm_set_dma_addr(
2684 &sc->sc_txdescs[nexttx].wtx_addr,
2685 curaddr);
2686 sc->sc_txdescs[nexttx].wtx_cmdlen =
2687 htole32(cksumcmd | curlen);
2688 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2689 0;
2690 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2691 cksumfields;
2692 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2693 lasttx = nexttx;
2694
2695 DPRINTF(WM_DEBUG_TX,
2696 ("%s: TX: desc %d: low %#" PRIxPADDR ", "
2697 "len %#04zx\n",
2698 device_xname(sc->sc_dev), nexttx,
2699 curaddr & 0xffffffffUL, curlen));
2700 }
2701 }
2702
2703 KASSERT(lasttx != -1);
2704
2705 /*
2706 * Set up the command byte on the last descriptor of
2707 * the packet. If we're in the interrupt delay window,
2708 * delay the interrupt.
2709 */
2710 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2711 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2712
2713 /*
2714 * If VLANs are enabled and the packet has a VLAN tag, set
2715 * up the descriptor to encapsulate the packet for us.
2716 *
2717 * This is only valid on the last descriptor of the packet.
2718 */
2719 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2720 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2721 htole32(WTX_CMD_VLE);
2722 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2723 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2724 }
2725
2726 txs->txs_lastdesc = lasttx;
2727
2728 DPRINTF(WM_DEBUG_TX,
2729 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2730 device_xname(sc->sc_dev),
2731 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2732
2733 /* Sync the descriptors we're using. */
2734 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2735 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2736
2737 /* Give the packet to the chip. */
2738 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2739
2740 DPRINTF(WM_DEBUG_TX,
2741 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2742
2743 DPRINTF(WM_DEBUG_TX,
2744 ("%s: TX: finished transmitting packet, job %d\n",
2745 device_xname(sc->sc_dev), sc->sc_txsnext));
2746
2747 /* Advance the tx pointer. */
2748 sc->sc_txfree -= txs->txs_ndesc;
2749 sc->sc_txnext = nexttx;
2750
2751 sc->sc_txsfree--;
2752 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2753
2754 /* Pass the packet to any BPF listeners. */
2755 bpf_mtap(ifp, m0);
2756 }
2757
2758 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2759 /* No more slots; notify upper layer. */
2760 ifp->if_flags |= IFF_OACTIVE;
2761 }
2762
2763 if (sc->sc_txfree != ofree) {
2764 /* Set a watchdog timer in case the chip flakes out. */
2765 ifp->if_timer = 5;
2766 }
2767 }
2768
2769 /*
2770 * wm_nq_tx_offload:
2771 *
2772 * Set up TCP/IP checksumming parameters for the
2773 * specified packet, for NEWQUEUE devices.
2774 */
2775 static int
2776 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2777 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2778 {
2779 struct mbuf *m0 = txs->txs_mbuf;
2780 struct m_tag *mtag;
2781 uint32_t vl_len, mssidx, cmdc;
2782 struct ether_header *eh;
2783 int offset, iphl;
2784
2785 /*
2786 * XXX It would be nice if the mbuf pkthdr had offset
2787 * fields for the protocol headers.
2788 */
2789 *cmdlenp = 0;
2790 *fieldsp = 0;
2791
2792 eh = mtod(m0, struct ether_header *);
2793 switch (htons(eh->ether_type)) {
2794 case ETHERTYPE_IP:
2795 case ETHERTYPE_IPV6:
2796 offset = ETHER_HDR_LEN;
2797 break;
2798
2799 case ETHERTYPE_VLAN:
2800 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2801 break;
2802
2803 default:
2804 /*
2805 * Don't support this protocol or encapsulation.
2806 */
2807 *do_csum = false;
2808 return 0;
2809 }
2810 *do_csum = true;
2811 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2812 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2813
2814 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2815 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2816
2817 if ((m0->m_pkthdr.csum_flags &
2818 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2819 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2820 } else {
2821 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2822 }
2823 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2824 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
2825
2826 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2827 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2828 << NQTXC_VLLEN_VLAN_SHIFT);
2829 *cmdlenp |= NQTX_CMD_VLE;
2830 }
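	/*
	 * vl_len now packs the MAC header length, the IP header length
	 * and (when present) the VLAN tag into the single 32-bit VLLEN
	 * word of the advanced context descriptor; the NQTXC_VLLEN_*
	 * shift/mask macros encode the field positions from the
	 * 82575-family descriptor layout.
	 */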
2831
2832 mssidx = 0;
2833
2834 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2835 int hlen = offset + iphl;
2836 int tcp_hlen;
2837 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2838
2839 if (__predict_false(m0->m_len <
2840 (hlen + sizeof(struct tcphdr)))) {
2841 /*
2842 * TCP/IP headers are not in the first mbuf; we need
2843 * to do this the slow and painful way. Let's just
2844 * hope this doesn't happen very often.
2845 */
2846 struct tcphdr th;
2847
2848 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2849
2850 m_copydata(m0, hlen, sizeof(th), &th);
2851 if (v4) {
2852 struct ip ip;
2853
2854 m_copydata(m0, offset, sizeof(ip), &ip);
2855 ip.ip_len = 0;
2856 m_copyback(m0,
2857 offset + offsetof(struct ip, ip_len),
2858 sizeof(ip.ip_len), &ip.ip_len);
2859 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2860 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2861 } else {
2862 struct ip6_hdr ip6;
2863
2864 m_copydata(m0, offset, sizeof(ip6), &ip6);
2865 ip6.ip6_plen = 0;
2866 m_copyback(m0,
2867 offset + offsetof(struct ip6_hdr, ip6_plen),
2868 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2869 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2870 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2871 }
2872 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2873 sizeof(th.th_sum), &th.th_sum);
2874
2875 tcp_hlen = th.th_off << 2;
2876 } else {
2877 /*
2878 * TCP/IP headers are in the first mbuf; we can do
2879 * this the easy way.
2880 */
2881 struct tcphdr *th;
2882
2883 if (v4) {
2884 struct ip *ip =
2885 (void *)(mtod(m0, char *) + offset);
2886 th = (void *)(mtod(m0, char *) + hlen);
2887
2888 ip->ip_len = 0;
2889 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2890 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2891 } else {
2892 struct ip6_hdr *ip6 =
2893 (void *)(mtod(m0, char *) + offset);
2894 th = (void *)(mtod(m0, char *) + hlen);
2895
2896 ip6->ip6_plen = 0;
2897 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2898 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2899 }
2900 tcp_hlen = th->th_off << 2;
2901 }
2902 hlen += tcp_hlen;
2903 *cmdlenp |= NQTX_CMD_TSE;
2904
2905 if (v4) {
2906 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2907 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2908 } else {
2909 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2910 *fieldsp |= NQTXD_FIELDS_TUXSM;
2911 }
2912 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2913 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2914 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2915 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2916 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2917 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2918 } else {
2919 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2920 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2921 }
2922
2923 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2924 *fieldsp |= NQTXD_FIELDS_IXSM;
2925 cmdc |= NQTXC_CMD_IP4;
2926 }
2927
2928 if (m0->m_pkthdr.csum_flags &
2929 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2930 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2931 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2932 cmdc |= NQTXC_CMD_TCP;
2933 } else {
2934 cmdc |= NQTXC_CMD_UDP;
2935 }
2936 cmdc |= NQTXC_CMD_IP4;
2937 *fieldsp |= NQTXD_FIELDS_TUXSM;
2938 }
2939 if (m0->m_pkthdr.csum_flags &
2940 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2941 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2942 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2943 cmdc |= NQTXC_CMD_TCP;
2944 } else {
2945 cmdc |= NQTXC_CMD_UDP;
2946 }
2947 cmdc |= NQTXC_CMD_IP6;
2948 *fieldsp |= NQTXD_FIELDS_TUXSM;
2949 }
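	/*
	 * At this point the three words fed to the context descriptor
	 * below are complete: header lengths and VLAN tag in vl_len,
	 * L3/L4 protocol selection in cmdc, and (for TSO) the MSS and
	 * TCP header length in mssidx.
	 */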
2950
2951 /* Fill in the context descriptor. */
2952 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
2953 htole32(vl_len);
2954 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
2955 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
2956 htole32(cmdc);
2957 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
2958 htole32(mssidx);
2959 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2960 DPRINTF(WM_DEBUG_TX,
2961 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
2962 sc->sc_txnext, 0, vl_len));
2963 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
2964 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2965 txs->txs_ndesc++;
2966 return 0;
2967 }
2968
2969 /*
2970 * wm_nq_start: [ifnet interface function]
2971 *
2972 * Start packet transmission on the interface for NEWQUEUE devices.
2973 */
2974 static void
2975 wm_nq_start(struct ifnet *ifp)
2976 {
2977 struct wm_softc *sc = ifp->if_softc;
2978 struct mbuf *m0;
2979 struct m_tag *mtag;
2980 struct wm_txsoft *txs;
2981 bus_dmamap_t dmamap;
2982 int error, nexttx, lasttx = -1, seg, segs_needed;
2983 bool do_csum, sent;
2984
2985 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2986 return;
2987
2988 sent = false;
2989
2990 /*
2991 * Loop through the send queue, setting up transmit descriptors
2992 * until we drain the queue, or use up all available transmit
2993 * descriptors.
2994 */
2995 for (;;) {
2996 /* Grab a packet off the queue. */
2997 IFQ_POLL(&ifp->if_snd, m0);
2998 if (m0 == NULL)
2999 break;
3000
3001 DPRINTF(WM_DEBUG_TX,
3002 ("%s: TX: have packet to transmit: %p\n",
3003 device_xname(sc->sc_dev), m0));
3004
3005 /* Get a work queue entry. */
3006 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3007 wm_txintr(sc);
3008 if (sc->sc_txsfree == 0) {
3009 DPRINTF(WM_DEBUG_TX,
3010 ("%s: TX: no free job descriptors\n",
3011 device_xname(sc->sc_dev)));
3012 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3013 break;
3014 }
3015 }
3016
3017 txs = &sc->sc_txsoft[sc->sc_txsnext];
3018 dmamap = txs->txs_dmamap;
3019
3020 /*
3021 * Load the DMA map. If this fails, the packet either
3022 * didn't fit in the allotted number of segments, or we
3023 * were short on resources. For the too-many-segments
3024 * case, we simply report an error and drop the packet,
3025 * since we can't sanely copy a jumbo packet to a single
3026 * buffer.
3027 */
3028 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3029 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3030 if (error) {
3031 if (error == EFBIG) {
3032 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3033 log(LOG_ERR, "%s: Tx packet consumes too many "
3034 "DMA segments, dropping...\n",
3035 device_xname(sc->sc_dev));
3036 IFQ_DEQUEUE(&ifp->if_snd, m0);
3037 wm_dump_mbuf_chain(sc, m0);
3038 m_freem(m0);
3039 continue;
3040 }
3041 /*
3042 * Short on resources, just stop for now.
3043 */
3044 DPRINTF(WM_DEBUG_TX,
3045 ("%s: TX: dmamap load failed: %d\n",
3046 device_xname(sc->sc_dev), error));
3047 break;
3048 }
3049
3050 segs_needed = dmamap->dm_nsegs;
3051
3052 /*
3053 * Ensure we have enough descriptors free to describe
3054 * the packet. Note, we always reserve one descriptor
3055 * at the end of the ring due to the semantics of the
3056 * TDT register, plus one more in the event we need
3057 * to load offload context.
3058 */
3059 if (segs_needed > sc->sc_txfree - 2) {
3060 /*
3061 * Not enough free descriptors to transmit this
3062 * packet. We haven't committed anything yet,
3063 * so just unload the DMA map, put the packet
3064 * back on the queue, and punt. Notify the upper
3065 * layer that there are no more slots left.
3066 */
3067 DPRINTF(WM_DEBUG_TX,
3068 ("%s: TX: need %d (%d) descriptors, have %d\n",
3069 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3070 segs_needed, sc->sc_txfree - 1));
3071 ifp->if_flags |= IFF_OACTIVE;
3072 bus_dmamap_unload(sc->sc_dmat, dmamap);
3073 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3074 break;
3075 }
3076
3077 IFQ_DEQUEUE(&ifp->if_snd, m0);
3078
3079 /*
3080 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3081 */
3082
3083 DPRINTF(WM_DEBUG_TX,
3084 ("%s: TX: packet has %d (%d) DMA segments\n",
3085 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3086
3087 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3088
3089 /*
3090 * Store a pointer to the packet so that we can free it
3091 * later.
3092 *
3093 * Initially, we consider the number of descriptors the
3094 * packet uses to be the number of DMA segments. This may be
3095 * incremented by 1 if we do checksum offload (a descriptor
3096 * is used to set the checksum context).
3097 */
3098 txs->txs_mbuf = m0;
3099 txs->txs_firstdesc = sc->sc_txnext;
3100 txs->txs_ndesc = segs_needed;
3101
3102 /* Set up offload parameters for this packet. */
3103 uint32_t cmdlen, fields, dcmdlen;
3104 if (m0->m_pkthdr.csum_flags &
3105 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3106 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3107 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3108 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3109 &do_csum) != 0) {
3110 /* Error message already displayed. */
3111 bus_dmamap_unload(sc->sc_dmat, dmamap);
3112 continue;
3113 }
3114 } else {
3115 do_csum = false;
3116 cmdlen = 0;
3117 fields = 0;
3118 }
3119
3120 /* Sync the DMA map. */
3121 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3122 BUS_DMASYNC_PREWRITE);
3123
3124 /*
3125 * Initialize the first transmit descriptor.
3126 */
3127 nexttx = sc->sc_txnext;
3128 if (!do_csum) {
3129 /* setup a legacy descriptor */
3130 wm_set_dma_addr(
3131 &sc->sc_txdescs[nexttx].wtx_addr,
3132 dmamap->dm_segs[0].ds_addr);
3133 sc->sc_txdescs[nexttx].wtx_cmdlen =
3134 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3135 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3136 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3137 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3138 NULL) {
3139 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3140 htole32(WTX_CMD_VLE);
3141 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3142 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3143 } else {
3144 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3145 }
3146 dcmdlen = 0;
3147 } else {
3148 /* setup an advanced data descriptor */
3149 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3150 htole64(dmamap->dm_segs[0].ds_addr);
3151 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3152 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3153 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3154 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3155 htole32(fields);
3156 DPRINTF(WM_DEBUG_TX,
3157 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3158 device_xname(sc->sc_dev), nexttx,
3159 dmamap->dm_segs[0].ds_addr));
3160 DPRINTF(WM_DEBUG_TX,
3161 ("\t 0x%08x%08x\n", fields,
3162 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3163 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3164 }
3165
3166 lasttx = nexttx;
3167 nexttx = WM_NEXTTX(sc, nexttx);
3168 /*
3169 * Fill in the remaining descriptors. The legacy and advanced
3170 * formats are the same from this point on.
3171 */
3172 for (seg = 1; seg < dmamap->dm_nsegs;
3173 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3174 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3175 htole64(dmamap->dm_segs[seg].ds_addr);
3176 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3177 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3178 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3179 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3180 lasttx = nexttx;
3181
3182 DPRINTF(WM_DEBUG_TX,
3183 ("%s: TX: desc %d: %#" PRIxPADDR ", "
3184 "len %#04zx\n",
3185 device_xname(sc->sc_dev), nexttx,
3186 dmamap->dm_segs[seg].ds_addr,
3187 dmamap->dm_segs[seg].ds_len));
3188 }
3189
3190 KASSERT(lasttx != -1);
3191
3192 /*
3193 * Set up the command byte on the last descriptor of
3194 * the packet. If we're in the interrupt delay window,
3195 * delay the interrupt.
3196 */
3197 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3198 (NQTX_CMD_EOP | NQTX_CMD_RS));
3199 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3200 htole32(WTX_CMD_EOP | WTX_CMD_RS);
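		/*
		 * This writes through the legacy view of the descriptor
		 * union even when an advanced descriptor was built above;
		 * the KASSERT documents that both layouts keep the EOP
		 * and RS bits in the same positions, so the OR is safe
		 * either way.
		 */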
3201
3202 txs->txs_lastdesc = lasttx;
3203
3204 DPRINTF(WM_DEBUG_TX,
3205 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3206 device_xname(sc->sc_dev),
3207 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3208
3209 /* Sync the descriptors we're using. */
3210 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3211 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3212
3213 /* Give the packet to the chip. */
3214 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3215 sent = true;
3216
3217 DPRINTF(WM_DEBUG_TX,
3218 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3219
3220 DPRINTF(WM_DEBUG_TX,
3221 ("%s: TX: finished transmitting packet, job %d\n",
3222 device_xname(sc->sc_dev), sc->sc_txsnext));
3223
3224 /* Advance the tx pointer. */
3225 sc->sc_txfree -= txs->txs_ndesc;
3226 sc->sc_txnext = nexttx;
3227
3228 sc->sc_txsfree--;
3229 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3230
3231 /* Pass the packet to any BPF listeners. */
3232 bpf_mtap(ifp, m0);
3233 }
3234
3235 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3236 /* No more slots; notify upper layer. */
3237 ifp->if_flags |= IFF_OACTIVE;
3238 }
3239
3240 if (sent) {
3241 /* Set a watchdog timer in case the chip flakes out. */
3242 ifp->if_timer = 5;
3243 }
3244 }
3245
3246 /*
3247 * wm_watchdog: [ifnet interface function]
3248 *
3249 * Watchdog timer handler.
3250 */
3251 static void
3252 wm_watchdog(struct ifnet *ifp)
3253 {
3254 struct wm_softc *sc = ifp->if_softc;
3255
3256 /*
3257 * Since we're using delayed interrupts, sweep up
3258 * before we report an error.
3259 */
3260 wm_txintr(sc);
3261
3262 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3263 #ifdef WM_DEBUG
3264 int i, j;
3265 struct wm_txsoft *txs;
3266 #endif
3267 log(LOG_ERR,
3268 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3269 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3270 sc->sc_txnext);
3271 ifp->if_oerrors++;
3272 #ifdef WM_DEBUG
3273 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3274 i = WM_NEXTTXS(sc, i)) {
3275 txs = &sc->sc_txsoft[i];
3276 printf("txs %d tx %d -> %d\n",
3277 i, txs->txs_firstdesc, txs->txs_lastdesc);
3278 for (j = txs->txs_firstdesc; ;
3279 j = WM_NEXTTX(sc, j)) {
3280 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3281 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3282 printf("\t %#08x%08x\n",
3283 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3284 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3285 if (j == txs->txs_lastdesc)
3286 break;
3287 }
3288 }
3289 #endif
3290 /* Reset the interface. */
3291 (void) wm_init(ifp);
3292 }
3293
3294 /* Try to get more packets going. */
3295 ifp->if_start(ifp);
3296 }
3297
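/*
 * wm_ifflags_cb:
 *
 *	Callback from the ethercom layer when the interface flags
 *	change; returns ENETRESET if a full re-initialization is needed.
 */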
3298 static int
3299 wm_ifflags_cb(struct ethercom *ec)
3300 {
3301 struct ifnet *ifp = &ec->ec_if;
3302 struct wm_softc *sc = ifp->if_softc;
3303 int change = ifp->if_flags ^ sc->sc_if_flags;
3304
3305 if (change != 0)
3306 sc->sc_if_flags = ifp->if_flags;
3307
3308 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3309 return ENETRESET;
3310
3311 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3312 wm_set_filter(sc);
3313
3314 wm_set_vlan(sc);
3315
3316 return 0;
3317 }
3318
3319 /*
3320 * wm_ioctl: [ifnet interface function]
3321 *
3322 * Handle control requests from the operator.
3323 */
3324 static int
3325 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3326 {
3327 struct wm_softc *sc = ifp->if_softc;
3328 struct ifreq *ifr = (struct ifreq *) data;
3329 struct ifaddr *ifa = (struct ifaddr *)data;
3330 struct sockaddr_dl *sdl;
3331 int s, error;
3332
3333 s = splnet();
3334
3335 switch (cmd) {
3336 case SIOCSIFMEDIA:
3337 case SIOCGIFMEDIA:
3338 /* Flow control requires full-duplex mode. */
3339 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3340 (ifr->ifr_media & IFM_FDX) == 0)
3341 ifr->ifr_media &= ~IFM_ETH_FMASK;
3342 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3343 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3344 /* We can do both TXPAUSE and RXPAUSE. */
3345 ifr->ifr_media |=
3346 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3347 }
3348 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3349 }
3350 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3351 break;
3352 case SIOCINITIFADDR:
3353 if (ifa->ifa_addr->sa_family == AF_LINK) {
3354 sdl = satosdl(ifp->if_dl->ifa_addr);
3355 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3356 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3357 /* unicast address is first multicast entry */
3358 wm_set_filter(sc);
3359 error = 0;
3360 break;
3361 }
3362 /*FALLTHROUGH*/
3363 default:
3364 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3365 break;
3366
3367 error = 0;
3368
3369 if (cmd == SIOCSIFCAP)
3370 error = (*ifp->if_init)(ifp);
3371 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3372 ;
3373 else if (ifp->if_flags & IFF_RUNNING) {
3374 /*
3375 * Multicast list has changed; set the hardware filter
3376 * accordingly.
3377 */
3378 wm_set_filter(sc);
3379 }
3380 break;
3381 }
3382
3383 /* Try to get more packets going. */
3384 ifp->if_start(ifp);
3385
3386 splx(s);
3387 return error;
3388 }
3389
3390 /*
3391 * wm_intr:
3392 *
3393 * Interrupt service routine.
3394 */
3395 static int
3396 wm_intr(void *arg)
3397 {
3398 struct wm_softc *sc = arg;
3399 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3400 uint32_t icr;
3401 int handled = 0;
3402
3403 while (1 /* CONSTCOND */) {
3404 icr = CSR_READ(sc, WMREG_ICR);
3405 if ((icr & sc->sc_icr) == 0)
3406 break;
3407 rnd_add_uint32(&sc->rnd_source, icr);
3408
3409 handled = 1;
3410
3411 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3412 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3413 DPRINTF(WM_DEBUG_RX,
3414 ("%s: RX: got Rx intr 0x%08x\n",
3415 device_xname(sc->sc_dev),
3416 icr & (ICR_RXDMT0|ICR_RXT0)));
3417 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3418 }
3419 #endif
3420 wm_rxintr(sc);
3421
3422 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3423 if (icr & ICR_TXDW) {
3424 DPRINTF(WM_DEBUG_TX,
3425 ("%s: TX: got TXDW interrupt\n",
3426 device_xname(sc->sc_dev)));
3427 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3428 }
3429 #endif
3430 wm_txintr(sc);
3431
3432 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3433 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3434 wm_linkintr(sc, icr);
3435 }
3436
3437 if (icr & ICR_RXO) {
3438 #if defined(WM_DEBUG)
3439 log(LOG_WARNING, "%s: Receive overrun\n",
3440 device_xname(sc->sc_dev));
3441 #endif /* defined(WM_DEBUG) */
3442 }
3443 }
3444
3445 if (handled) {
3446 /* Try to get more packets going. */
3447 ifp->if_start(ifp);
3448 }
3449
3450 return handled;
3451 }
3452
3453 /*
3454 * wm_txintr:
3455 *
3456 * Helper; handle transmit interrupts.
3457 */
3458 static void
3459 wm_txintr(struct wm_softc *sc)
3460 {
3461 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3462 struct wm_txsoft *txs;
3463 uint8_t status;
3464 int i;
3465
3466 ifp->if_flags &= ~IFF_OACTIVE;
3467
3468 /*
3469 * Go through the Tx list and free mbufs for those
3470 * frames which have been transmitted.
3471 */
3472 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3473 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3474 txs = &sc->sc_txsoft[i];
3475
3476 DPRINTF(WM_DEBUG_TX,
3477 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3478
3479 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3480 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3481
3482 status =
3483 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3484 if ((status & WTX_ST_DD) == 0) {
3485 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3486 BUS_DMASYNC_PREREAD);
3487 break;
3488 }
3489
3490 DPRINTF(WM_DEBUG_TX,
3491 ("%s: TX: job %d done: descs %d..%d\n",
3492 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3493 txs->txs_lastdesc));
3494
3495 /*
3496 * XXX We should probably be using the statistics
3497 * XXX registers, but I don't know if they exist
3498 * XXX on chips before the i82544.
3499 */
3500
3501 #ifdef WM_EVENT_COUNTERS
3502 if (status & WTX_ST_TU)
3503 WM_EVCNT_INCR(&sc->sc_ev_tu);
3504 #endif /* WM_EVENT_COUNTERS */
3505
3506 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3507 ifp->if_oerrors++;
3508 if (status & WTX_ST_LC)
3509 log(LOG_WARNING, "%s: late collision\n",
3510 device_xname(sc->sc_dev));
3511 else if (status & WTX_ST_EC) {
3512 ifp->if_collisions += 16;
3513 log(LOG_WARNING, "%s: excessive collisions\n",
3514 device_xname(sc->sc_dev));
3515 }
3516 } else
3517 ifp->if_opackets++;
3518
3519 sc->sc_txfree += txs->txs_ndesc;
3520 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3521 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3522 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3523 m_freem(txs->txs_mbuf);
3524 txs->txs_mbuf = NULL;
3525 }
3526
3527 /* Update the dirty transmit buffer pointer. */
3528 sc->sc_txsdirty = i;
3529 DPRINTF(WM_DEBUG_TX,
3530 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3531
3532 /*
3533 * If there are no more pending transmissions, cancel the watchdog
3534 * timer.
3535 */
3536 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3537 ifp->if_timer = 0;
3538 }
3539
3540 /*
3541 * wm_rxintr:
3542 *
3543 * Helper; handle receive interrupts.
3544 */
3545 static void
3546 wm_rxintr(struct wm_softc *sc)
3547 {
3548 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3549 struct wm_rxsoft *rxs;
3550 struct mbuf *m;
3551 int i, len;
3552 uint8_t status, errors;
3553 uint16_t vlantag;
3554
3555 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3556 rxs = &sc->sc_rxsoft[i];
3557
3558 DPRINTF(WM_DEBUG_RX,
3559 ("%s: RX: checking descriptor %d\n",
3560 device_xname(sc->sc_dev), i));
3561
3562 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3563
3564 status = sc->sc_rxdescs[i].wrx_status;
3565 errors = sc->sc_rxdescs[i].wrx_errors;
3566 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3567 vlantag = sc->sc_rxdescs[i].wrx_special;
3568
3569 if ((status & WRX_ST_DD) == 0) {
3570 /*
3571 * We have processed all of the receive descriptors.
3572 */
3573 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3574 break;
3575 }
3576
3577 if (__predict_false(sc->sc_rxdiscard)) {
3578 DPRINTF(WM_DEBUG_RX,
3579 ("%s: RX: discarding contents of descriptor %d\n",
3580 device_xname(sc->sc_dev), i));
3581 WM_INIT_RXDESC(sc, i);
3582 if (status & WRX_ST_EOP) {
3583 /* Reset our state. */
3584 DPRINTF(WM_DEBUG_RX,
3585 ("%s: RX: resetting rxdiscard -> 0\n",
3586 device_xname(sc->sc_dev)));
3587 sc->sc_rxdiscard = 0;
3588 }
3589 continue;
3590 }
3591
3592 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3593 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3594
3595 m = rxs->rxs_mbuf;
3596
3597 /*
3598 * Add a new receive buffer to the ring, unless of
3599 * course the length is zero. Treat the latter as a
3600 * failed mapping.
3601 */
3602 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3603 /*
3604 * Failed, throw away what we've done so
3605 * far, and discard the rest of the packet.
3606 */
3607 ifp->if_ierrors++;
3608 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3609 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3610 WM_INIT_RXDESC(sc, i);
3611 if ((status & WRX_ST_EOP) == 0)
3612 sc->sc_rxdiscard = 1;
3613 if (sc->sc_rxhead != NULL)
3614 m_freem(sc->sc_rxhead);
3615 WM_RXCHAIN_RESET(sc);
3616 DPRINTF(WM_DEBUG_RX,
3617 ("%s: RX: Rx buffer allocation failed, "
3618 "dropping packet%s\n", device_xname(sc->sc_dev),
3619 sc->sc_rxdiscard ? " (discard)" : ""));
3620 continue;
3621 }
3622
3623 m->m_len = len;
3624 sc->sc_rxlen += len;
3625 DPRINTF(WM_DEBUG_RX,
3626 ("%s: RX: buffer at %p len %d\n",
3627 device_xname(sc->sc_dev), m->m_data, len));
3628
3629 /*
3630 * If this is not the end of the packet, keep
3631 * looking.
3632 */
3633 if ((status & WRX_ST_EOP) == 0) {
3634 WM_RXCHAIN_LINK(sc, m);
3635 DPRINTF(WM_DEBUG_RX,
3636 ("%s: RX: not yet EOP, rxlen -> %d\n",
3637 device_xname(sc->sc_dev), sc->sc_rxlen));
3638 continue;
3639 }
3640
3641 		/*
3642 		 * Okay, we have the entire packet now.  The chip is
3643 		 * configured to include the FCS except on the I350
3644 		 * (not all chips can be configured to strip it),
3645 		 * so we need to trim it.  We may also need to adjust
3646 		 * the length of the previous mbuf in the chain if the
3647 		 * current mbuf is too short.
3648 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
3649 		 * register is always set on the I350, so we don't trim there.
3650 		 */
3651 if (sc->sc_type != WM_T_I350) {
3652 if (m->m_len < ETHER_CRC_LEN) {
3653 sc->sc_rxtail->m_len
3654 -= (ETHER_CRC_LEN - m->m_len);
3655 m->m_len = 0;
3656 } else
3657 m->m_len -= ETHER_CRC_LEN;
3658 len = sc->sc_rxlen - ETHER_CRC_LEN;
3659 } else
3660 len = sc->sc_rxlen;
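		/*
		 * Worked example (a sketch): if the final fragment
		 * carries only 2 of the 4 FCS bytes (m_len == 2 <
		 * ETHER_CRC_LEN), the remaining 2 FCS bytes sit at the
		 * tail of the previous mbuf, so above we shorten
		 * sc_rxtail->m_len by ETHER_CRC_LEN - m_len == 2 and
		 * zero this mbuf's length before computing the final
		 * packet length.
		 */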
3661
3662 WM_RXCHAIN_LINK(sc, m);
3663
3664 *sc->sc_rxtailp = NULL;
3665 m = sc->sc_rxhead;
3666
3667 WM_RXCHAIN_RESET(sc);
3668
3669 DPRINTF(WM_DEBUG_RX,
3670 ("%s: RX: have entire packet, len -> %d\n",
3671 device_xname(sc->sc_dev), len));
3672
3673 /*
3674 * If an error occurred, update stats and drop the packet.
3675 */
3676 if (errors &
3677 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3678 if (errors & WRX_ER_SE)
3679 log(LOG_WARNING, "%s: symbol error\n",
3680 device_xname(sc->sc_dev));
3681 else if (errors & WRX_ER_SEQ)
3682 log(LOG_WARNING, "%s: receive sequence error\n",
3683 device_xname(sc->sc_dev));
3684 else if (errors & WRX_ER_CE)
3685 log(LOG_WARNING, "%s: CRC error\n",
3686 device_xname(sc->sc_dev));
3687 m_freem(m);
3688 continue;
3689 }
3690
3691 /*
3692 * No errors. Receive the packet.
3693 */
3694 m->m_pkthdr.rcvif = ifp;
3695 m->m_pkthdr.len = len;
3696
3697 /*
3698 * If VLANs are enabled, VLAN packets have been unwrapped
3699 * for us. Associate the tag with the packet.
3700 */
3701 if ((status & WRX_ST_VP) != 0) {
3702 VLAN_INPUT_TAG(ifp, m,
3703 le16toh(vlantag),
3704 continue);
3705 }
3706
3707 /*
3708 * Set up checksum info for this packet.
3709 */
3710 if ((status & WRX_ST_IXSM) == 0) {
3711 if (status & WRX_ST_IPCS) {
3712 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3713 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3714 if (errors & WRX_ER_IPE)
3715 m->m_pkthdr.csum_flags |=
3716 M_CSUM_IPv4_BAD;
3717 }
3718 if (status & WRX_ST_TCPCS) {
3719 /*
3720 * Note: we don't know if this was TCP or UDP,
3721 * so we just set both bits, and expect the
3722 * upper layers to deal.
3723 */
3724 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3725 m->m_pkthdr.csum_flags |=
3726 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3727 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3728 if (errors & WRX_ER_TCPE)
3729 m->m_pkthdr.csum_flags |=
3730 M_CSUM_TCP_UDP_BAD;
3731 }
3732 }
3733
3734 ifp->if_ipackets++;
3735
3736 /* Pass this up to any BPF listeners. */
3737 bpf_mtap(ifp, m);
3738
3739 /* Pass it on. */
3740 (*ifp->if_input)(ifp, m);
3741 }
3742
3743 /* Update the receive pointer. */
3744 sc->sc_rxptr = i;
3745
3746 DPRINTF(WM_DEBUG_RX,
3747 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3748 }
3749
3750 /*
3751 * wm_linkintr_gmii:
3752 *
3753 * Helper; handle link interrupts for GMII.
3754 */
3755 static void
3756 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3757 {
3758
3759 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3760 __func__));
3761
3762 if (icr & ICR_LSC) {
3763 DPRINTF(WM_DEBUG_LINK,
3764 ("%s: LINK: LSC -> mii_tick\n",
3765 device_xname(sc->sc_dev)));
3766 mii_tick(&sc->sc_mii);
3767 if (sc->sc_type == WM_T_82543) {
3768 int miistatus, active;
3769
3770 /*
3771 * With 82543, we need to force speed and
3772 * duplex on the MAC equal to what the PHY
3773 * speed and duplex configuration is.
3774 */
3775 miistatus = sc->sc_mii.mii_media_status;
3776
3777 if (miistatus & IFM_ACTIVE) {
3778 active = sc->sc_mii.mii_media_active;
3779 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3780 switch (IFM_SUBTYPE(active)) {
3781 case IFM_10_T:
3782 sc->sc_ctrl |= CTRL_SPEED_10;
3783 break;
3784 case IFM_100_TX:
3785 sc->sc_ctrl |= CTRL_SPEED_100;
3786 break;
3787 case IFM_1000_T:
3788 sc->sc_ctrl |= CTRL_SPEED_1000;
3789 break;
3790 default:
3791 /*
3792 * fiber?
3793 					 * Should not enter here.
3794 */
3795 printf("unknown media (%x)\n",
3796 active);
3797 break;
3798 }
3799 if (active & IFM_FDX)
3800 sc->sc_ctrl |= CTRL_FD;
3801 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3802 }
3803 } else if ((sc->sc_type == WM_T_ICH8)
3804 && (sc->sc_phytype == WMPHY_IGP_3)) {
3805 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3806 } else if (sc->sc_type == WM_T_PCH) {
3807 wm_k1_gig_workaround_hv(sc,
3808 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3809 }
3810
3811 if ((sc->sc_phytype == WMPHY_82578)
3812 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3813 == IFM_1000_T)) {
3814
3815 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3816 delay(200*1000); /* XXX too big */
3817
3818 /* Link stall fix for link up */
3819 wm_gmii_hv_writereg(sc->sc_dev, 1,
3820 HV_MUX_DATA_CTRL,
3821 HV_MUX_DATA_CTRL_GEN_TO_MAC
3822 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3823 wm_gmii_hv_writereg(sc->sc_dev, 1,
3824 HV_MUX_DATA_CTRL,
3825 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3826 }
3827 }
3828 } else if (icr & ICR_RXSEQ) {
3829 DPRINTF(WM_DEBUG_LINK,
3830 ("%s: LINK Receive sequence error\n",
3831 device_xname(sc->sc_dev)));
3832 }
3833 }
3834
3835 /*
3836 * wm_linkintr_tbi:
3837 *
3838 * Helper; handle link interrupts for TBI mode.
3839 */
3840 static void
3841 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3842 {
3843 uint32_t status;
3844
3845 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3846 __func__));
3847
3848 status = CSR_READ(sc, WMREG_STATUS);
3849 if (icr & ICR_LSC) {
3850 if (status & STATUS_LU) {
3851 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3852 device_xname(sc->sc_dev),
3853 (status & STATUS_FD) ? "FDX" : "HDX"));
3854 /*
3855 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
3856 			 * automatically, so we re-read CTRL into sc->sc_ctrl.
3857 */
3858
3859 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3860 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3861 sc->sc_fcrtl &= ~FCRTL_XONE;
3862 if (status & STATUS_FD)
3863 sc->sc_tctl |=
3864 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3865 else
3866 sc->sc_tctl |=
3867 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3868 if (sc->sc_ctrl & CTRL_TFCE)
3869 sc->sc_fcrtl |= FCRTL_XONE;
3870 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3871 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3872 WMREG_OLD_FCRTL : WMREG_FCRTL,
3873 sc->sc_fcrtl);
3874 sc->sc_tbi_linkup = 1;
3875 } else {
3876 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3877 device_xname(sc->sc_dev)));
3878 sc->sc_tbi_linkup = 0;
3879 }
3880 wm_tbi_set_linkled(sc);
3881 } else if (icr & ICR_RXCFG) {
3882 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3883 device_xname(sc->sc_dev)));
3884 sc->sc_tbi_nrxcfg++;
3885 wm_check_for_link(sc);
3886 } else if (icr & ICR_RXSEQ) {
3887 DPRINTF(WM_DEBUG_LINK,
3888 ("%s: LINK: Receive sequence error\n",
3889 device_xname(sc->sc_dev)));
3890 }
3891 }
3892
3893 /*
3894 * wm_linkintr:
3895 *
3896 * Helper; handle link interrupts.
3897 */
3898 static void
3899 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3900 {
3901
3902 if (sc->sc_flags & WM_F_HAS_MII)
3903 wm_linkintr_gmii(sc, icr);
3904 else
3905 wm_linkintr_tbi(sc, icr);
3906 }
3907
3908 /*
3909 * wm_tick:
3910 *
3911 * One second timer, used to check link status, sweep up
3912 * completed transmit jobs, etc.
3913 */
3914 static void
3915 wm_tick(void *arg)
3916 {
3917 struct wm_softc *sc = arg;
3918 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3919 int s;
3920
3921 s = splnet();
3922
3923 if (sc->sc_type >= WM_T_82542_2_1) {
3924 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3925 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3926 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3927 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3928 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3929 }
3930
3931 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3932 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3933 	    + CSR_READ(sc, WMREG_CRCERRS)
3934 + CSR_READ(sc, WMREG_ALGNERRC)
3935 + CSR_READ(sc, WMREG_SYMERRC)
3936 + CSR_READ(sc, WMREG_RXERRC)
3937 + CSR_READ(sc, WMREG_SEC)
3938 + CSR_READ(sc, WMREG_CEXTERR)
3939 + CSR_READ(sc, WMREG_RLEC);
3940 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3941
3942 if (sc->sc_flags & WM_F_HAS_MII)
3943 mii_tick(&sc->sc_mii);
3944 else
3945 wm_tbi_check_link(sc);
3946
3947 splx(s);
3948
3949 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3950 }
3951
3952 /*
3953 * wm_reset:
3954 *
3955  *	Reset the chip to a known state.
3956 */
3957 static void
3958 wm_reset(struct wm_softc *sc)
3959 {
3960 int phy_reset = 0;
3961 uint32_t reg, mask;
3962 int i;
3963
3964 /*
3965 * Allocate on-chip memory according to the MTU size.
3966 * The Packet Buffer Allocation register must be written
3967 * before the chip is reset.
3968 */
3969 switch (sc->sc_type) {
3970 case WM_T_82547:
3971 case WM_T_82547_2:
3972 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3973 PBA_22K : PBA_30K;
3974 sc->sc_txfifo_head = 0;
3975 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3976 sc->sc_txfifo_size =
3977 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3978 sc->sc_txfifo_stall = 0;
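		/*
		 * Worked example (a sketch, assuming the PBA_* values
		 * are in 1 KB units and PBA_BYTE_SHIFT == 10): with a
		 * standard MTU we program PBA_30K, leaving a Tx FIFO
		 * of (40 - 30) << 10 = 10240 bytes; with MTU > 8192 we
		 * program PBA_22K, leaving 18 KB so that a full jumbo
		 * frame fits in the Tx FIFO.
		 */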
3979 break;
3980 case WM_T_82571:
3981 case WM_T_82572:
3982 case WM_T_82575: /* XXX need special handing for jumbo frames */
3983 case WM_T_I350:
3984 case WM_T_80003:
3985 sc->sc_pba = PBA_32K;
3986 break;
3987 case WM_T_82580:
3988 case WM_T_82580ER:
3989 sc->sc_pba = PBA_35K;
3990 break;
3991 case WM_T_82576:
3992 sc->sc_pba = PBA_64K;
3993 break;
3994 case WM_T_82573:
3995 sc->sc_pba = PBA_12K;
3996 break;
3997 case WM_T_82574:
3998 case WM_T_82583:
3999 sc->sc_pba = PBA_20K;
4000 break;
4001 case WM_T_ICH8:
4002 sc->sc_pba = PBA_8K;
4003 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4004 break;
4005 case WM_T_ICH9:
4006 case WM_T_ICH10:
4007 sc->sc_pba = PBA_10K;
4008 break;
4009 case WM_T_PCH:
4010 case WM_T_PCH2:
4011 sc->sc_pba = PBA_26K;
4012 break;
4013 default:
4014 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4015 PBA_40K : PBA_48K;
4016 break;
4017 }
4018 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4019
4020 /* Prevent the PCI-E bus from sticking */
4021 if (sc->sc_flags & WM_F_PCIE) {
4022 int timeout = 800;
4023
4024 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4025 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4026
4027 while (timeout--) {
4028 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
4029 break;
4030 delay(100);
4031 }
4032 }
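	/*
	 * The loop above implements the usual PCIe master-disable
	 * handshake: request CTRL_GIO_M_DIS and then poll STATUS
	 * until the GIO master-enable bit drops (here up to
	 * 800 * 100 us = 80 ms), ensuring no DMA is in flight when
	 * the MAC is reset below.
	 */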
4033
4034 	/* Set the PCIe completion timeout for the interface */
4035 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4036 || (sc->sc_type == WM_T_I350))
4037 wm_set_pcie_completion_timeout(sc);
4038
4039 /* Clear interrupt */
4040 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4041
4042 /* Stop the transmit and receive processes. */
4043 CSR_WRITE(sc, WMREG_RCTL, 0);
4044 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4045 sc->sc_rctl &= ~RCTL_EN;
4046
4047 /* XXX set_tbi_sbp_82543() */
4048
4049 delay(10*1000);
4050
4051 /* Must acquire the MDIO ownership before MAC reset */
4052 switch (sc->sc_type) {
4053 case WM_T_82573:
4054 case WM_T_82574:
4055 case WM_T_82583:
4056 i = 0;
4057 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4058 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4059 do {
4060 CSR_WRITE(sc, WMREG_EXTCNFCTR,
4061 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4062 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4063 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4064 break;
4065 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4066 delay(2*1000);
4067 i++;
4068 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4069 break;
4070 default:
4071 break;
4072 }
4073
4074 /*
4075 * 82541 Errata 29? & 82547 Errata 28?
4076 * See also the description about PHY_RST bit in CTRL register
4077 * in 8254x_GBe_SDM.pdf.
4078 */
4079 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4080 CSR_WRITE(sc, WMREG_CTRL,
4081 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4082 delay(5000);
4083 }
4084
4085 switch (sc->sc_type) {
4086 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4087 case WM_T_82541:
4088 case WM_T_82541_2:
4089 case WM_T_82547:
4090 case WM_T_82547_2:
4091 /*
4092 * On some chipsets, a reset through a memory-mapped write
4093 * cycle can cause the chip to reset before completing the
4094 	 * write cycle.  This causes a major headache that can be
4095 * avoided by issuing the reset via indirect register writes
4096 * through I/O space.
4097 *
4098 * So, if we successfully mapped the I/O BAR at attach time,
4099 * use that. Otherwise, try our luck with a memory-mapped
4100 * reset.
4101 */
4102 if (sc->sc_flags & WM_F_IOH_VALID)
4103 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4104 else
4105 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4106 break;
4107 case WM_T_82545_3:
4108 case WM_T_82546_3:
4109 /* Use the shadow control register on these chips. */
4110 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4111 break;
4112 case WM_T_80003:
4113 mask = swfwphysem[sc->sc_funcid];
4114 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4115 wm_get_swfw_semaphore(sc, mask);
4116 CSR_WRITE(sc, WMREG_CTRL, reg);
4117 wm_put_swfw_semaphore(sc, mask);
4118 break;
4119 case WM_T_ICH8:
4120 case WM_T_ICH9:
4121 case WM_T_ICH10:
4122 case WM_T_PCH:
4123 case WM_T_PCH2:
4124 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4125 if (wm_check_reset_block(sc) == 0) {
4126 /*
4127 			 * Gate automatic PHY configuration by hardware on
4128 			 * the managed 82579.
4129 */
4130 if ((sc->sc_type == WM_T_PCH2)
4131 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4132 != 0))
4133 wm_gate_hw_phy_config_ich8lan(sc, 1);
4134 
4136 reg |= CTRL_PHY_RESET;
4137 phy_reset = 1;
4138 }
4139 wm_get_swfwhw_semaphore(sc);
4140 CSR_WRITE(sc, WMREG_CTRL, reg);
4141 delay(20*1000);
4142 wm_put_swfwhw_semaphore(sc);
4143 break;
4144 case WM_T_82542_2_0:
4145 case WM_T_82542_2_1:
4146 case WM_T_82543:
4147 case WM_T_82540:
4148 case WM_T_82545:
4149 case WM_T_82546:
4150 case WM_T_82571:
4151 case WM_T_82572:
4152 case WM_T_82573:
4153 case WM_T_82574:
4154 case WM_T_82575:
4155 case WM_T_82576:
4156 case WM_T_82580:
4157 case WM_T_82580ER:
4158 case WM_T_82583:
4159 case WM_T_I350:
4160 default:
4161 /* Everything else can safely use the documented method. */
4162 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4163 break;
4164 }
4165
4166 if (phy_reset != 0)
4167 wm_get_cfg_done(sc);
4168
4169 /* reload EEPROM */
4170 switch (sc->sc_type) {
4171 case WM_T_82542_2_0:
4172 case WM_T_82542_2_1:
4173 case WM_T_82543:
4174 case WM_T_82544:
4175 delay(10);
4176 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4177 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4178 delay(2000);
4179 break;
4180 case WM_T_82540:
4181 case WM_T_82545:
4182 case WM_T_82545_3:
4183 case WM_T_82546:
4184 case WM_T_82546_3:
4185 delay(5*1000);
4186 /* XXX Disable HW ARPs on ASF enabled adapters */
4187 break;
4188 case WM_T_82541:
4189 case WM_T_82541_2:
4190 case WM_T_82547:
4191 case WM_T_82547_2:
4192 delay(20000);
4193 /* XXX Disable HW ARPs on ASF enabled adapters */
4194 break;
4195 case WM_T_82571:
4196 case WM_T_82572:
4197 case WM_T_82573:
4198 case WM_T_82574:
4199 case WM_T_82583:
4200 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4201 delay(10);
4202 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4203 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4204 }
4205 /* check EECD_EE_AUTORD */
4206 wm_get_auto_rd_done(sc);
4207 /*
4208 		 * PHY configuration from the NVM starts only after
4209 		 * EECD_AUTO_RD is set.
4210 */
4211 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4212 || (sc->sc_type == WM_T_82583))
4213 delay(25*1000);
4214 break;
4215 case WM_T_82575:
4216 case WM_T_82576:
4217 case WM_T_82580:
4218 case WM_T_82580ER:
4219 case WM_T_I350:
4220 case WM_T_80003:
4221 case WM_T_ICH8:
4222 case WM_T_ICH9:
4223 /* check EECD_EE_AUTORD */
4224 wm_get_auto_rd_done(sc);
4225 break;
4226 case WM_T_ICH10:
4227 case WM_T_PCH:
4228 case WM_T_PCH2:
4229 wm_lan_init_done(sc);
4230 break;
4231 default:
4232 panic("%s: unknown type\n", __func__);
4233 }
4234
4235 /* Check whether EEPROM is present or not */
4236 switch (sc->sc_type) {
4237 case WM_T_82575:
4238 case WM_T_82576:
4239 #if 0 /* XXX */
4240 case WM_T_82580:
4241 case WM_T_82580ER:
4242 #endif
4243 case WM_T_I350:
4244 case WM_T_ICH8:
4245 case WM_T_ICH9:
4246 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4247 /* Not found */
4248 sc->sc_flags |= WM_F_EEPROM_INVALID;
4249 if ((sc->sc_type == WM_T_82575)
4250 || (sc->sc_type == WM_T_82576)
4251 || (sc->sc_type == WM_T_82580)
4252 || (sc->sc_type == WM_T_82580ER)
4253 || (sc->sc_type == WM_T_I350))
4254 wm_reset_init_script_82575(sc);
4255 }
4256 break;
4257 default:
4258 break;
4259 }
4260
4261 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4262 || (sc->sc_type == WM_T_I350)) {
4263 /* clear global device reset status bit */
4264 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4265 }
4266
4267 /* Clear any pending interrupt events. */
4268 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4269 reg = CSR_READ(sc, WMREG_ICR);
4270
4271 /* reload sc_ctrl */
4272 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4273
4274 if (sc->sc_type == WM_T_I350)
4275 wm_set_eee_i350(sc);
4276
4277 /* dummy read from WUC */
4278 if (sc->sc_type == WM_T_PCH)
4279 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4280 /*
4281 * For PCH, this write will make sure that any noise will be detected
4282 * as a CRC error and be dropped rather than show up as a bad packet
4283 * to the DMA engine
4284 */
4285 if (sc->sc_type == WM_T_PCH)
4286 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4287
4288 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4289 CSR_WRITE(sc, WMREG_WUC, 0);
4290
4291 /* XXX need special handling for 82580 */
4292 }
4293
4294 static void
4295 wm_set_vlan(struct wm_softc *sc)
4296 {
4297 /* Deal with VLAN enables. */
4298 if (VLAN_ATTACHED(&sc->sc_ethercom))
4299 sc->sc_ctrl |= CTRL_VME;
4300 else
4301 sc->sc_ctrl &= ~CTRL_VME;
4302
4303 /* Write the control registers. */
4304 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4305 }
4306
4307 /*
4308 * wm_init: [ifnet interface function]
4309 *
4310 * Initialize the interface. Must be called at splnet().
4311 */
4312 static int
4313 wm_init(struct ifnet *ifp)
4314 {
4315 struct wm_softc *sc = ifp->if_softc;
4316 struct wm_rxsoft *rxs;
4317 int i, j, trynum, error = 0;
4318 uint32_t reg;
4319
4320 /*
4321 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4322 	 * There is a small but measurable benefit to avoiding the adjustment
4323 	 * of the descriptor so that the headers are aligned, for normal MTU,
4324 * on such platforms. One possibility is that the DMA itself is
4325 * slightly more efficient if the front of the entire packet (instead
4326 * of the front of the headers) is aligned.
4327 *
4328 * Note we must always set align_tweak to 0 if we are using
4329 * jumbo frames.
4330 */
4331 #ifdef __NO_STRICT_ALIGNMENT
4332 sc->sc_align_tweak = 0;
4333 #else
4334 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4335 sc->sc_align_tweak = 0;
4336 else
4337 sc->sc_align_tweak = 2;
4338 #endif /* __NO_STRICT_ALIGNMENT */
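	/*
	 * Example: with sc_align_tweak == 2 the receive buffer
	 * starts at offset 2, so the 14-byte Ethernet header ends
	 * on a 4-byte boundary and the IP header that follows is
	 * 32-bit aligned, as strict-alignment platforms require.
	 */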
4339
4340 /* Cancel any pending I/O. */
4341 wm_stop(ifp, 0);
4342
4343 /* update statistics before reset */
4344 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4345 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4346
4347 /* Reset the chip to a known state. */
4348 wm_reset(sc);
4349
4350 switch (sc->sc_type) {
4351 case WM_T_82571:
4352 case WM_T_82572:
4353 case WM_T_82573:
4354 case WM_T_82574:
4355 case WM_T_82583:
4356 case WM_T_80003:
4357 case WM_T_ICH8:
4358 case WM_T_ICH9:
4359 case WM_T_ICH10:
4360 case WM_T_PCH:
4361 case WM_T_PCH2:
4362 if (wm_check_mng_mode(sc) != 0)
4363 wm_get_hw_control(sc);
4364 break;
4365 default:
4366 break;
4367 }
4368
4369 /* Reset the PHY. */
4370 if (sc->sc_flags & WM_F_HAS_MII)
4371 wm_gmii_reset(sc);
4372
4373 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4374 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4375 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
4376 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4377
4378 /* Initialize the transmit descriptor ring. */
4379 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4380 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4381 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4382 sc->sc_txfree = WM_NTXDESC(sc);
4383 sc->sc_txnext = 0;
4384
4385 if (sc->sc_type < WM_T_82543) {
4386 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4387 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4388 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4389 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4390 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4391 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4392 } else {
4393 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4394 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4395 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4396 CSR_WRITE(sc, WMREG_TDH, 0);
4397 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4398 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4399
4400 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4401 /*
4402 * Don't write TDT before TCTL.EN is set.
4403 			 * See the documentation.
4404 */
4405 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4406 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4407 | TXDCTL_WTHRESH(0));
4408 else {
4409 CSR_WRITE(sc, WMREG_TDT, 0);
4410 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4411 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4412 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4413 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4414 }
4415 }
4416 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4417 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4418
4419 /* Initialize the transmit job descriptors. */
4420 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4421 sc->sc_txsoft[i].txs_mbuf = NULL;
4422 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4423 sc->sc_txsnext = 0;
4424 sc->sc_txsdirty = 0;
4425
4426 /*
4427 * Initialize the receive descriptor and receive job
4428 * descriptor rings.
4429 */
4430 if (sc->sc_type < WM_T_82543) {
4431 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4432 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4433 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4434 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4435 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4436 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4437
4438 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4439 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4440 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4441 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4442 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4443 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4444 } else {
4445 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4446 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4447 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4448 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4449 CSR_WRITE(sc, WMREG_EITR(0), 450);
4450 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4451 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4452 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4453 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4454 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4455 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4456 | RXDCTL_WTHRESH(1));
4457 } else {
4458 CSR_WRITE(sc, WMREG_RDH, 0);
4459 CSR_WRITE(sc, WMREG_RDT, 0);
4460 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4461 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4462 }
4463 }
4464 for (i = 0; i < WM_NRXDESC; i++) {
4465 rxs = &sc->sc_rxsoft[i];
4466 if (rxs->rxs_mbuf == NULL) {
4467 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4468 log(LOG_ERR, "%s: unable to allocate or map rx "
4469 "buffer %d, error = %d\n",
4470 device_xname(sc->sc_dev), i, error);
4471 /*
4472 * XXX Should attempt to run with fewer receive
4473 * XXX buffers instead of just failing.
4474 */
4475 wm_rxdrain(sc);
4476 goto out;
4477 }
4478 } else {
4479 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4480 WM_INIT_RXDESC(sc, i);
4481 /*
4482 			 * For 82575 and newer devices, the RX descriptors
4483 			 * must be initialized after RCTL.EN is set in
4484 			 * wm_set_filter().
4485 */
4486 }
4487 }
4488 sc->sc_rxptr = 0;
4489 sc->sc_rxdiscard = 0;
4490 WM_RXCHAIN_RESET(sc);
4491
4492 /*
4493 * Clear out the VLAN table -- we don't use it (yet).
4494 */
4495 CSR_WRITE(sc, WMREG_VET, 0);
4496 if (sc->sc_type == WM_T_I350)
4497 trynum = 10; /* Due to hw errata */
4498 else
4499 trynum = 1;
4500 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4501 for (j = 0; j < trynum; j++)
4502 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4503
4504 /*
4505 * Set up flow-control parameters.
4506 *
4507 * XXX Values could probably stand some tuning.
4508 */
4509 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4510 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4511 && (sc->sc_type != WM_T_PCH2)) {
4512 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4513 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4514 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4515 }
4516
4517 sc->sc_fcrtl = FCRTL_DFLT;
4518 if (sc->sc_type < WM_T_82543) {
4519 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4520 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4521 } else {
4522 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4523 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4524 }
4525
4526 if (sc->sc_type == WM_T_80003)
4527 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4528 else
4529 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4530
4531 /* Writes the control register. */
4532 wm_set_vlan(sc);
4533
4534 if (sc->sc_flags & WM_F_HAS_MII) {
4535 int val;
4536
4537 switch (sc->sc_type) {
4538 case WM_T_80003:
4539 case WM_T_ICH8:
4540 case WM_T_ICH9:
4541 case WM_T_ICH10:
4542 case WM_T_PCH:
4543 case WM_T_PCH2:
4544 /*
4545 			 * Set the MAC to wait the maximum time between each
4546 			 * iteration, and increase the maximum iterations when
4547 			 * polling the PHY; this fixes erroneous timeouts at
4548 			 * 10 Mb/s.
4549 */
4550 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4551 0xFFFF);
4552 val = wm_kmrn_readreg(sc,
4553 KUMCTRLSTA_OFFSET_INB_PARAM);
4554 val |= 0x3F;
4555 wm_kmrn_writereg(sc,
4556 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4557 break;
4558 default:
4559 break;
4560 }
4561
4562 if (sc->sc_type == WM_T_80003) {
4563 val = CSR_READ(sc, WMREG_CTRL_EXT);
4564 val &= ~CTRL_EXT_LINK_MODE_MASK;
4565 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4566
4567 /* Bypass RX and TX FIFO's */
4568 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4569 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4570 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4571 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4572 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4573 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4574 }
4575 }
4576 #if 0
4577 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4578 #endif
4579
4580 /*
4581 * Set up checksum offload parameters.
4582 */
4583 reg = CSR_READ(sc, WMREG_RXCSUM);
4584 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4585 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4586 reg |= RXCSUM_IPOFL;
4587 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4588 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4589 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4590 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4591 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4592
4593 /* Reset TBI's RXCFG count */
4594 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4595
4596 /*
4597 * Set up the interrupt registers.
4598 */
4599 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4600 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4601 ICR_RXO | ICR_RXT0;
4602 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4603 sc->sc_icr |= ICR_RXCFG;
4604 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4605
4606 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4607 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4608 || (sc->sc_type == WM_T_PCH2)) {
4609 reg = CSR_READ(sc, WMREG_KABGTXD);
4610 reg |= KABGTXD_BGSQLBIAS;
4611 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4612 }
4613
4614 /* Set up the inter-packet gap. */
4615 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4616
4617 if (sc->sc_type >= WM_T_82543) {
4618 /*
4619 * Set up the interrupt throttling register (units of 256ns)
4620 * Note that a footnote in Intel's documentation says this
4621 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4622 * or 10Mbit mode. Empirically, it appears to be the case
4623 * that that is also true for the 1024ns units of the other
4624 * interrupt-related timer registers -- so, really, we ought
4625 * to divide this value by 4 when the link speed is low.
4626 *
4627 * XXX implement this division at link speed change!
4628 */
4629
4630 /*
4631 * For N interrupts/sec, set this value to:
4632 * 1000000000 / (N * 256). Note that we set the
4633 * absolute and packet timer values to this value
4634 * divided by 4 to get "simple timer" behavior.
4635 */
4636
4637 sc->sc_itr = 1500; /* 2604 ints/sec */
4638 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
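		/*
		 * Worked example: sc_itr == 1500 gives a minimum
		 * inter-interrupt interval of 1500 * 256 ns = 384 us,
		 * i.e. 1000000000 / (1500 * 256) ~= 2604
		 * interrupts/sec, matching the comment above.
		 */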
4639 }
4640
4641 /* Set the VLAN ethernetype. */
4642 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4643
4644 /*
4645 * Set up the transmit control register; we start out with
4646 	 * a collision distance suitable for FDX, but update it when
4647 * we resolve the media type.
4648 */
4649 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4650 | TCTL_CT(TX_COLLISION_THRESHOLD)
4651 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4652 if (sc->sc_type >= WM_T_82571)
4653 sc->sc_tctl |= TCTL_MULR;
4654 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4655
4656 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4657 /*
4658 * Write TDT after TCTL.EN is set.
4659 		 * See the documentation.
4660 */
4661 CSR_WRITE(sc, WMREG_TDT, 0);
4662 }
4663
4664 if (sc->sc_type == WM_T_80003) {
4665 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4666 reg &= ~TCTL_EXT_GCEX_MASK;
4667 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4668 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4669 }
4670
4671 /* Set the media. */
4672 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4673 goto out;
4674
4675 /* Configure for OS presence */
4676 wm_init_manageability(sc);
4677
4678 /*
4679 * Set up the receive control register; we actually program
4680 * the register when we set the receive filter. Use multicast
4681 * address offset type 0.
4682 *
4683 * Only the i82544 has the ability to strip the incoming
4684 * CRC, so we don't enable that feature.
4685 */
4686 sc->sc_mchash_type = 0;
4687 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4688 | RCTL_MO(sc->sc_mchash_type);
4689
4690 /*
4691 * The I350 has a bug where it always strips the CRC whether
4692 	 * asked to or not, so ask for the stripped CRC here and cope in wm_rxintr().
4693 */
4694 if (sc->sc_type == WM_T_I350)
4695 sc->sc_rctl |= RCTL_SECRC;
4696
4697 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4698 && (ifp->if_mtu > ETHERMTU)) {
4699 sc->sc_rctl |= RCTL_LPE;
4700 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4701 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4702 }
4703
4704 if (MCLBYTES == 2048) {
4705 sc->sc_rctl |= RCTL_2k;
4706 } else {
4707 if (sc->sc_type >= WM_T_82543) {
4708 switch (MCLBYTES) {
4709 case 4096:
4710 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4711 break;
4712 case 8192:
4713 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4714 break;
4715 case 16384:
4716 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4717 break;
4718 default:
4719 panic("wm_init: MCLBYTES %d unsupported",
4720 MCLBYTES);
4721 break;
4722 }
4723 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4724 }
4725
4726 /* Set the receive filter. */
4727 wm_set_filter(sc);
4728
4729 /* On 575 and later set RDT only if RX enabled */
4730 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4731 for (i = 0; i < WM_NRXDESC; i++)
4732 WM_INIT_RXDESC(sc, i);
4733
4734 /* Start the one second link check clock. */
4735 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4736
4737 /* ...all done! */
4738 ifp->if_flags |= IFF_RUNNING;
4739 ifp->if_flags &= ~IFF_OACTIVE;
4740
4741 out:
4742 sc->sc_if_flags = ifp->if_flags;
4743 if (error)
4744 log(LOG_ERR, "%s: interface not running\n",
4745 device_xname(sc->sc_dev));
4746 return error;
4747 }
4748
4749 /*
4750 * wm_rxdrain:
4751 *
4752 * Drain the receive queue.
4753 */
4754 static void
4755 wm_rxdrain(struct wm_softc *sc)
4756 {
4757 struct wm_rxsoft *rxs;
4758 int i;
4759
4760 for (i = 0; i < WM_NRXDESC; i++) {
4761 rxs = &sc->sc_rxsoft[i];
4762 if (rxs->rxs_mbuf != NULL) {
4763 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4764 m_freem(rxs->rxs_mbuf);
4765 rxs->rxs_mbuf = NULL;
4766 }
4767 }
4768 }
4769
4770 /*
4771 * wm_stop: [ifnet interface function]
4772 *
4773 * Stop transmission on the interface.
4774 */
4775 static void
4776 wm_stop(struct ifnet *ifp, int disable)
4777 {
4778 struct wm_softc *sc = ifp->if_softc;
4779 struct wm_txsoft *txs;
4780 int i;
4781
4782 /* Stop the one second clock. */
4783 callout_stop(&sc->sc_tick_ch);
4784
4785 /* Stop the 82547 Tx FIFO stall check timer. */
4786 if (sc->sc_type == WM_T_82547)
4787 callout_stop(&sc->sc_txfifo_ch);
4788
4789 if (sc->sc_flags & WM_F_HAS_MII) {
4790 /* Down the MII. */
4791 mii_down(&sc->sc_mii);
4792 } else {
4793 #if 0
4794 /* Should we clear PHY's status properly? */
4795 wm_reset(sc);
4796 #endif
4797 }
4798
4799 /* Stop the transmit and receive processes. */
4800 CSR_WRITE(sc, WMREG_TCTL, 0);
4801 CSR_WRITE(sc, WMREG_RCTL, 0);
4802 sc->sc_rctl &= ~RCTL_EN;
4803
4804 /*
4805 * Clear the interrupt mask to ensure the device cannot assert its
4806 * interrupt line.
4807 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4808 * any currently pending or shared interrupt.
4809 */
4810 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4811 sc->sc_icr = 0;
4812
4813 /* Release any queued transmit buffers. */
4814 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4815 txs = &sc->sc_txsoft[i];
4816 if (txs->txs_mbuf != NULL) {
4817 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4818 m_freem(txs->txs_mbuf);
4819 txs->txs_mbuf = NULL;
4820 }
4821 }
4822
4823 /* Mark the interface as down and cancel the watchdog timer. */
4824 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4825 ifp->if_timer = 0;
4826
4827 if (disable)
4828 wm_rxdrain(sc);
4829
4830 #if 0 /* notyet */
4831 if (sc->sc_type >= WM_T_82544)
4832 CSR_WRITE(sc, WMREG_WUC, 0);
4833 #endif
4834 }
4835
4836 void
4837 wm_get_auto_rd_done(struct wm_softc *sc)
4838 {
4839 int i;
4840
4841 /* wait for eeprom to reload */
4842 switch (sc->sc_type) {
4843 case WM_T_82571:
4844 case WM_T_82572:
4845 case WM_T_82573:
4846 case WM_T_82574:
4847 case WM_T_82583:
4848 case WM_T_82575:
4849 case WM_T_82576:
4850 case WM_T_82580:
4851 case WM_T_82580ER:
4852 case WM_T_I350:
4853 case WM_T_80003:
4854 case WM_T_ICH8:
4855 case WM_T_ICH9:
4856 for (i = 0; i < 10; i++) {
4857 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4858 break;
4859 delay(1000);
4860 }
4861 if (i == 10) {
4862 log(LOG_ERR, "%s: auto read from eeprom failed to "
4863 "complete\n", device_xname(sc->sc_dev));
4864 }
4865 break;
4866 default:
4867 break;
4868 }
4869 }
4870
4871 void
4872 wm_lan_init_done(struct wm_softc *sc)
4873 {
4874 uint32_t reg = 0;
4875 int i;
4876
4877 /* wait for eeprom to reload */
4878 switch (sc->sc_type) {
4879 case WM_T_ICH10:
4880 case WM_T_PCH:
4881 case WM_T_PCH2:
4882 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4883 reg = CSR_READ(sc, WMREG_STATUS);
4884 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4885 break;
4886 delay(100);
4887 }
4888 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4889 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4890 "complete\n", device_xname(sc->sc_dev), __func__);
4891 }
4892 break;
4893 default:
4894 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4895 __func__);
4896 break;
4897 }
4898
4899 reg &= ~STATUS_LAN_INIT_DONE;
4900 CSR_WRITE(sc, WMREG_STATUS, reg);
4901 }
4902
4903 void
4904 wm_get_cfg_done(struct wm_softc *sc)
4905 {
4906 int mask;
4907 uint32_t reg;
4908 int i;
4909
4910 /* wait for eeprom to reload */
4911 switch (sc->sc_type) {
4912 case WM_T_82542_2_0:
4913 case WM_T_82542_2_1:
4914 /* null */
4915 break;
4916 case WM_T_82543:
4917 case WM_T_82544:
4918 case WM_T_82540:
4919 case WM_T_82545:
4920 case WM_T_82545_3:
4921 case WM_T_82546:
4922 case WM_T_82546_3:
4923 case WM_T_82541:
4924 case WM_T_82541_2:
4925 case WM_T_82547:
4926 case WM_T_82547_2:
4927 case WM_T_82573:
4928 case WM_T_82574:
4929 case WM_T_82583:
4930 /* generic */
4931 delay(10*1000);
4932 break;
4933 case WM_T_80003:
4934 case WM_T_82571:
4935 case WM_T_82572:
4936 case WM_T_82575:
4937 case WM_T_82576:
4938 case WM_T_82580:
4939 case WM_T_82580ER:
4940 case WM_T_I350:
4941 if (sc->sc_type == WM_T_82571) {
4942 /* Only 82571 shares port 0 */
4943 mask = EEMNGCTL_CFGDONE_0;
4944 } else
4945 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4946 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4947 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4948 break;
4949 delay(1000);
4950 }
4951 if (i >= WM_PHY_CFG_TIMEOUT) {
4952 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4953 device_xname(sc->sc_dev), __func__));
4954 }
4955 break;
4956 case WM_T_ICH8:
4957 case WM_T_ICH9:
4958 case WM_T_ICH10:
4959 case WM_T_PCH:
4960 case WM_T_PCH2:
4961 if (sc->sc_type >= WM_T_PCH) {
4962 reg = CSR_READ(sc, WMREG_STATUS);
4963 if ((reg & STATUS_PHYRA) != 0)
4964 CSR_WRITE(sc, WMREG_STATUS,
4965 reg & ~STATUS_PHYRA);
4966 }
4967 delay(10*1000);
4968 break;
4969 default:
4970 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4971 __func__);
4972 break;
4973 }
4974 }
4975
4976 /*
4977 * wm_acquire_eeprom:
4978 *
4979 * Perform the EEPROM handshake required on some chips.
4980 */
4981 static int
4982 wm_acquire_eeprom(struct wm_softc *sc)
4983 {
4984 uint32_t reg;
4985 int x;
4986 int ret = 0;
4987
4988 	/* Flash-based NVM: no handshake needed, always succeeds */
4989 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4990 return 0;
4991
4992 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4993 ret = wm_get_swfwhw_semaphore(sc);
4994 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4995 /* this will also do wm_get_swsm_semaphore() if needed */
4996 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4997 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4998 ret = wm_get_swsm_semaphore(sc);
4999 }
5000
5001 if (ret) {
5002 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5003 __func__);
5004 return 1;
5005 }
5006
5007 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5008 reg = CSR_READ(sc, WMREG_EECD);
5009
5010 /* Request EEPROM access. */
5011 reg |= EECD_EE_REQ;
5012 CSR_WRITE(sc, WMREG_EECD, reg);
5013
5014 		/* ...and wait for it to be granted. */
5015 for (x = 0; x < 1000; x++) {
5016 reg = CSR_READ(sc, WMREG_EECD);
5017 if (reg & EECD_EE_GNT)
5018 break;
5019 delay(5);
5020 }
5021 if ((reg & EECD_EE_GNT) == 0) {
5022 aprint_error_dev(sc->sc_dev,
5023 "could not acquire EEPROM GNT\n");
5024 reg &= ~EECD_EE_REQ;
5025 CSR_WRITE(sc, WMREG_EECD, reg);
5026 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5027 wm_put_swfwhw_semaphore(sc);
5028 if (sc->sc_flags & WM_F_SWFW_SYNC)
5029 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5030 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5031 wm_put_swsm_semaphore(sc);
5032 return 1;
5033 }
5034 }
5035
5036 return 0;
5037 }
5038
5039 /*
5040 * wm_release_eeprom:
5041 *
5042 * Release the EEPROM mutex.
5043 */
5044 static void
5045 wm_release_eeprom(struct wm_softc *sc)
5046 {
5047 uint32_t reg;
5048
5049 	/* Flash-based NVM: nothing to release */
5050 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5051 return;
5052
5053 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5054 reg = CSR_READ(sc, WMREG_EECD);
5055 reg &= ~EECD_EE_REQ;
5056 CSR_WRITE(sc, WMREG_EECD, reg);
5057 }
5058
5059 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5060 wm_put_swfwhw_semaphore(sc);
5061 if (sc->sc_flags & WM_F_SWFW_SYNC)
5062 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5063 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5064 wm_put_swsm_semaphore(sc);
5065 }
5066
5067 /*
5068 * wm_eeprom_sendbits:
5069 *
5070 * Send a series of bits to the EEPROM.
5071 */
5072 static void
5073 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5074 {
5075 uint32_t reg;
5076 int x;
5077
5078 reg = CSR_READ(sc, WMREG_EECD);
5079
5080 for (x = nbits; x > 0; x--) {
5081 if (bits & (1U << (x - 1)))
5082 reg |= EECD_DI;
5083 else
5084 reg &= ~EECD_DI;
5085 CSR_WRITE(sc, WMREG_EECD, reg);
5086 delay(2);
5087 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5088 delay(2);
5089 CSR_WRITE(sc, WMREG_EECD, reg);
5090 delay(2);
5091 }
5092 }
5093
5094 /*
5095 * wm_eeprom_recvbits:
5096 *
5097 * Receive a series of bits from the EEPROM.
5098 */
5099 static void
5100 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5101 {
5102 uint32_t reg, val;
5103 int x;
5104
5105 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5106
5107 val = 0;
5108 for (x = nbits; x > 0; x--) {
5109 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5110 delay(2);
5111 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5112 val |= (1U << (x - 1));
5113 CSR_WRITE(sc, WMREG_EECD, reg);
5114 delay(2);
5115 }
5116 *valp = val;
5117 }
5118
5119 /*
5120 * wm_read_eeprom_uwire:
5121 *
5122 * Read a word from the EEPROM using the MicroWire protocol.
5123 */
5124 static int
5125 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5126 {
5127 uint32_t reg, val;
5128 int i;
5129
5130 for (i = 0; i < wordcnt; i++) {
5131 /* Clear SK and DI. */
5132 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5133 CSR_WRITE(sc, WMREG_EECD, reg);
5134
5135 /*
5136 		 * XXX: workaround for a bug in qemu-0.12.x and earlier,
5137 		 * and in Xen.
5138 		 *
5139 		 * We use this workaround only for the 82540 because
5140 		 * qemu's e1000 emulation acts as an 82540.
5141 */
5142 if (sc->sc_type == WM_T_82540) {
5143 reg |= EECD_SK;
5144 CSR_WRITE(sc, WMREG_EECD, reg);
5145 reg &= ~EECD_SK;
5146 CSR_WRITE(sc, WMREG_EECD, reg);
5147 delay(2);
5148 }
5149 /* XXX: end of workaround */
5150
5151 /* Set CHIP SELECT. */
5152 reg |= EECD_CS;
5153 CSR_WRITE(sc, WMREG_EECD, reg);
5154 delay(2);
5155
5156 /* Shift in the READ command. */
5157 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5158
5159 /* Shift in address. */
5160 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5161
5162 /* Shift out the data. */
5163 wm_eeprom_recvbits(sc, &val, 16);
5164 data[i] = val & 0xffff;
5165
5166 /* Clear CHIP SELECT. */
5167 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5168 CSR_WRITE(sc, WMREG_EECD, reg);
5169 delay(2);
5170 }
5171
5172 return 0;
5173 }
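/*
 * Example transaction (a sketch): reading word 3 from a
 * 93C46-style part with sc_ee_addrbits == 6 raises CS, shifts
 * out the 3-bit READ opcode (UWIRE_OPC_READ) MSB-first via
 * wm_eeprom_sendbits(), then the 6-bit address 000011, and
 * finally clocks 16 data bits in from EECD_DO with
 * wm_eeprom_recvbits(); each bit is latched on a rising edge
 * of EECD_SK.
 */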
5174
5175 /*
5176 * wm_spi_eeprom_ready:
5177 *
5178 * Wait for a SPI EEPROM to be ready for commands.
5179 */
5180 static int
5181 wm_spi_eeprom_ready(struct wm_softc *sc)
5182 {
5183 uint32_t val;
5184 int usec;
5185
5186 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5187 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5188 wm_eeprom_recvbits(sc, &val, 8);
5189 if ((val & SPI_SR_RDY) == 0)
5190 break;
5191 }
5192 if (usec >= SPI_MAX_RETRIES) {
5193 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5194 return 1;
5195 }
5196 return 0;
5197 }
5198
5199 /*
5200 * wm_read_eeprom_spi:
5201 *
5202  *	Read a word from the EEPROM using the SPI protocol.
5203 */
5204 static int
5205 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5206 {
5207 uint32_t reg, val;
5208 int i;
5209 uint8_t opc;
5210
5211 /* Clear SK and CS. */
5212 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5213 CSR_WRITE(sc, WMREG_EECD, reg);
5214 delay(2);
5215
5216 if (wm_spi_eeprom_ready(sc))
5217 return 1;
5218
5219 /* Toggle CS to flush commands. */
5220 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5221 delay(2);
5222 CSR_WRITE(sc, WMREG_EECD, reg);
5223 delay(2);
5224
5225 opc = SPI_OPC_READ;
5226 if (sc->sc_ee_addrbits == 8 && word >= 128)
5227 opc |= SPI_OPC_A8;
5228
5229 wm_eeprom_sendbits(sc, opc, 8);
5230 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5231
5232 for (i = 0; i < wordcnt; i++) {
5233 wm_eeprom_recvbits(sc, &val, 16);
5234 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5235 }
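	/*
	 * Note the byte swap above: the SPI part returns each word
	 * MSB-first, so a wire value of 0xab12 is stored as the
	 * little-endian EEPROM word 0x12ab.
	 */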
5236
5237 /* Raise CS and clear SK. */
5238 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5239 CSR_WRITE(sc, WMREG_EECD, reg);
5240 delay(2);
5241
5242 return 0;
5243 }
5244
5245 #define EEPROM_CHECKSUM 0xBABA
5246 #define EEPROM_SIZE 0x0040
5247
5248 /*
5249 * wm_validate_eeprom_checksum
5250 *
5251 * The checksum is defined as the sum of the first 64 (16 bit) words.
5252 */
5253 static int
5254 wm_validate_eeprom_checksum(struct wm_softc *sc)
5255 {
5256 uint16_t checksum;
5257 uint16_t eeprom_data;
5258 int i;
5259
5260 checksum = 0;
5261
5262 for (i = 0; i < EEPROM_SIZE; i++) {
5263 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5264 return 1;
5265 checksum += eeprom_data;
5266 }
5267
5268 if (checksum != (uint16_t) EEPROM_CHECKSUM)
5269 return 1;
5270
5271 return 0;
5272 }
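/*
 * Worked example (a sketch, assuming the checksum word is kept in
 * the last of the 64 words, as is conventional): if the first 63
 * words sum to 0x1234, the checksum word must hold
 * 0xBABA - 0x1234 = 0xA886 (mod 2^16) for the validation above to
 * succeed.
 */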
5273
5274 /*
5275 * wm_read_eeprom:
5276 *
5277 * Read data from the serial EEPROM.
5278 */
5279 static int
5280 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5281 {
5282 int rv;
5283
5284 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5285 return 1;
5286
5287 if (wm_acquire_eeprom(sc))
5288 return 1;
5289
5290 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5291 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5292 || (sc->sc_type == WM_T_PCH2))
5293 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5294 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5295 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5296 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5297 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5298 else
5299 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5300
5301 wm_release_eeprom(sc);
5302 return rv;
5303 }
5304
5305 static int
5306 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5307 uint16_t *data)
5308 {
5309 int i, eerd = 0;
5310 int error = 0;
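	/*
	 * EERD protocol, as used below: write the word address
	 * shifted into the address field together with EERD_START,
	 * poll until the hardware sets EERD_DONE, then extract the
	 * 16-bit word from the data field.  E.g. reading word 0x10
	 * writes (0x10 << EERD_ADDR_SHIFT) | EERD_START.
	 */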
5311
5312 for (i = 0; i < wordcnt; i++) {
5313 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5314
5315 CSR_WRITE(sc, WMREG_EERD, eerd);
5316 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5317 if (error != 0)
5318 break;
5319
5320 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5321 }
5322
5323 return error;
5324 }
5325
5326 static int
5327 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5328 {
5329 uint32_t attempts = 100000;
5330 uint32_t i, reg = 0;
5331 int32_t done = -1;
5332
5333 for (i = 0; i < attempts; i++) {
5334 reg = CSR_READ(sc, rw);
5335
5336 if (reg & EERD_DONE) {
5337 done = 0;
5338 break;
5339 }
5340 delay(5);
5341 }
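	/*
	 * Note: the loop above allows up to 100000 * 5 us = 500 ms
	 * for the EERD/EEWR operation to complete; done stays -1 on
	 * timeout.
	 */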
5342
5343 return done;
5344 }
5345
5346 static int
5347 wm_check_alt_mac_addr(struct wm_softc *sc)
5348 {
5349 uint16_t myea[ETHER_ADDR_LEN / 2];
5350 uint16_t offset = EEPROM_OFF_MACADDR;
5351
5352 /* Try to read alternative MAC address pointer */
5353 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5354 return -1;
5355
5356 /* Check pointer */
5357 if (offset == 0xffff)
5358 return -1;
5359
5360 /*
5361 	 * Check whether the alternative MAC address is valid.
5362 	 * Some cards have a non-0xffff pointer but don't actually
5363 	 * use an alternative MAC address.
5364 	 *
5365 	 * Check that the multicast/broadcast bit is clear.
5366 */
5367 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5368 if (((myea[0] & 0xff) & 0x01) == 0)
5369 return 0; /* found! */
5370
5371 /* not found */
5372 return -1;
5373 }
5374
5375 static int
5376 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5377 {
5378 uint16_t myea[ETHER_ADDR_LEN / 2];
5379 uint16_t offset = EEPROM_OFF_MACADDR;
5380 int do_invert = 0;
5381
5382 switch (sc->sc_type) {
5383 case WM_T_82580:
5384 case WM_T_82580ER:
5385 case WM_T_I350:
5386 switch (sc->sc_funcid) {
5387 case 0:
5388 /* default value (== EEPROM_OFF_MACADDR) */
5389 break;
5390 case 1:
5391 offset = EEPROM_OFF_LAN1;
5392 break;
5393 case 2:
5394 offset = EEPROM_OFF_LAN2;
5395 break;
5396 case 3:
5397 offset = EEPROM_OFF_LAN3;
5398 break;
5399 default:
5400 goto bad;
5401 /* NOTREACHED */
5402 break;
5403 }
5404 break;
5405 case WM_T_82571:
5406 case WM_T_82575:
5407 case WM_T_82576:
5408 case WM_T_80003:
5409 if (wm_check_alt_mac_addr(sc) != 0) {
5410 /* reset the offset to LAN0 */
5411 offset = EEPROM_OFF_MACADDR;
5412 if ((sc->sc_funcid & 0x01) == 1)
5413 do_invert = 1;
5414 goto do_read;
5415 }
5416 switch (sc->sc_funcid) {
5417 case 0:
5418 /*
5419 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5420 * itself.
5421 */
5422 break;
5423 case 1:
5424 offset += EEPROM_OFF_MACADDR_LAN1;
5425 break;
5426 case 2:
5427 offset += EEPROM_OFF_MACADDR_LAN2;
5428 break;
5429 case 3:
5430 offset += EEPROM_OFF_MACADDR_LAN3;
5431 break;
5432 default:
5433 goto bad;
5434 /* NOTREACHED */
5435 break;
5436 }
5437 break;
5438 default:
5439 if ((sc->sc_funcid & 0x01) == 1)
5440 do_invert = 1;
5441 break;
5442 }
5443
5444 do_read:
5445 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5446 myea) != 0) {
5447 goto bad;
5448 }
5449
5450 enaddr[0] = myea[0] & 0xff;
5451 enaddr[1] = myea[0] >> 8;
5452 enaddr[2] = myea[1] & 0xff;
5453 enaddr[3] = myea[1] >> 8;
5454 enaddr[4] = myea[2] & 0xff;
5455 enaddr[5] = myea[2] >> 8;
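	/*
	 * Example (a sketch): EEPROM words are little-endian, so
	 * myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to the MAC
	 * address 00:11:22:33:44:55.
	 */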
5456
5457 /*
5458 * Toggle the LSB of the MAC address on the second port
5459 * of some dual port cards.
5460 */
5461 if (do_invert != 0)
5462 enaddr[5] ^= 1;
5463
5464 return 0;
5465
5466 bad:
5467 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5468
5469 return -1;
5470 }
5471
5472 /*
5473 * wm_add_rxbuf:
5474 *
5475  *	Add a receive buffer to the indicated descriptor.
5476 */
5477 static int
5478 wm_add_rxbuf(struct wm_softc *sc, int idx)
5479 {
5480 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5481 struct mbuf *m;
5482 int error;
5483
5484 MGETHDR(m, M_DONTWAIT, MT_DATA);
5485 if (m == NULL)
5486 return ENOBUFS;
5487
5488 MCLGET(m, M_DONTWAIT);
5489 if ((m->m_flags & M_EXT) == 0) {
5490 m_freem(m);
5491 return ENOBUFS;
5492 }
5493
5494 if (rxs->rxs_mbuf != NULL)
5495 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5496
5497 rxs->rxs_mbuf = m;
5498
5499 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5500 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5501 BUS_DMA_READ|BUS_DMA_NOWAIT);
5502 if (error) {
5503 /* XXX XXX XXX */
5504 aprint_error_dev(sc->sc_dev,
5505 "unable to load rx DMA map %d, error = %d\n",
5506 idx, error);
5507 panic("wm_add_rxbuf");
5508 }
5509
5510 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5511 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5512
5513 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5514 if ((sc->sc_rctl & RCTL_EN) != 0)
5515 WM_INIT_RXDESC(sc, idx);
5516 } else
5517 WM_INIT_RXDESC(sc, idx);
5518
5519 return 0;
5520 }
5521
5522 /*
5523 * wm_set_ral:
5524 *
5525  *	Set an entry in the receive address list.
5526 */
5527 static void
5528 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5529 {
5530 uint32_t ral_lo, ral_hi;
5531
5532 if (enaddr != NULL) {
5533 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5534 (enaddr[3] << 24);
5535 ral_hi = enaddr[4] | (enaddr[5] << 8);
5536 ral_hi |= RAL_AV;
5537 } else {
5538 ral_lo = 0;
5539 ral_hi = 0;
5540 }
5541
5542 if (sc->sc_type >= WM_T_82544) {
5543 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5544 ral_lo);
5545 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5546 ral_hi);
5547 } else {
5548 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5549 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5550 }
5551 }
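/*
 * Example (a sketch): for the station address 00:11:22:33:44:55,
 * the code above programs ral_lo = 0x33221100 and
 * ral_hi = 0x00005544 | RAL_AV, i.e. the address is stored
 * little-endian with the valid bit in the high register.
 */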
5552
5553 /*
5554 * wm_mchash:
5555 *
5556 * Compute the hash of the multicast address for the 4096-bit
5557 * multicast filter.
5558 */
5559 static uint32_t
5560 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5561 {
5562 static const int lo_shift[4] = { 4, 3, 2, 0 };
5563 static const int hi_shift[4] = { 4, 5, 6, 8 };
5564 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5565 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5566 uint32_t hash;
5567
5568 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5569 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5570 || (sc->sc_type == WM_T_PCH2)) {
5571 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5572 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5573 return (hash & 0x3ff);
5574 }
5575 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5576 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5577
5578 return (hash & 0xfff);
5579 }
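/*
 * Worked example (a sketch, assuming sc_mchash_type == 0 on a
 * non-ICH/PCH chip): for the multicast address 01:00:5e:00:00:fb,
 * enaddr[4] == 0x00 and enaddr[5] == 0xfb, so
 * hash = (0x00 >> 4) | (0xfb << 4) = 0xfb0 (already within the
 * 0xfff mask).  wm_set_filter() below then uses hash >> 5 = 0x7d
 * as the MTA word index and hash & 0x1f = 0x10 as the bit index.
 */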
5580
5581 /*
5582 * wm_set_filter:
5583 *
5584 * Set up the receive filter.
5585 */
5586 static void
5587 wm_set_filter(struct wm_softc *sc)
5588 {
5589 struct ethercom *ec = &sc->sc_ethercom;
5590 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5591 struct ether_multi *enm;
5592 struct ether_multistep step;
5593 bus_addr_t mta_reg;
5594 uint32_t hash, reg, bit;
5595 int i, size;
5596
5597 if (sc->sc_type >= WM_T_82544)
5598 mta_reg = WMREG_CORDOVA_MTA;
5599 else
5600 mta_reg = WMREG_MTA;
5601
5602 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5603
5604 if (ifp->if_flags & IFF_BROADCAST)
5605 sc->sc_rctl |= RCTL_BAM;
5606 if (ifp->if_flags & IFF_PROMISC) {
5607 sc->sc_rctl |= RCTL_UPE;
5608 goto allmulti;
5609 }
5610
5611 /*
5612 * Set the station address in the first RAL slot, and
5613 * clear the remaining slots.
5614 */
5615 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5616 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5617 || (sc->sc_type == WM_T_PCH2))
5618 size = WM_ICH8_RAL_TABSIZE;
5619 else
5620 size = WM_RAL_TABSIZE;
5621 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5622 for (i = 1; i < size; i++)
5623 wm_set_ral(sc, NULL, i);
5624
5625 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5626 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5627 || (sc->sc_type == WM_T_PCH2))
5628 size = WM_ICH8_MC_TABSIZE;
5629 else
5630 size = WM_MC_TABSIZE;
5631 /* Clear out the multicast table. */
5632 for (i = 0; i < size; i++)
5633 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5634
5635 ETHER_FIRST_MULTI(step, ec, enm);
5636 while (enm != NULL) {
5637 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5638 /*
5639 * We must listen to a range of multicast addresses.
5640 * For now, just accept all multicasts, rather than
5641 * trying to set only those filter bits needed to match
5642 * the range. (At this time, the only use of address
5643 * ranges is for IP multicast routing, for which the
5644 * range is big enough to require all bits set.)
5645 */
5646 goto allmulti;
5647 }
5648
5649 hash = wm_mchash(sc, enm->enm_addrlo);
5650
5651 reg = (hash >> 5);
5652 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5653 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5654 || (sc->sc_type == WM_T_PCH2))
5655 reg &= 0x1f;
5656 else
5657 reg &= 0x7f;
5658 bit = hash & 0x1f;
5659
5660 hash = CSR_READ(sc, mta_reg + (reg << 2));
5661 hash |= 1U << bit;
5662
5663 /* XXX Hardware bug?? */
5664 		if (sc->sc_type == WM_T_82544 && (reg & 0x1) != 0) {
5665 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5666 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5667 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5668 } else
5669 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5670
5671 ETHER_NEXT_MULTI(step, enm);
5672 }
5673
5674 ifp->if_flags &= ~IFF_ALLMULTI;
5675 goto setit;
5676
5677 allmulti:
5678 ifp->if_flags |= IFF_ALLMULTI;
5679 sc->sc_rctl |= RCTL_MPE;
5680
5681 setit:
5682 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5683 }
5684
5685 /*
5686 * wm_tbi_mediainit:
5687 *
5688 * Initialize media for use on 1000BASE-X devices.
5689 */
5690 static void
5691 wm_tbi_mediainit(struct wm_softc *sc)
5692 {
5693 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5694 const char *sep = "";
5695
5696 if (sc->sc_type < WM_T_82543)
5697 sc->sc_tipg = TIPG_WM_DFLT;
5698 else
5699 sc->sc_tipg = TIPG_LG_DFLT;
5700
5701 sc->sc_tbi_anegticks = 5;
5702
5703 /* Initialize our media structures */
5704 sc->sc_mii.mii_ifp = ifp;
5705
5706 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5707 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5708 wm_tbi_mediastatus);
5709
5710 /*
5711 * SWD Pins:
5712 *
5713 * 0 = Link LED (output)
5714 * 1 = Loss Of Signal (input)
5715 */
5716 sc->sc_ctrl |= CTRL_SWDPIO(0);
5717 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5718
5719 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5720
5721 #define ADD(ss, mm, dd) \
5722 do { \
5723 aprint_normal("%s%s", sep, ss); \
5724 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5725 sep = ", "; \
5726 } while (/*CONSTCOND*/0)
5727
5728 aprint_normal_dev(sc->sc_dev, "");
5729 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5730 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5731 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5732 aprint_normal("\n");
5733
5734 #undef ADD
5735
5736 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5737 }
5738
5739 /*
5740 * wm_tbi_mediastatus: [ifmedia interface function]
5741 *
5742 * Get the current interface media status on a 1000BASE-X device.
5743 */
5744 static void
5745 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5746 {
5747 struct wm_softc *sc = ifp->if_softc;
5748 uint32_t ctrl, status;
5749
5750 ifmr->ifm_status = IFM_AVALID;
5751 ifmr->ifm_active = IFM_ETHER;
5752
5753 status = CSR_READ(sc, WMREG_STATUS);
5754 if ((status & STATUS_LU) == 0) {
5755 ifmr->ifm_active |= IFM_NONE;
5756 return;
5757 }
5758
5759 ifmr->ifm_status |= IFM_ACTIVE;
5760 ifmr->ifm_active |= IFM_1000_SX;
5761 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5762 ifmr->ifm_active |= IFM_FDX;
5763 ctrl = CSR_READ(sc, WMREG_CTRL);
5764 if (ctrl & CTRL_RFCE)
5765 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5766 if (ctrl & CTRL_TFCE)
5767 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5768 }
5769
5770 /*
5771 * wm_tbi_mediachange: [ifmedia interface function]
5772 *
5773 * Set hardware to newly-selected media on a 1000BASE-X device.
5774 */
5775 static int
5776 wm_tbi_mediachange(struct ifnet *ifp)
5777 {
5778 struct wm_softc *sc = ifp->if_softc;
5779 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5780 uint32_t status;
5781 int i;
5782
5783 sc->sc_txcw = 0;
5784 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5785 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5786 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5787 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5788 sc->sc_txcw |= TXCW_ANE;
5789 } else {
5790 /*
5791 * If autonegotiation is turned off, force link up and turn on
5792 * full duplex
5793 */
5794 sc->sc_txcw &= ~TXCW_ANE;
5795 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5796 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5797 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5798 delay(1000);
5799 }
5800
5801 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5802 device_xname(sc->sc_dev),sc->sc_txcw));
5803 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5804 delay(10000);
5805
5806 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5807 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5808
5809 /*
5810	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set when
5811	 * the optics detect a signal; on the 82544 and earlier, 0 means signal.
5812 */
5813 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5814 /* Have signal; wait for the link to come up. */
5815
5816 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5817 /*
5818 * Reset the link, and let autonegotiation do its thing
5819 */
5820 sc->sc_ctrl |= CTRL_LRST;
5821 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5822 delay(1000);
5823 sc->sc_ctrl &= ~CTRL_LRST;
5824 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5825 delay(1000);
5826 }
5827
5828 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5829 delay(10000);
5830 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5831 break;
5832 }
5833
5834 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5835 device_xname(sc->sc_dev),i));
5836
5837 status = CSR_READ(sc, WMREG_STATUS);
5838 DPRINTF(WM_DEBUG_LINK,
5839 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5840 device_xname(sc->sc_dev),status, STATUS_LU));
5841 if (status & STATUS_LU) {
5842 /* Link is up. */
5843 DPRINTF(WM_DEBUG_LINK,
5844 ("%s: LINK: set media -> link up %s\n",
5845 device_xname(sc->sc_dev),
5846 (status & STATUS_FD) ? "FDX" : "HDX"));
5847
5848 /*
5849 * NOTE: CTRL will update TFCE and RFCE automatically,
5850 * so we should update sc->sc_ctrl
5851 */
5852 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5853 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5854 sc->sc_fcrtl &= ~FCRTL_XONE;
5855 if (status & STATUS_FD)
5856 sc->sc_tctl |=
5857 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5858 else
5859 sc->sc_tctl |=
5860 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5861 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5862 sc->sc_fcrtl |= FCRTL_XONE;
5863 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5864 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5865 WMREG_OLD_FCRTL : WMREG_FCRTL,
5866 sc->sc_fcrtl);
5867 sc->sc_tbi_linkup = 1;
5868 } else {
5869 if (i == WM_LINKUP_TIMEOUT)
5870 wm_check_for_link(sc);
5871 /* Link is down. */
5872 DPRINTF(WM_DEBUG_LINK,
5873 ("%s: LINK: set media -> link down\n",
5874 device_xname(sc->sc_dev)));
5875 sc->sc_tbi_linkup = 0;
5876 }
5877 } else {
5878 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5879 device_xname(sc->sc_dev)));
5880 sc->sc_tbi_linkup = 0;
5881 }
5882
5883 wm_tbi_set_linkled(sc);
5884
5885 return 0;
5886 }
5887
5888 /*
5889 * wm_tbi_set_linkled:
5890 *
5891 * Update the link LED on 1000BASE-X devices.
5892 */
5893 static void
5894 wm_tbi_set_linkled(struct wm_softc *sc)
5895 {
5896
5897 if (sc->sc_tbi_linkup)
5898 sc->sc_ctrl |= CTRL_SWDPIN(0);
5899 else
5900 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5901
5902 /* 82540 or newer devices are active low */
5903 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5904
5905 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5906 }
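
/*
 * Note that the XOR above is what implements the active-low LED: on
 * the 82540 and newer the pin must be driven to the inverse of the
 * link state, so the bit just set (or cleared) is flipped before the
 * register write; on older chips the XOR with 0 leaves it alone.
 */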
5907
5908 /*
5909 * wm_tbi_check_link:
5910 *
5911 * Check the link on 1000BASE-X devices.
5912 */
5913 static void
5914 wm_tbi_check_link(struct wm_softc *sc)
5915 {
5916 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5917 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5918 uint32_t rxcw, ctrl, status;
5919
5920 status = CSR_READ(sc, WMREG_STATUS);
5921
5922 rxcw = CSR_READ(sc, WMREG_RXCW);
5923 ctrl = CSR_READ(sc, WMREG_CTRL);
5924
5925 /* set link status */
5926 if ((status & STATUS_LU) == 0) {
5927 DPRINTF(WM_DEBUG_LINK,
5928 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5929 sc->sc_tbi_linkup = 0;
5930 } else if (sc->sc_tbi_linkup == 0) {
5931 DPRINTF(WM_DEBUG_LINK,
5932 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5933 (status & STATUS_FD) ? "FDX" : "HDX"));
5934 sc->sc_tbi_linkup = 1;
5935 }
5936
5937 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5938 && ((status & STATUS_LU) == 0)) {
5939 sc->sc_tbi_linkup = 0;
5940 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5941 /* RXCFG storm! */
5942 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5943 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5944 wm_init(ifp);
5945 ifp->if_start(ifp);
5946 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5947 /* If the timer expired, retry autonegotiation */
5948 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5949 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5950 sc->sc_tbi_ticks = 0;
5951 /*
5952 * Reset the link, and let autonegotiation do
5953 * its thing
5954 */
5955 sc->sc_ctrl |= CTRL_LRST;
5956 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5957 delay(1000);
5958 sc->sc_ctrl &= ~CTRL_LRST;
5959 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5960 delay(1000);
5961 CSR_WRITE(sc, WMREG_TXCW,
5962 sc->sc_txcw & ~TXCW_ANE);
5963 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5964 }
5965 }
5966 }
5967
5968 wm_tbi_set_linkled(sc);
5969 }
5970
5971 /*
5972 * wm_gmii_reset:
5973 *
5974 * Reset the PHY.
5975 */
5976 static void
5977 wm_gmii_reset(struct wm_softc *sc)
5978 {
5979 uint32_t reg;
5980 int rv;
5981
5982 /* get phy semaphore */
5983 switch (sc->sc_type) {
5984 case WM_T_82571:
5985 case WM_T_82572:
5986 case WM_T_82573:
5987 case WM_T_82574:
5988 case WM_T_82583:
5989 /* XXX should get sw semaphore, too */
5990 rv = wm_get_swsm_semaphore(sc);
5991 break;
5992 case WM_T_82575:
5993 case WM_T_82576:
5994 case WM_T_82580:
5995 case WM_T_82580ER:
5996 case WM_T_I350:
5997 case WM_T_80003:
5998 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5999 break;
6000 case WM_T_ICH8:
6001 case WM_T_ICH9:
6002 case WM_T_ICH10:
6003 case WM_T_PCH:
6004 case WM_T_PCH2:
6005 rv = wm_get_swfwhw_semaphore(sc);
6006 break;
6007 default:
6008 		/* Nothing to do */
6009 rv = 0;
6010 break;
6011 }
6012 if (rv != 0) {
6013 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6014 __func__);
6015 return;
6016 }
6017
6018 switch (sc->sc_type) {
6019 case WM_T_82542_2_0:
6020 case WM_T_82542_2_1:
6021 /* null */
6022 break;
6023 case WM_T_82543:
6024 /*
6025 * With 82543, we need to force speed and duplex on the MAC
6026 * equal to what the PHY speed and duplex configuration is.
6027 * In addition, we need to perform a hardware reset on the PHY
6028 * to take it out of reset.
6029 */
6030 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6031 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6032
6033 /* The PHY reset pin is active-low. */
6034 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6035 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6036 CTRL_EXT_SWDPIN(4));
6037 reg |= CTRL_EXT_SWDPIO(4);
6038
6039 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6040 delay(10*1000);
6041
6042 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6043 delay(150);
6044 #if 0
6045 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6046 #endif
6047 delay(20*1000); /* XXX extra delay to get PHY ID? */
6048 break;
6049 case WM_T_82544: /* reset 10000us */
6050 case WM_T_82540:
6051 case WM_T_82545:
6052 case WM_T_82545_3:
6053 case WM_T_82546:
6054 case WM_T_82546_3:
6055 case WM_T_82541:
6056 case WM_T_82541_2:
6057 case WM_T_82547:
6058 case WM_T_82547_2:
6059 case WM_T_82571: /* reset 100us */
6060 case WM_T_82572:
6061 case WM_T_82573:
6062 case WM_T_82574:
6063 case WM_T_82575:
6064 case WM_T_82576:
6065 case WM_T_82580:
6066 case WM_T_82580ER:
6067 case WM_T_I350:
6068 case WM_T_82583:
6069 case WM_T_80003:
6070 /* generic reset */
6071 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6072 delay(20000);
6073 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6074 delay(20000);
6075
6076 if ((sc->sc_type == WM_T_82541)
6077 || (sc->sc_type == WM_T_82541_2)
6078 || (sc->sc_type == WM_T_82547)
6079 || (sc->sc_type == WM_T_82547_2)) {
6080 			/* Workarounds for IGP PHYs are done in igp_reset() */
6081 /* XXX add code to set LED after phy reset */
6082 }
6083 break;
6084 case WM_T_ICH8:
6085 case WM_T_ICH9:
6086 case WM_T_ICH10:
6087 case WM_T_PCH:
6088 case WM_T_PCH2:
6089 /* generic reset */
6090 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6091 delay(100);
6092 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6093 delay(150);
6094 break;
6095 default:
6096 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6097 __func__);
6098 break;
6099 }
6100
6101 /* release PHY semaphore */
6102 switch (sc->sc_type) {
6103 case WM_T_82571:
6104 case WM_T_82572:
6105 case WM_T_82573:
6106 case WM_T_82574:
6107 case WM_T_82583:
6108 /* XXX should put sw semaphore, too */
6109 wm_put_swsm_semaphore(sc);
6110 break;
6111 case WM_T_82575:
6112 case WM_T_82576:
6113 case WM_T_82580:
6114 case WM_T_82580ER:
6115 case WM_T_I350:
6116 case WM_T_80003:
6117 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6118 break;
6119 case WM_T_ICH8:
6120 case WM_T_ICH9:
6121 case WM_T_ICH10:
6122 case WM_T_PCH:
6123 case WM_T_PCH2:
6124 wm_put_swfwhw_semaphore(sc);
6125 break;
6126 default:
6127 		/* Nothing to do */
6129 break;
6130 }
6131
6132 /* get_cfg_done */
6133 wm_get_cfg_done(sc);
6134
6135 /* extra setup */
6136 switch (sc->sc_type) {
6137 case WM_T_82542_2_0:
6138 case WM_T_82542_2_1:
6139 case WM_T_82543:
6140 case WM_T_82544:
6141 case WM_T_82540:
6142 case WM_T_82545:
6143 case WM_T_82545_3:
6144 case WM_T_82546:
6145 case WM_T_82546_3:
6146 case WM_T_82541_2:
6147 case WM_T_82547_2:
6148 case WM_T_82571:
6149 case WM_T_82572:
6150 case WM_T_82573:
6151 case WM_T_82574:
6152 case WM_T_82575:
6153 case WM_T_82576:
6154 case WM_T_82580:
6155 case WM_T_82580ER:
6156 case WM_T_I350:
6157 case WM_T_82583:
6158 case WM_T_80003:
6159 /* null */
6160 break;
6161 case WM_T_82541:
6162 case WM_T_82547:
6163 		/* XXX Configure the activity LED after PHY reset */
6164 break;
6165 case WM_T_ICH8:
6166 case WM_T_ICH9:
6167 case WM_T_ICH10:
6168 case WM_T_PCH:
6169 case WM_T_PCH2:
6170 		/* Allow time for h/w to get to a quiescent state after reset */
6171 delay(10*1000);
6172
6173 if (sc->sc_type == WM_T_PCH)
6174 wm_hv_phy_workaround_ich8lan(sc);
6175
6176 if (sc->sc_type == WM_T_PCH2)
6177 wm_lv_phy_workaround_ich8lan(sc);
6178
6179 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6180 /*
6181 * dummy read to clear the phy wakeup bit after lcd
6182 * reset
6183 */
6184 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6185 }
6186
6187 /*
6188 		 * XXX Configure the LCD with the extended configuration region
6189 * in NVM
6190 */
6191
6192 /* Configure the LCD with the OEM bits in NVM */
6193 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6194 /*
6195 * Disable LPLU.
6196 * XXX It seems that 82567 has LPLU, too.
6197 */
6198 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6199 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6200 reg |= HV_OEM_BITS_ANEGNOW;
6201 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6202 }
6203 break;
6204 default:
6205 panic("%s: unknown type\n", __func__);
6206 break;
6207 }
6208 }
6209
6210 /*
6211 * wm_gmii_mediainit:
6212 *
6213 * Initialize media for use on 1000BASE-T devices.
6214 */
6215 static void
6216 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6217 {
6218 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6219
6220 /* We have MII. */
6221 sc->sc_flags |= WM_F_HAS_MII;
6222
6223 if (sc->sc_type == WM_T_80003)
6224 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6225 else
6226 sc->sc_tipg = TIPG_1000T_DFLT;
6227
6228 /*
6229 * Let the chip set speed/duplex on its own based on
6230 * signals from the PHY.
6231 * XXXbouyer - I'm not sure this is right for the 80003,
6232 * the em driver only sets CTRL_SLU here - but it seems to work.
6233 */
6234 sc->sc_ctrl |= CTRL_SLU;
6235 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6236
6237 /* Initialize our media structures and probe the GMII. */
6238 sc->sc_mii.mii_ifp = ifp;
6239
6240 switch (prodid) {
6241 case PCI_PRODUCT_INTEL_PCH_M_LM:
6242 case PCI_PRODUCT_INTEL_PCH_M_LC:
6243 /* 82577 */
6244 sc->sc_phytype = WMPHY_82577;
6245 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6246 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6247 break;
6248 case PCI_PRODUCT_INTEL_PCH_D_DM:
6249 case PCI_PRODUCT_INTEL_PCH_D_DC:
6250 /* 82578 */
6251 sc->sc_phytype = WMPHY_82578;
6252 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6253 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6254 break;
6255 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6256 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6257 		/* 82579 */
6258 sc->sc_phytype = WMPHY_82579;
6259 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6260 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6261 break;
6262 case PCI_PRODUCT_INTEL_82801I_BM:
6263 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6264 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6265 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6266 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6267 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6268 /* 82567 */
6269 sc->sc_phytype = WMPHY_BM;
6270 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
6271 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
6272 break;
6273 default:
6274 if ((sc->sc_flags & WM_F_SGMII) != 0) {
6275 sc->sc_mii.mii_readreg = wm_sgmii_readreg;
6276 sc->sc_mii.mii_writereg = wm_sgmii_writereg;
6277 } else if (sc->sc_type >= WM_T_80003) {
6278 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
6279 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
6280 } else if (sc->sc_type >= WM_T_82544) {
6281 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
6282 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
6283 } else {
6284 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
6285 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
6286 }
6287 break;
6288 }
6289 sc->sc_mii.mii_statchg = wm_gmii_statchg;
6290
6291 wm_gmii_reset(sc);
6292
6293 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6294 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
6295 wm_gmii_mediastatus);
6296
6297 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6298 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6299 || (sc->sc_type == WM_T_I350)) {
6300 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6301 /* Attach only one port */
6302 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6303 MII_OFFSET_ANY, MIIF_DOPAUSE);
6304 } else {
6305 int i;
6306 uint32_t ctrl_ext;
6307
6308 /* Power on sgmii phy if it is disabled */
6309 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6310 CSR_WRITE(sc, WMREG_CTRL_EXT,
6311 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6312 CSR_WRITE_FLUSH(sc);
6313 delay(300*1000); /* XXX too long */
6314
6315 			/* Try PHY addresses 1 through 7 */
6316 for (i = 1; i < 8; i++)
6317 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6318 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6319
6320 /* restore previous sfp cage power state */
6321 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6322 }
6323 } else {
6324 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6325 MII_OFFSET_ANY, MIIF_DOPAUSE);
6326 }
6327
6328 if ((sc->sc_type == WM_T_PCH2) &&
6329 (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL)) {
6330 wm_set_mdio_slow_mode_hv(sc);
6331 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6332 MII_OFFSET_ANY, MIIF_DOPAUSE);
6333 }
6334
6335 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
6336 /* if failed, retry with *_bm_* */
6337 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
6338 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
6339
6340 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6341 MII_OFFSET_ANY, MIIF_DOPAUSE);
6342 }
6343 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
6344 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6345 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
6346 sc->sc_phytype = WMPHY_NONE;
6347 } else {
6348 /* Check PHY type */
6349 uint32_t model;
6350 struct mii_softc *child;
6351
6352 child = LIST_FIRST(&sc->sc_mii.mii_phys);
6353 if (device_is_a(child->mii_dev, "igphy")) {
6354 struct igphy_softc *isc = (struct igphy_softc *)child;
6355
6356 model = isc->sc_mii.mii_mpd_model;
6357 if (model == MII_MODEL_yyINTEL_I82566)
6358 sc->sc_phytype = WMPHY_IGP_3;
6359 }
6360
6361 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
6362 }
6363 }
6364
6365 /*
6366 * wm_gmii_mediastatus: [ifmedia interface function]
6367 *
6368 * Get the current interface media status on a 1000BASE-T device.
6369 */
6370 static void
6371 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6372 {
6373 struct wm_softc *sc = ifp->if_softc;
6374
6375 ether_mediastatus(ifp, ifmr);
6376 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6377 | sc->sc_flowflags;
6378 }
6379
6380 /*
6381 * wm_gmii_mediachange: [ifmedia interface function]
6382 *
6383 * Set hardware to newly-selected media on a 1000BASE-T device.
6384 */
6385 static int
6386 wm_gmii_mediachange(struct ifnet *ifp)
6387 {
6388 struct wm_softc *sc = ifp->if_softc;
6389 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6390 int rc;
6391
6392 if ((ifp->if_flags & IFF_UP) == 0)
6393 return 0;
6394
6395 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6396 sc->sc_ctrl |= CTRL_SLU;
6397 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6398 || (sc->sc_type > WM_T_82543)) {
6399 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6400 } else {
6401 sc->sc_ctrl &= ~CTRL_ASDE;
6402 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6403 if (ife->ifm_media & IFM_FDX)
6404 sc->sc_ctrl |= CTRL_FD;
6405 switch (IFM_SUBTYPE(ife->ifm_media)) {
6406 case IFM_10_T:
6407 sc->sc_ctrl |= CTRL_SPEED_10;
6408 break;
6409 case IFM_100_TX:
6410 sc->sc_ctrl |= CTRL_SPEED_100;
6411 break;
6412 case IFM_1000_T:
6413 sc->sc_ctrl |= CTRL_SPEED_1000;
6414 break;
6415 default:
6416 panic("wm_gmii_mediachange: bad media 0x%x",
6417 ife->ifm_media);
6418 }
6419 }
6420 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6421 if (sc->sc_type <= WM_T_82543)
6422 wm_gmii_reset(sc);
6423
6424 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6425 return 0;
6426 return rc;
6427 }
6428
6429 #define MDI_IO CTRL_SWDPIN(2)
6430 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6431 #define MDI_CLK CTRL_SWDPIN(3)
6432
6433 static void
6434 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6435 {
6436 uint32_t i, v;
6437
6438 v = CSR_READ(sc, WMREG_CTRL);
6439 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6440 v |= MDI_DIR | CTRL_SWDPIO(3);
6441
6442 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6443 if (data & i)
6444 v |= MDI_IO;
6445 else
6446 v &= ~MDI_IO;
6447 CSR_WRITE(sc, WMREG_CTRL, v);
6448 delay(10);
6449 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6450 delay(10);
6451 CSR_WRITE(sc, WMREG_CTRL, v);
6452 delay(10);
6453 }
6454 }
6455
6456 static uint32_t
6457 i82543_mii_recvbits(struct wm_softc *sc)
6458 {
6459 uint32_t v, i, data = 0;
6460
6461 v = CSR_READ(sc, WMREG_CTRL);
6462 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6463 v |= CTRL_SWDPIO(3);
6464
6465 CSR_WRITE(sc, WMREG_CTRL, v);
6466 delay(10);
6467 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6468 delay(10);
6469 CSR_WRITE(sc, WMREG_CTRL, v);
6470 delay(10);
6471
6472 for (i = 0; i < 16; i++) {
6473 data <<= 1;
6474 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6475 delay(10);
6476 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6477 data |= 1;
6478 CSR_WRITE(sc, WMREG_CTRL, v);
6479 delay(10);
6480 }
6481
6482 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6483 delay(10);
6484 CSR_WRITE(sc, WMREG_CTRL, v);
6485 delay(10);
6486
6487 return data;
6488 }
6489
6490 #undef MDI_IO
6491 #undef MDI_DIR
6492 #undef MDI_CLK
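
/*
 * The bit-banged exchange above follows the IEEE 802.3 clause 22 MDIO
 * frame format: a 32-bit preamble of ones, 2 start bits, a 2-bit
 * opcode, 5 bits of PHY address, 5 bits of register address, a 2-bit
 * turnaround, and 16 data bits.  For reads, i82543_mii_sendbits()
 * emits the 14 bits after the preamble and i82543_mii_recvbits()
 * clocks in the turnaround plus data; for writes, the entire 32-bit
 * tail goes out in a single i82543_mii_sendbits() call.
 */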
6493
6494 /*
6495 * wm_gmii_i82543_readreg: [mii interface function]
6496 *
6497 * Read a PHY register on the GMII (i82543 version).
6498 */
6499 static int
6500 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6501 {
6502 struct wm_softc *sc = device_private(self);
6503 int rv;
6504
6505 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6506 i82543_mii_sendbits(sc, reg | (phy << 5) |
6507 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6508 rv = i82543_mii_recvbits(sc) & 0xffff;
6509
6510 DPRINTF(WM_DEBUG_GMII,
6511 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6512 device_xname(sc->sc_dev), phy, reg, rv));
6513
6514 return rv;
6515 }
6516
6517 /*
6518 * wm_gmii_i82543_writereg: [mii interface function]
6519 *
6520 * Write a PHY register on the GMII (i82543 version).
6521 */
6522 static void
6523 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6524 {
6525 struct wm_softc *sc = device_private(self);
6526
6527 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6528 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6529 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6530 (MII_COMMAND_START << 30), 32);
6531 }
6532
6533 /*
6534 * wm_gmii_i82544_readreg: [mii interface function]
6535 *
6536 * Read a PHY register on the GMII.
6537 */
6538 static int
6539 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6540 {
6541 struct wm_softc *sc = device_private(self);
6542 uint32_t mdic = 0;
6543 int i, rv;
6544
6545 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6546 MDIC_REGADD(reg));
6547
6548 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6549 mdic = CSR_READ(sc, WMREG_MDIC);
6550 if (mdic & MDIC_READY)
6551 break;
6552 delay(50);
6553 }
6554
6555 if ((mdic & MDIC_READY) == 0) {
6556 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6557 device_xname(sc->sc_dev), phy, reg);
6558 rv = 0;
6559 } else if (mdic & MDIC_E) {
6560 #if 0 /* This is normal if no PHY is present. */
6561 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6562 device_xname(sc->sc_dev), phy, reg);
6563 #endif
6564 rv = 0;
6565 } else {
6566 rv = MDIC_DATA(mdic);
6567 if (rv == 0xffff)
6568 rv = 0;
6569 }
6570
6571 return rv;
6572 }
6573
6574 /*
6575 * wm_gmii_i82544_writereg: [mii interface function]
6576 *
6577 * Write a PHY register on the GMII.
6578 */
6579 static void
6580 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6581 {
6582 struct wm_softc *sc = device_private(self);
6583 uint32_t mdic = 0;
6584 int i;
6585
6586 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6587 MDIC_REGADD(reg) | MDIC_DATA(val));
6588
6589 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6590 mdic = CSR_READ(sc, WMREG_MDIC);
6591 if (mdic & MDIC_READY)
6592 break;
6593 delay(50);
6594 }
6595
6596 if ((mdic & MDIC_READY) == 0)
6597 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6598 device_xname(sc->sc_dev), phy, reg);
6599 else if (mdic & MDIC_E)
6600 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6601 device_xname(sc->sc_dev), phy, reg);
6602 }
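
/*
 * Both MDIC accessors above share one pattern: pack the opcode, PHY
 * address, register address and (for writes) data into a single
 * WMREG_MDIC write, then poll MDIC_READY.  A minimal sketch of a
 * read, using only the macros already used above:
 *
 *	CSR_WRITE(sc, WMREG_MDIC,
 *	    MDIC_OP_READ | MDIC_PHYADD(phy) | MDIC_REGADD(reg));
 *	do {
 *		mdic = CSR_READ(sc, WMREG_MDIC);
 *	} while ((mdic & MDIC_READY) == 0);	(bounded above)
 *	val = MDIC_DATA(mdic);			(valid if MDIC_E is clear)
 */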
6603
6604 /*
6605 * wm_gmii_i80003_readreg: [mii interface function]
6606 *
6607	 * Read a PHY register on the Kumeran bus.
6608	 * This could be handled by the PHY layer if we didn't have to lock the
6609	 * resource ...
6610 */
6611 static int
6612 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6613 {
6614 struct wm_softc *sc = device_private(self);
6615 int sem;
6616 int rv;
6617
6618 	if (phy != 1) /* Only one PHY on the Kumeran bus */
6619 return 0;
6620
6621 sem = swfwphysem[sc->sc_funcid];
6622 if (wm_get_swfw_semaphore(sc, sem)) {
6623 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6624 __func__);
6625 return 0;
6626 }
6627
6628 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6629 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6630 reg >> GG82563_PAGE_SHIFT);
6631 } else {
6632 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6633 reg >> GG82563_PAGE_SHIFT);
6634 }
6635 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6636 delay(200);
6637 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6638 delay(200);
6639
6640 wm_put_swfw_semaphore(sc, sem);
6641 return rv;
6642 }
6643
6644 /*
6645 * wm_gmii_i80003_writereg: [mii interface function]
6646 *
6647	 * Write a PHY register on the Kumeran bus.
6648	 * This could be handled by the PHY layer if we didn't have to lock the
6649	 * resource ...
6650 */
6651 static void
6652 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6653 {
6654 struct wm_softc *sc = device_private(self);
6655 int sem;
6656
6657 	if (phy != 1) /* Only one PHY on the Kumeran bus */
6658 return;
6659
6660 sem = swfwphysem[sc->sc_funcid];
6661 if (wm_get_swfw_semaphore(sc, sem)) {
6662 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6663 __func__);
6664 return;
6665 }
6666
6667 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6668 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6669 reg >> GG82563_PAGE_SHIFT);
6670 } else {
6671 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6672 reg >> GG82563_PAGE_SHIFT);
6673 }
6674 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6675 delay(200);
6676 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6677 delay(200);
6678
6679 wm_put_swfw_semaphore(sc, sem);
6680 }
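
/*
 * The GG82563 paging convention assumed by both accessors above: the
 * caller's 'reg' carries a page number above GG82563_PAGE_SHIFT and
 * the in-page offset in the bits covered by GG82563_MAX_REG_ADDRESS,
 * so every access costs two MDIC transactions: one to the (normal or
 * alternate) page select register, then one to the offset itself.
 */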
6681
6682 /*
6683 * wm_gmii_bm_readreg: [mii interface function]
6684 *
6685	 * Read a PHY register on the BM (82567) PHY.
6686	 * This could be handled by the PHY layer if we didn't have to lock the
6687	 * resource ...
6688 */
6689 static int
6690 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6691 {
6692 struct wm_softc *sc = device_private(self);
6693 int sem;
6694 int rv;
6695
6696 sem = swfwphysem[sc->sc_funcid];
6697 if (wm_get_swfw_semaphore(sc, sem)) {
6698 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6699 __func__);
6700 return 0;
6701 }
6702
6703 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6704 		if (phy == 1)
6705 			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
6706 		else
6707 			wm_gmii_i82544_writereg(self, phy,
6708 			    GG82563_PHY_PAGE_SELECT,
6709 			    reg >> GG82563_PAGE_SHIFT);
6710 	}
6712
6713 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6714 wm_put_swfw_semaphore(sc, sem);
6715 return rv;
6716 }
6717
6718 /*
6719 * wm_gmii_bm_writereg: [mii interface function]
6720 *
6721	 * Write a PHY register on the BM (82567) PHY.
6722	 * This could be handled by the PHY layer if we didn't have to lock the
6723	 * resource ...
6724 */
6725 static void
6726 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6727 {
6728 struct wm_softc *sc = device_private(self);
6729 int sem;
6730
6731 sem = swfwphysem[sc->sc_funcid];
6732 if (wm_get_swfw_semaphore(sc, sem)) {
6733 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6734 __func__);
6735 return;
6736 }
6737
6738 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6739 		if (phy == 1)
6740 			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
6741 		else
6742 			wm_gmii_i82544_writereg(self, phy,
6743 			    GG82563_PHY_PAGE_SELECT,
6744 			    reg >> GG82563_PAGE_SHIFT);
6745 	}
6747
6748 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6749 wm_put_swfw_semaphore(sc, sem);
6750 }
6751
6752 static void
6753 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6754 {
6755 struct wm_softc *sc = device_private(self);
6756 uint16_t regnum = BM_PHY_REG_NUM(offset);
6757 uint16_t wuce;
6758
6759 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6760 if (sc->sc_type == WM_T_PCH) {
6761 		/* XXX The e1000 driver does nothing here... why? */
6762 }
6763
6764 /* Set page 769 */
6765 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6766 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6767
6768 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6769
6770 wuce &= ~BM_WUC_HOST_WU_BIT;
6771 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6772 wuce | BM_WUC_ENABLE_BIT);
6773
6774 /* Select page 800 */
6775 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6776 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6777
6778 /* Write page 800 */
6779 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6780
6781 if (rd)
6782 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6783 else
6784 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6785
6786 /* Set page 769 */
6787 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6788 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6789
6790 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6791 }
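
/*
 * In short, the page 800 access sequence above is:
 *	1. select page 769 and set BM_WUC_ENABLE_BIT (with the host
 *	   wakeup bit cleared) to unlock page 800;
 *	2. select page 800, write the target register number to the
 *	   address opcode register, then read or write the data
 *	   opcode register;
 *	3. select page 769 again and restore the saved enable bits.
 */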
6792
6793 /*
6794 * wm_gmii_hv_readreg: [mii interface function]
6795 *
6796	 * Read a PHY register on the HV (82577/82578/82579) PHY.
6797	 * This could be handled by the PHY layer if we didn't have to lock the
6798	 * resource ...
6799 */
6800 static int
6801 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6802 {
6803 struct wm_softc *sc = device_private(self);
6804 uint16_t page = BM_PHY_REG_PAGE(reg);
6805 uint16_t regnum = BM_PHY_REG_NUM(reg);
6806 uint16_t val;
6807 int rv;
6808
6809 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6810 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6811 __func__);
6812 return 0;
6813 }
6814
6815 /* XXX Workaround failure in MDIO access while cable is disconnected */
6816 if (sc->sc_phytype == WMPHY_82577) {
6817 /* XXX must write */
6818 }
6819
6820 /* Page 800 works differently than the rest so it has its own func */
6821 	if (page == BM_WUC_PAGE) {
6822 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* don't leak it */
6823 		return val;
6824 	}
6825
6826 	/*
6827 	 * Pages lower than 768 work differently than the rest and would
6828 	 * need their own accessor, which is not implemented here.
6829 	 */
6830 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6831 		printf("%s: unsupported page %d\n", __func__, page);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* don't leak it */
6832 		return 0;
6833 	}
6834
6835 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6836 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6837 page << BME1000_PAGE_SHIFT);
6838 }
6839
6840 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6841 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6842 return rv;
6843 }
6844
6845 /*
6846 * wm_gmii_hv_writereg: [mii interface function]
6847 *
6848	 * Write a PHY register on the HV (82577/82578/82579) PHY.
6849	 * This could be handled by the PHY layer if we didn't have to lock the
6850	 * resource ...
6851 */
6852 static void
6853 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6854 {
6855 struct wm_softc *sc = device_private(self);
6856 uint16_t page = BM_PHY_REG_PAGE(reg);
6857 uint16_t regnum = BM_PHY_REG_NUM(reg);
6858
6859 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6860 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6861 __func__);
6862 return;
6863 }
6864
6865 /* XXX Workaround failure in MDIO access while cable is disconnected */
6866
6867 /* Page 800 works differently than the rest so it has its own func */
6868 	if (page == BM_WUC_PAGE) {
6869 		uint16_t tmp;
6870
6871 		tmp = val;
6872 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* don't leak it */
6873 		return;
6874 	}
6875
6876 	/*
6877 	 * Pages lower than 768 work differently than the rest and would
6878 	 * need their own accessor, which is not implemented here.
6879 	 */
6880 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6881 		printf("%s: unsupported page %d\n", __func__, page);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* don't leak it */
6882 		return;
6883 	}
6884
6885 /*
6886 * XXX Workaround MDIO accesses being disabled after entering IEEE
6887 * Power Down (whenever bit 11 of the PHY control register is set)
6888 */
6889
6890 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6891 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6892 page << BME1000_PAGE_SHIFT);
6893 }
6894
6895 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6896 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6897 }
6898
6899 /*
6900	 * wm_sgmii_readreg:	[mii interface function]
6901	 *
6902	 * Read a PHY register on the SGMII bus via the I2CCMD register.
6903	 * This could be handled by the PHY layer if we didn't have to lock the
6904	 * resource ...
6905 */
6906 static int
6907 wm_sgmii_readreg(device_t self, int phy, int reg)
6908 {
6909 struct wm_softc *sc = device_private(self);
6910 uint32_t i2ccmd;
6911 int i, rv;
6912
6913 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6914 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6915 __func__);
6916 return 0;
6917 }
6918
6919 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6920 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6921 | I2CCMD_OPCODE_READ;
6922 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6923
6924 /* Poll the ready bit */
6925 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6926 delay(50);
6927 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6928 if (i2ccmd & I2CCMD_READY)
6929 break;
6930 }
6931 if ((i2ccmd & I2CCMD_READY) == 0)
6932 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6933 if ((i2ccmd & I2CCMD_ERROR) != 0)
6934 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6935
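	/* The 16-bit result comes back byte-swapped; restore host order. */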
6936 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6937
6938 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6939 return rv;
6940 }
6941
6942 /*
6943	 * wm_sgmii_writereg:	[mii interface function]
6944	 *
6945	 * Write a PHY register on the SGMII bus via the I2CCMD register.
6946	 * This could be handled by the PHY layer if we didn't have to lock the
6947	 * resource ...
6948 */
6949 static void
6950 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6951 {
6952 struct wm_softc *sc = device_private(self);
6953 uint32_t i2ccmd;
6954 int i;
6955
6956 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6957 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6958 __func__);
6959 return;
6960 }
6961
6962 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6963 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6964 | I2CCMD_OPCODE_WRITE;
6965 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6966
6967 /* Poll the ready bit */
6968 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6969 delay(50);
6970 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6971 if (i2ccmd & I2CCMD_READY)
6972 break;
6973 }
6974 if ((i2ccmd & I2CCMD_READY) == 0)
6975 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6976 if ((i2ccmd & I2CCMD_ERROR) != 0)
6977 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6978
6979 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6980 }
6981
6982 /*
6983 * wm_gmii_statchg: [mii interface function]
6984 *
6985 * Callback from MII layer when media changes.
6986 */
6987 static void
6988 wm_gmii_statchg(struct ifnet *ifp)
6989 {
6990 struct wm_softc *sc = ifp->if_softc;
6991 struct mii_data *mii = &sc->sc_mii;
6992
6993 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6994 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6995 sc->sc_fcrtl &= ~FCRTL_XONE;
6996
6997 /*
6998 * Get flow control negotiation result.
6999 */
7000 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7001 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7002 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7003 mii->mii_media_active &= ~IFM_ETH_FMASK;
7004 }
7005
7006 if (sc->sc_flowflags & IFM_FLOW) {
7007 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7008 sc->sc_ctrl |= CTRL_TFCE;
7009 sc->sc_fcrtl |= FCRTL_XONE;
7010 }
7011 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7012 sc->sc_ctrl |= CTRL_RFCE;
7013 }
7014
7015 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7016 DPRINTF(WM_DEBUG_LINK,
7017 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7018 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7019 } else {
7020 DPRINTF(WM_DEBUG_LINK,
7021 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7022 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7023 }
7024
7025 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7026 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7027 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7028 : WMREG_FCRTL, sc->sc_fcrtl);
7029 if (sc->sc_type == WM_T_80003) {
7030 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7031 case IFM_1000_T:
7032 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7033 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7034 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7035 break;
7036 default:
7037 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7038 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7039 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7040 break;
7041 }
7042 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7043 }
7044 }
7045
7046 /*
7047 * wm_kmrn_readreg:
7048 *
7049 * Read a kumeran register
7050 */
7051 static int
7052 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7053 {
7054 int rv;
7055
7056 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7057 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7058 aprint_error_dev(sc->sc_dev,
7059 "%s: failed to get semaphore\n", __func__);
7060 return 0;
7061 }
7062 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7063 if (wm_get_swfwhw_semaphore(sc)) {
7064 aprint_error_dev(sc->sc_dev,
7065 "%s: failed to get semaphore\n", __func__);
7066 return 0;
7067 }
7068 }
7069
7070 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7071 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7072 KUMCTRLSTA_REN);
7073 delay(2);
7074
7075 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7076
7077 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7078 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7079 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7080 wm_put_swfwhw_semaphore(sc);
7081
7082 return rv;
7083 }
7084
7085 /*
7086 * wm_kmrn_writereg:
7087 *
7088 * Write a kumeran register
7089 */
7090 static void
7091 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7092 {
7093
7094 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7095 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7096 aprint_error_dev(sc->sc_dev,
7097 "%s: failed to get semaphore\n", __func__);
7098 return;
7099 }
7100 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7101 if (wm_get_swfwhw_semaphore(sc)) {
7102 aprint_error_dev(sc->sc_dev,
7103 "%s: failed to get semaphore\n", __func__);
7104 return;
7105 }
7106 }
7107
7108 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7109 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7110 (val & KUMCTRLSTA_MASK));
7111
7112 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7113 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7114 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7115 wm_put_swfwhw_semaphore(sc);
7116 }
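
/*
 * Both Kumeran accessors above multiplex through the single
 * WMREG_KUMCTRLSTA register: the register offset goes into the
 * KUMCTRLSTA_OFFSET field, the low KUMCTRLSTA_MASK bits carry the
 * data, and KUMCTRLSTA_REN requests a read; after a short delay the
 * result appears in those same low bits.
 */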
7117
7118 static int
7119 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7120 {
7121 uint32_t eecd = 0;
7122
7123 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7124 || sc->sc_type == WM_T_82583) {
7125 eecd = CSR_READ(sc, WMREG_EECD);
7126
7127 /* Isolate bits 15 & 16 */
7128 eecd = ((eecd >> 15) & 0x03);
7129
7130 /* If both bits are set, device is Flash type */
7131 if (eecd == 0x03)
7132 return 0;
7133 }
7134 return 1;
7135 }
7136
7137 static int
7138 wm_get_swsm_semaphore(struct wm_softc *sc)
7139 {
7140 int32_t timeout;
7141 uint32_t swsm;
7142
7143 /* Get the FW semaphore. */
7144 timeout = 1000 + 1; /* XXX */
7145 while (timeout) {
7146 swsm = CSR_READ(sc, WMREG_SWSM);
7147 swsm |= SWSM_SWESMBI;
7148 CSR_WRITE(sc, WMREG_SWSM, swsm);
7149 /* if we managed to set the bit we got the semaphore. */
7150 swsm = CSR_READ(sc, WMREG_SWSM);
7151 if (swsm & SWSM_SWESMBI)
7152 break;
7153
7154 delay(50);
7155 timeout--;
7156 }
7157
7158 if (timeout == 0) {
7159 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7160 /* Release semaphores */
7161 wm_put_swsm_semaphore(sc);
7162 return 1;
7163 }
7164 return 0;
7165 }
7166
7167 static void
7168 wm_put_swsm_semaphore(struct wm_softc *sc)
7169 {
7170 uint32_t swsm;
7171
7172 swsm = CSR_READ(sc, WMREG_SWSM);
7173 swsm &= ~(SWSM_SWESMBI);
7174 CSR_WRITE(sc, WMREG_SWSM, swsm);
7175 }
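
/*
 * The SWSM semaphore above is acquired by readback: software sets
 * SWSM_SWESMBI and reads the register back, and if the bit stuck,
 * software owns the semaphore; otherwise firmware holds it and the
 * loop retries (up to roughly 1000 * 50us here).
 */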
7176
7177 static int
7178 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7179 {
7180 uint32_t swfw_sync;
7181 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7182 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7183 	int timeout;
7184
7185 for (timeout = 0; timeout < 200; timeout++) {
7186 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7187 if (wm_get_swsm_semaphore(sc)) {
7188 aprint_error_dev(sc->sc_dev,
7189 "%s: failed to get semaphore\n",
7190 __func__);
7191 return 1;
7192 }
7193 }
7194 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7195 if ((swfw_sync & (swmask | fwmask)) == 0) {
7196 swfw_sync |= swmask;
7197 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7198 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7199 wm_put_swsm_semaphore(sc);
7200 return 0;
7201 }
7202 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7203 wm_put_swsm_semaphore(sc);
7204 delay(5000);
7205 }
7206 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7207 device_xname(sc->sc_dev), mask, swfw_sync);
7208 return 1;
7209 }
7210
7211 static void
7212 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7213 {
7214 uint32_t swfw_sync;
7215
7216 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7217 while (wm_get_swsm_semaphore(sc) != 0)
7218 continue;
7219 }
7220 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7221 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7222 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7223 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7224 wm_put_swsm_semaphore(sc);
7225 }
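
/*
 * SW_FW_SYNC keeps two claim bits per resource: mask << SWFW_SOFT_SHIFT
 * for software and mask << SWFW_FIRM_SHIFT for firmware.  A resource is
 * free only when both bits are clear, and on parts that have one the
 * register itself is guarded by the SWSM semaphore taken above.
 */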
7226
7227 static int
7228 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7229 {
7230 uint32_t ext_ctrl;
7231 	int timeout;
7232
7233 for (timeout = 0; timeout < 200; timeout++) {
7234 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7235 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7236 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7237
7238 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7239 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7240 return 0;
7241 delay(5000);
7242 }
7243 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7244 device_xname(sc->sc_dev), ext_ctrl);
7245 return 1;
7246 }
7247
7248 static void
7249 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7250 {
7251 uint32_t ext_ctrl;
7252 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7253 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7254 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7255 }
7256
7257 static int
7258 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7259 {
7260 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7261 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7262
7263 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
7264 /* Value of bit 22 corresponds to the flash bank we're on. */
7265 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
7266 } else {
7267 uint8_t bank_high_byte;
7268 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
7269 if ((bank_high_byte & 0xc0) == 0x80)
7270 *bank = 0;
7271 else {
7272 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7273 &bank_high_byte);
7274 if ((bank_high_byte & 0xc0) == 0x80)
7275 *bank = 1;
7276 else {
7277 aprint_error_dev(sc->sc_dev,
7278 "EEPROM not present\n");
7279 return -1;
7280 }
7281 }
7282 }
7283
7284 return 0;
7285 }
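
/*
 * The 0xc0/0x80 test above checks the NVM signature: bits [7:6] of
 * the high byte of ICH_NVM_SIG_WORD must read 10b for a bank to be
 * valid.  Bank 0 is probed first; bank 1 starts one flash bank size
 * (in bytes) further into the flash.
 */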
7286
7287 /******************************************************************************
7288 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7289 * register.
7290 *
7291 * sc - Struct containing variables accessed by shared code
7292 * offset - offset of word in the EEPROM to read
7293 * data - word read from the EEPROM
7294 * words - number of words to read
7295 *****************************************************************************/
7296 static int
7297 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7298 {
7299 int32_t error = 0;
7300 uint32_t flash_bank = 0;
7301 uint32_t act_offset = 0;
7302 uint32_t bank_offset = 0;
7303 uint16_t word = 0;
7304 uint16_t i = 0;
7305
7306 	/*
	 * We need to know which is the valid flash bank.  In the event
7307 	 * that we didn't allocate eeprom_shadow_ram, we may not be
7308 	 * managing flash_bank.  So it cannot be trusted and needs
7309 	 * to be updated with each read.
7310 	 */
7311 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7312 if (error) {
7313 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7314 __func__);
7315 return error;
7316 }
7317
7318 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
7319 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7320
7321 error = wm_get_swfwhw_semaphore(sc);
7322 if (error) {
7323 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7324 __func__);
7325 return error;
7326 }
7327
7328 for (i = 0; i < words; i++) {
7329 /* The NVM part needs a byte offset, hence * 2 */
7330 act_offset = bank_offset + ((offset + i) * 2);
7331 error = wm_read_ich8_word(sc, act_offset, &word);
7332 if (error) {
7333 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
7334 __func__);
7335 break;
7336 }
7337 data[i] = word;
7338 }
7339
7340 wm_put_swfwhw_semaphore(sc);
7341 return error;
7342 }
7343
7344 /******************************************************************************
7345 * This function does initial flash setup so that a new read/write/erase cycle
7346 * can be started.
7347 *
7348 * sc - The pointer to the hw structure
7349 ****************************************************************************/
7350 static int32_t
7351 wm_ich8_cycle_init(struct wm_softc *sc)
7352 {
7353 uint16_t hsfsts;
7354 int32_t error = 1;
7355 int32_t i = 0;
7356
7357 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7358
7359 	/* Maybe we should check the Flash Descriptor Valid bit in HW status */
7360 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7361 return error;
7362 }
7363
7364 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
7366 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7367
7368 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7369
7370 	/*
7371 	 * Either we should have a hardware SPI cycle-in-progress bit to
7372 	 * check against in order to start a new cycle, or the FDONE bit
7373 	 * should be changed in the hardware so that it is 1 after hardware
7374 	 * reset, which can then be used to tell whether a cycle is in
7375 	 * progress or has been completed.  We should also have some software
7376 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit so
7377 	 * that access by two threads can be serialized, or some way to keep
7378 	 * two threads from starting a cycle at the same time.
7379 	 */
7380
7381 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7382 /*
7383 * There is no cycle running at present, so we can start a
7384 * cycle
7385 */
7386
7387 /* Begin by setting Flash Cycle Done. */
7388 hsfsts |= HSFSTS_DONE;
7389 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7390 error = 0;
7391 } else {
7392 		/*
7393 		 * Otherwise poll for some time so the current cycle has a
7394 		 * chance to end before giving up.
7395 		 */
7396 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7397 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7398 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7399 error = 0;
7400 break;
7401 }
7402 delay(1);
7403 }
7404 if (error == 0) {
7405 /*
7406 			 * The previous cycle finished in time; now set the
7407 			 * Flash Cycle Done bit.
7408 */
7409 hsfsts |= HSFSTS_DONE;
7410 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7411 }
7412 }
7413 return error;
7414 }
7415
7416 /******************************************************************************
7417 * This function starts a flash cycle and waits for its completion
7418 *
7419 * sc - The pointer to the hw structure
7420 ****************************************************************************/
7421 static int32_t
7422 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7423 {
7424 uint16_t hsflctl;
7425 uint16_t hsfsts;
7426 int32_t error = 1;
7427 uint32_t i = 0;
7428
7429 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7430 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7431 hsflctl |= HSFCTL_GO;
7432 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7433
7434 	/* Wait until the FDONE bit is set */
7435 do {
7436 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7437 if (hsfsts & HSFSTS_DONE)
7438 break;
7439 delay(1);
7440 i++;
7441 } while (i < timeout);
7442 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7443 error = 0;
7444
7445 return error;
7446 }
7447
7448 /******************************************************************************
7449 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7450 *
7451 * sc - The pointer to the hw structure
7452 * index - The index of the byte or word to read.
7453 * size - Size of data to read, 1=byte 2=word
7454 * data - Pointer to the word to store the value read.
7455 *****************************************************************************/
7456 static int32_t
7457 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7458 uint32_t size, uint16_t* data)
7459 {
7460 uint16_t hsfsts;
7461 uint16_t hsflctl;
7462 uint32_t flash_linear_address;
7463 uint32_t flash_data = 0;
7464 int32_t error = 1;
7465 int32_t count = 0;
7466
7467 	if (size < 1 || size > 2 || data == NULL ||
7468 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
7469 return error;
7470
7471 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7472 sc->sc_ich8_flash_base;
7473
7474 do {
7475 delay(1);
7476 /* Steps */
7477 error = wm_ich8_cycle_init(sc);
7478 if (error)
7479 break;
7480
7481 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7482 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7483 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7484 & HSFCTL_BCOUNT_MASK;
7485 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7486 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7487
7488 /*
7489 * Write the last 24 bits of index into Flash Linear address
7490 * field in Flash Address
7491 */
7492 		/* TODO: maybe check the index against the size of the flash */
7493
7494 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7495
7496 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7497
7498 		/*
7499 		 * If FCERR is set, clear it and retry the whole sequence a
7500 		 * few more times; otherwise read the result out of the Flash
7501 		 * Data0 register, least significant byte first.
7502 		 */
7504 if (error == 0) {
7505 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7506 if (size == 1)
7507 *data = (uint8_t)(flash_data & 0x000000FF);
7508 else if (size == 2)
7509 *data = (uint16_t)(flash_data & 0x0000FFFF);
7510 break;
7511 } else {
7512 /*
7513 * If we've gotten here, then things are probably
7514 * completely hosed, but if the error condition is
7515 * detected, it won't hurt to give it another try...
7516 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7517 */
7518 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7519 if (hsfsts & HSFSTS_ERR) {
7520 /* Repeat for some time before giving up. */
7521 continue;
7522 } else if ((hsfsts & HSFSTS_DONE) == 0)
7523 break;
7524 }
7525 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7526
7527 return error;
7528 }
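
/*
 * Putting the pieces together, one read attempt above is:
 *	1. wm_ich8_cycle_init() - ensure no cycle is in flight and
 *	   clear stale error/done status;
 *	2. program HSFCTL with the byte count and ICH_CYCLE_READ;
 *	3. write the 24-bit linear flash address to FADDR;
 *	4. wm_ich8_flash_cycle() - set HSFCTL_GO and poll FDONE;
 *	5. on success pull one or two bytes out of FDATA0, on FCERR
 *	   retry up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
 */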
7529
7530 /******************************************************************************
7531 * Reads a single byte from the NVM using the ICH8 flash access registers.
7532 *
7533  * sc - pointer to the wm_softc structure
7534 * index - The index of the byte to read.
7535 * data - Pointer to a byte to store the value read.
7536 *****************************************************************************/
7537 static int32_t
7538 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7539 {
7540 int32_t status;
7541 uint16_t word = 0;
7542
7543 status = wm_read_ich8_data(sc, index, 1, &word);
7544 if (status == 0)
7545 *data = (uint8_t)word;
7546 else
7547 *data = 0;
7548
7549 return status;
7550 }
7551
7552 /******************************************************************************
7553 * Reads a word from the NVM using the ICH8 flash access registers.
7554 *
7555  * sc - pointer to the wm_softc structure
7556 * index - The starting byte index of the word to read.
7557 * data - Pointer to a word to store the value read.
7558 *****************************************************************************/
7559 static int32_t
7560 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7561 {
7562 int32_t status;
7563
7564 status = wm_read_ich8_data(sc, index, 2, data);
7565 return status;
7566 }
7567
7568 static int
7569 wm_check_mng_mode(struct wm_softc *sc)
7570 {
7571 int rv;
7572
7573 switch (sc->sc_type) {
7574 case WM_T_ICH8:
7575 case WM_T_ICH9:
7576 case WM_T_ICH10:
7577 case WM_T_PCH:
7578 case WM_T_PCH2:
7579 rv = wm_check_mng_mode_ich8lan(sc);
7580 break;
7581 case WM_T_82574:
7582 case WM_T_82583:
7583 rv = wm_check_mng_mode_82574(sc);
7584 break;
7585 case WM_T_82571:
7586 case WM_T_82572:
7587 case WM_T_82573:
7588 case WM_T_80003:
7589 rv = wm_check_mng_mode_generic(sc);
7590 break;
7591 default:
7592 		/* Nothing to do */
7593 rv = 0;
7594 break;
7595 }
7596
7597 return rv;
7598 }
7599
static int
wm_check_mng_mode_ich8lan(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_check_mng_mode_82574(struct wm_softc *sc)
{
	uint16_t data;

	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);

	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
		return 1;

	return 0;
}

static int
wm_check_mng_mode_generic(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

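/*
 * Editor's note: the checks above all compare the FWSM mode bit-field
 * against a management-mode value.  A minimal sketch of that
 * mask-and-compare idiom, with an illustrative helper name:
 */
#if 0
static int
mode_field_is(uint32_t reg, uint32_t mode)
{
	/* Isolate the mode field, then compare against the shifted value. */
	return (reg & FWSM_MODE_MASK) == (mode << FWSM_MODE_SHIFT);
}
/* e.g. mode_field_is(CSR_READ(sc, WMREG_FWSM), MNG_IAMT_MODE) */
#endif
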
static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
	    device_xname(sc->sc_dev), manc));
	if (((manc & MANC_RECV_TCO_EN) == 0)
	    || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((fwsm & FWSM_MODE_MASK)
			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		return 1;

	return 0;
}

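/*
 * Editor's note: wm_enable_mng_pass_thru() encodes the decision below.
 * This hypothetical restatement, taking the register values and flags as
 * plain parameters, is for readability only and is not driver code.
 */
#if 0
static int
mng_pass_thru_decision(uint32_t manc, uint32_t fwsm, uint32_t factps,
    int asf_fw_present, int arc_subsys_valid)
{
	if (!asf_fw_present)
		return 0;
	/* Both TCO reception and MAC address filtering must be on. */
	if ((manc & MANC_RECV_TCO_EN) == 0 ||
	    (manc & MANC_EN_MAC_ADDR_FILTER) == 0)
		return 0;
	if (arc_subsys_valid)
		return (factps & FACTPS_MNGCG) == 0 &&
		    (fwsm & FWSM_MODE_MASK) ==
		    (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT);
	return (manc & MANC_SMBUS_EN) != 0 && (manc & MANC_ASF_EN) == 0;
}
#endif
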
static int
wm_check_reset_block(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		reg = CSR_READ(sc, WMREG_FWSM);
		if ((reg & FWSM_RSPCIPHY) != 0)
			return 0;
		else
			return -1;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return -1;
		else
			return 0;
		break;
	default:
		/* no problem */
		break;
	}

	return 0;
}

static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_82573:
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

static void
wm_release_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
		return;

	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
	} else {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
	}
}

/* XXX Currently TBI only */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK,
	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)));

	/*
	 * SWDPIN   LU   RXCW
	 *      0    0      0
	 *      0    0      1  (should not happen)
	 *      0    1      0  (should not happen)
	 *      0    1      1  (should not happen)
	 *      1    0      0  Disable autonegotiation and force link up
	 *      1    0      1  got /C/ but not linkup yet
	 *      1    1      0  (linkup)
	 *      1    1      1  If IFM_AUTO, go back to autonegotiation
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
		    __func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so we should keep sc->sc_ctrl in sync.
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonegotiation\n",
		    __func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw,
		    ctrl, status));
	}

	return 0;
}

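/*
 * Editor's note: the truth table in wm_check_for_link() can be read as a
 * three-input decision (plus the media mode).  The sketch below restates
 * it with illustrative names; it is not driver code.
 */
#if 0
enum tbi_action { TBI_FORCE_LINKUP, TBI_AUTONEG, TBI_GOT_C, TBI_NOTHING };

static enum tbi_action
tbi_decide(int swdpin, int lu, int rxcw_c, int media_is_auto)
{
	if (swdpin && !lu && !rxcw_c)
		return TBI_FORCE_LINKUP;	/* no partner: force link */
	if (lu && rxcw_c && media_is_auto)
		return TBI_AUTONEG;		/* partner talks: renegotiate */
	if (swdpin && rxcw_c)
		return TBI_GOT_C;		/* /C/ seen, no link yet */
	return TBI_NOTHING;
}
#endif
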
/* Work-around for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link is other than 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! No lock loss. */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return;
}

/* Work-around for the "WOL from S5 stops working" issue */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

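/*
 * Editor's note: the workaround above pulses a diagnostic bit (set it,
 * write, clear it, write).  A generic sketch of that pulse idiom, with a
 * hypothetical helper name; the read/write routines are the driver's own:
 */
#if 0
static void
pulse_kmrn_bit(struct wm_softc *sc, int offset, uint16_t bit)
{
	uint16_t reg = wm_kmrn_readreg(sc, offset);

	wm_kmrn_writereg(sc, offset, reg | bit);	/* assert */
	wm_kmrn_writereg(sc, offset, reg & ~bit);	/* deassert */
}
#endif
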
#ifdef WM_WOL
/* Power-down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
#endif /* WM_WOL */

/*
 * Workaround for PCH PHYs
 * XXX should be moved to a new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page 2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled when the link runs at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	delay(20);
}

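/*
 * Editor's note: wm_configure_k1_ich8lan() briefly forces the MAC speed
 * while the K1 setting takes effect and then restores the original CTRL
 * and CTRL_EXT values.  That save/mangle/restore idiom, in a minimal
 * hedged sketch with a hypothetical function name:
 */
#if 0
static void
with_forced_speed(struct wm_softc *sc)
{
	uint32_t ctrl = CSR_READ(sc, WMREG_CTRL);
	uint32_t ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	/* Force the speed and bypass the speed-detect logic. */
	CSR_WRITE(sc, WMREG_CTRL,
	    (ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100)) | CTRL_FRCSPD);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	delay(20);

	/* Restore the saved register values. */
	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	delay(20);
}
#endif
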
static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_check_reset_block(sc) == 0)) {
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(10);
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}

static void
wm_set_pcie_completion_timeout(struct wm_softc *sc)
{
	uint32_t gcr;
	pcireg_t ctrl2;

	gcr = CSR_READ(sc, WMREG_GCR);

	/* Only take action if the timeout value is defaulted to 0 */
	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
		goto out;

	if ((gcr & GCR_CAP_VER2) == 0) {
		gcr |= GCR_CMPL_TMOUT_10MS;
		goto out;
	}

	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
	ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);

out:
	/* Disable completion timeout resend */
	gcr &= ~GCR_CMPL_TMOUT_RESEND;

	CSR_WRITE(sc, WMREG_GCR, gcr);
}

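/*
 * Editor's note: the DCSR2 update above is a standard PCI configuration
 * read-modify-write through pci(9).  A minimal sketch of that idiom, with
 * the register offset and bits as illustrative parameters:
 */
#if 0
static void
pci_conf_setbits(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t bits)
{
	pcireg_t val = pci_conf_read(pc, tag, reg);

	pci_conf_write(pc, tag, reg, val | bits);
}
#endif
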
/* Special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_init_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* Disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* Enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
#if 0 /* XXX */
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
#endif
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flag is set only later, after the
	 * EEPROM-related state has been reset.
	 */
}

#ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if (child->mii_mpd_rev <= 2)
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for multicast packets */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		|| (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
#endif /* WM_WOL */

static bool
wm_suspend(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_release_manageability(sc);
	wm_release_hw_control(sc);
#ifdef WM_WOL
	wm_enable_wakeup(sc);
#endif

	return true;
}

static bool
wm_resume(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_init_manageability(sc);

	return true;
}

static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}