/*	$NetBSD: if_wm.c,v 1.232 2012/08/29 20:39:24 bouyer Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.232 2012/08/29 20:39:24 bouyer Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
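
/*
 * Illustrative use of the DPRINTF() macro above: the second argument
 * is a parenthesized printf() argument list, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 *
 * which prints only when the WM_DEBUG_LINK bit is set in wm_debug.
 */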

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
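
/*
 * A minimal sketch of the ring arithmetic above: WM_NTXDESC(sc) is a
 * power of two, so "(x + 1) & WM_NTXDESC_MASK(sc)" behaves like
 * "(x + 1) % WM_NTXDESC(sc)" without a division.  With 4096
 * descriptors, WM_NEXTTX(sc, 4095) == (4096 & 4095) == 0, wrapping
 * the ring back to the start.
 */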

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
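
/*
 * Worked example for the sizing comment above (assuming a 9014-byte
 * jumbo frame, i.e. a 9000-byte MTU plus the Ethernet header): such
 * a frame spans ceil(9014 / 2048) = 5 MCLBYTES buffers, so 256
 * descriptors leave room for roughly 256 / 5 = 51 in-flight jumbo
 * packets; the "50" above is that figure rounded down.
 */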

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

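/*
 * Per-PHY SW/FW semaphore bits, indexed by the function ID
 * (sc_funcid); presumably handed to wm_get_swfw_semaphore() before
 * touching the PHY on multi-port parts.
 */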
static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
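
/*
 * Sketch of the tail-pointer idiom implemented by the two macros
 * above: sc_rxtailp always points at the m_next slot where the next
 * fragment belongs, so appending is O(1) with no empty-list special
 * case:
 *
 *	WM_RXCHAIN_RESET(sc);		sc_rxtailp = &sc_rxhead
 *	WM_RXCHAIN_LINK(sc, m1);	sc_rxhead = m1
 *	WM_RXCHAIN_LINK(sc, m2);	m1->m_next = m2
 */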

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
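
/*
 * CSR_WRITE_FLUSH() pushes posted writes out to the chip by doing a
 * (discarded) read of the STATUS register.  A typical pattern is:
 *
 *	CSR_WRITE(sc, reg, val);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10);
 *
 * when a write must reach the hardware before a timed wait begins.
 */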

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
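
/*
 * Example of the wrap handling above: with a 256-entry ring,
 * WM_CDTXSYNC(sc, 250, 10, ops) first syncs descriptors 250..255 and
 * then 0..3, i.e. a range that wraps past the end of the ring is
 * split into two bus_dmamap_sync() calls.
 */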

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
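
/*
 * Concretely: with sc_align_tweak == 2 the 14-byte Ethernet header
 * ends on a 4-byte boundary, so the IP header that follows it is
 * 4-byte aligned; with sc_align_tweak == 0 the full 2K buffer is
 * usable for frames that would otherwise overflow (2K - 2) bytes.
 */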

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * Not sure whether this is WMP_F_1000X or WMP_F_SERDES; we
	 * don't have the hardware, so it's disabled for now.
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
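
/*
 * wm_io_read() above and wm_io_write() below use the chip's indirect
 * I/O window: as I read the i8254x documentation, offset 0 of the I/O
 * BAR latches a register offset (IOADDR) and offset 4 is the data
 * window (IODATA), so each access is an address write followed by a
 * data read or write.
 */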

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
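
/*
 * Illustration only: with a 64-bit bus_addr_t, v = 0x123456789abc
 * stores wa_low = htole32(0x56789abc) and wa_high = htole32(0x1234);
 * on 32-bit platforms the sizeof() test is folded away at compile
 * time and wa_high is always 0.
 */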

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i != PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the
			 * 82580 (and newer?) chips have no
			 * PCI_MAPREG_TYPE_IO.  That's not a problem,
			 * because those newer chips don't have the bug
			 * this mapping works around.
			 *
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it
			 * hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
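	/*
	 * The 4G note above is enforced by the 0x100000000ULL "boundary"
	 * argument in the bus_dmamem_alloc() call below, which tells the
	 * allocator that the memory may not cross a 4G address boundary.
	 */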
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
		    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
		    sc->sc_cd_rseg, sc->sc_cd_size,
		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0,
			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/* get PHY control from SMBus to PCIe */
	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
		wm_smbustopci(sc);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
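		/*
		 * As read here (an interpretation of the GFPREG layout):
		 * the low bits of GFPREG give the first flash sector of
		 * the NVM region and the bits from 16 up give the last,
		 * so the code above converts (limit + 1 - base) sectors
		 * to bytes and then to 16-bit words for one of the two
		 * flash banks.
		 */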
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
1615 /*
1616 * Validate the EEPROM checksum. If the checksum fails, flag
1617 * this for later, so we can fail future reads from the EEPROM.
1618 */
1619 if (wm_validate_eeprom_checksum(sc)) {
1620 /*
1621 		 * Check again because some PCI-e parts fail the first
1622 		 * check due to the link being in a sleep state.
1623 */
1624 if (wm_validate_eeprom_checksum(sc))
1625 sc->sc_flags |= WM_F_EEPROM_INVALID;
1626 }
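	/*
	 * A note on the convention validated above (a sketch, assuming
	 * the EEPROM_SIZE and EEPROM_CHECKSUM constants from if_wmreg.h,
	 * not the actual implementation): the EEPROM's 16-bit words are
	 * expected to sum, modulo 2^16, to a fixed signature.  Roughly:
	 *
	 *	uint16_t sum = 0, word;
	 *	for (off = 0; off < EEPROM_SIZE; off++) {
	 *		wm_read_eeprom(sc, off, 1, &word);
	 *		sum += word;
	 *	}
	 *	valid = (sum == EEPROM_CHECKSUM);
	 */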
1627
1628 /* Set device properties (macflags) */
1629 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1630
1631 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1632 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1633 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1634 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1635 } else {
1636 if (sc->sc_flags & WM_F_EEPROM_SPI)
1637 eetype = "SPI";
1638 else
1639 eetype = "MicroWire";
1640 aprint_verbose_dev(sc->sc_dev,
1641 "%u word (%d address bits) %s EEPROM\n",
1642 1U << sc->sc_ee_addrbits,
1643 sc->sc_ee_addrbits, eetype);
1644 }
1645
1646 /*
1647 	 * Read the Ethernet address from the EEPROM, unless it was
1648 	 * already provided via device properties.
1649 */
1650 ea = prop_dictionary_get(dict, "mac-address");
1651 if (ea != NULL) {
1652 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1653 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1654 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1655 } else {
1656 if (wm_read_mac_addr(sc, enaddr) != 0) {
1657 aprint_error_dev(sc->sc_dev,
1658 "unable to read Ethernet address\n");
1659 return;
1660 }
1661 }
1662
1663 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1664 ether_sprintf(enaddr));
1665
1666 /*
1667 * Read the config info from the EEPROM, and set up various
1668 * bits in the control registers based on their contents.
1669 */
1670 pn = prop_dictionary_get(dict, "i82543-cfg1");
1671 if (pn != NULL) {
1672 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1673 cfg1 = (uint16_t) prop_number_integer_value(pn);
1674 } else {
1675 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1676 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1677 return;
1678 }
1679 }
1680
1681 pn = prop_dictionary_get(dict, "i82543-cfg2");
1682 if (pn != NULL) {
1683 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1684 cfg2 = (uint16_t) prop_number_integer_value(pn);
1685 } else {
1686 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1687 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1688 return;
1689 }
1690 }
1691
1692 	/* Determine where the APM (WOL) enable bit lives for this chip */
1693 switch (sc->sc_type) {
1694 case WM_T_82542_2_0:
1695 case WM_T_82542_2_1:
1696 case WM_T_82543:
1697 /* dummy? */
1698 eeprom_data = 0;
1699 apme_mask = EEPROM_CFG3_APME;
1700 break;
1701 case WM_T_82544:
1702 apme_mask = EEPROM_CFG2_82544_APM_EN;
1703 eeprom_data = cfg2;
1704 break;
1705 case WM_T_82546:
1706 case WM_T_82546_3:
1707 case WM_T_82571:
1708 case WM_T_82572:
1709 case WM_T_82573:
1710 case WM_T_82574:
1711 case WM_T_82583:
1712 case WM_T_80003:
1713 default:
1714 apme_mask = EEPROM_CFG3_APME;
1715 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1716 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1717 break;
1718 case WM_T_82575:
1719 case WM_T_82576:
1720 case WM_T_82580:
1721 case WM_T_82580ER:
1722 case WM_T_I350:
1723 case WM_T_ICH8:
1724 case WM_T_ICH9:
1725 case WM_T_ICH10:
1726 case WM_T_PCH:
1727 case WM_T_PCH2:
1728 /* XXX The funcid should be checked on some devices */
1729 apme_mask = WUC_APME;
1730 eeprom_data = CSR_READ(sc, WMREG_WUC);
1731 break;
1732 }
1733
1734 	/* Set WM_F_WOL based on the APM enable bit read above */
1735 if ((eeprom_data & apme_mask) != 0)
1736 sc->sc_flags |= WM_F_WOL;
1737 #ifdef WM_DEBUG
1738 if ((sc->sc_flags & WM_F_WOL) != 0)
1739 printf("WOL\n");
1740 #endif
1741
1742 /*
1743 	 * XXX need special handling for some multi-port cards
1744 	 * to disable a particular port.
1745 */
1746
1747 if (sc->sc_type >= WM_T_82544) {
1748 pn = prop_dictionary_get(dict, "i82543-swdpin");
1749 if (pn != NULL) {
1750 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1751 swdpin = (uint16_t) prop_number_integer_value(pn);
1752 } else {
1753 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1754 aprint_error_dev(sc->sc_dev,
1755 "unable to read SWDPIN\n");
1756 return;
1757 }
1758 }
1759 }
1760
1761 if (cfg1 & EEPROM_CFG1_ILOS)
1762 sc->sc_ctrl |= CTRL_ILOS;
1763 if (sc->sc_type >= WM_T_82544) {
1764 sc->sc_ctrl |=
1765 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1766 CTRL_SWDPIO_SHIFT;
1767 sc->sc_ctrl |=
1768 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1769 CTRL_SWDPINS_SHIFT;
1770 } else {
1771 sc->sc_ctrl |=
1772 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1773 CTRL_SWDPIO_SHIFT;
1774 }
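	/*
	 * To make the above concrete: the EEPROM SWDPIN word packs two
	 * nibbles, the software-definable pin values (SWDPIN) and their
	 * direction bits (SWDPIO); each nibble is shifted into the
	 * matching 4-bit field of the CTRL register.  Pre-82544 parts
	 * carry only the SWDPIO nibble, taken from CFG1 instead.
	 */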
1775
1776 #if 0
1777 if (sc->sc_type >= WM_T_82544) {
1778 if (cfg1 & EEPROM_CFG1_IPS0)
1779 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1780 if (cfg1 & EEPROM_CFG1_IPS1)
1781 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1782 sc->sc_ctrl_ext |=
1783 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1784 CTRL_EXT_SWDPIO_SHIFT;
1785 sc->sc_ctrl_ext |=
1786 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1787 CTRL_EXT_SWDPINS_SHIFT;
1788 } else {
1789 sc->sc_ctrl_ext |=
1790 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1791 CTRL_EXT_SWDPIO_SHIFT;
1792 }
1793 #endif
1794
1795 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1796 #if 0
1797 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1798 #endif
1799
1800 /*
1801 * Set up some register offsets that are different between
1802 * the i82542 and the i82543 and later chips.
1803 */
1804 if (sc->sc_type < WM_T_82543) {
1805 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1806 sc->sc_tdt_reg = WMREG_OLD_TDT;
1807 } else {
1808 sc->sc_rdt_reg = WMREG_RDT;
1809 sc->sc_tdt_reg = WMREG_TDT;
1810 }
1811
1812 if (sc->sc_type == WM_T_PCH) {
1813 uint16_t val;
1814
1815 /* Save the NVM K1 bit setting */
1816 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1817
1818 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1819 sc->sc_nvm_k1_enabled = 1;
1820 else
1821 sc->sc_nvm_k1_enabled = 0;
1822 }
1823
1824 /*
1825 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
1826 * media structures accordingly.
1827 */
1828 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1829 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1830 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1831 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1832 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1833 wm_gmii_mediainit(sc, wmp->wmp_product);
1834 } else if (sc->sc_type < WM_T_82543 ||
1835 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1836 if (wmp->wmp_flags & WMP_F_1000T)
1837 aprint_error_dev(sc->sc_dev,
1838 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1839 wm_tbi_mediainit(sc);
1840 } else {
1841 switch (sc->sc_type) {
1842 case WM_T_82575:
1843 case WM_T_82576:
1844 case WM_T_82580:
1845 case WM_T_82580ER:
1846 case WM_T_I350:
1847 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1848 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1849 case CTRL_EXT_LINK_MODE_SGMII:
1850 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1851 sc->sc_flags |= WM_F_SGMII;
1852 CSR_WRITE(sc, WMREG_CTRL_EXT,
1853 reg | CTRL_EXT_I2C_ENA);
1854 wm_gmii_mediainit(sc, wmp->wmp_product);
1855 break;
1856 case CTRL_EXT_LINK_MODE_1000KX:
1857 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1858 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1859 CSR_WRITE(sc, WMREG_CTRL_EXT,
1860 reg | CTRL_EXT_I2C_ENA);
1861 panic("not supported yet\n");
1862 break;
1863 case CTRL_EXT_LINK_MODE_GMII:
1864 default:
1865 CSR_WRITE(sc, WMREG_CTRL_EXT,
1866 reg & ~CTRL_EXT_I2C_ENA);
1867 wm_gmii_mediainit(sc, wmp->wmp_product);
1868 break;
1869 }
1870 break;
1871 default:
1872 if (wmp->wmp_flags & WMP_F_1000X)
1873 aprint_error_dev(sc->sc_dev,
1874 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1875 wm_gmii_mediainit(sc, wmp->wmp_product);
1876 }
1877 }
1878
1879 ifp = &sc->sc_ethercom.ec_if;
1880 xname = device_xname(sc->sc_dev);
1881 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1882 ifp->if_softc = sc;
1883 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1884 ifp->if_ioctl = wm_ioctl;
1885 if (sc->sc_type == WM_T_I350)
1886 ifp->if_start = wm_nq_start;
1887 else
1888 ifp->if_start = wm_start;
1889 ifp->if_watchdog = wm_watchdog;
1890 ifp->if_init = wm_init;
1891 ifp->if_stop = wm_stop;
1892 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1893 IFQ_SET_READY(&ifp->if_snd);
1894
1895 	/* Check for jumbo frame support */
1896 switch (sc->sc_type) {
1897 case WM_T_82573:
1898 /* XXX limited to 9234 if ASPM is disabled */
1899 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1900 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1901 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1902 break;
1903 case WM_T_82571:
1904 case WM_T_82572:
1905 case WM_T_82574:
1906 case WM_T_82575:
1907 case WM_T_82576:
1908 case WM_T_82580:
1909 case WM_T_82580ER:
1910 case WM_T_I350:
1911 case WM_T_80003:
1912 case WM_T_ICH9:
1913 case WM_T_ICH10:
1914 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1915 /* XXX limited to 9234 */
1916 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1917 break;
1918 case WM_T_PCH:
1919 /* XXX limited to 4096 */
1920 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1921 break;
1922 case WM_T_82542_2_0:
1923 case WM_T_82542_2_1:
1924 case WM_T_82583:
1925 case WM_T_ICH8:
1926 /* No support for jumbo frame */
1927 break;
1928 default:
1929 /* ETHER_MAX_LEN_JUMBO */
1930 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1931 break;
1932 }
1933
1934 /*
1935 	 * If we're an i82543 or greater, we can support VLANs.
1936 */
1937 if (sc->sc_type == WM_T_82575 || sc->sc_type == WM_T_82576)
1938 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
1939 else if (sc->sc_type >= WM_T_82543)
1940 sc->sc_ethercom.ec_capabilities |=
1941 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1942
1943 /*
1944 	 * We can perform TCPv4 and UDPv4 checksums in hardware.  Only
1945 	 * on i82543 and later.
1946 */
1947 if (sc->sc_type >= WM_T_82543) {
1948 ifp->if_capabilities |=
1949 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1950 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1951 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1952 IFCAP_CSUM_TCPv6_Tx |
1953 IFCAP_CSUM_UDPv6_Tx;
1954 }
1955
1956 /*
1957 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1958 *
1959 * 82541GI (8086:1076) ... no
1960 * 82572EI (8086:10b9) ... yes
1961 */
1962 if (sc->sc_type >= WM_T_82571) {
1963 ifp->if_capabilities |=
1964 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1965 }
1966
1967 /*
1968 	 * If we're an i82544 or greater (except i82547), we can do
1969 * TCP segmentation offload.
1970 */
1971 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1972 ifp->if_capabilities |= IFCAP_TSOv4;
1973 }
1974
1975 if (sc->sc_type >= WM_T_82571) {
1976 ifp->if_capabilities |= IFCAP_TSOv6;
1977 }
1978
1979 /*
1980 * Attach the interface.
1981 */
1982 if_attach(ifp);
1983 ether_ifattach(ifp, enaddr);
1984 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1985 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1986
1987 #ifdef WM_EVENT_COUNTERS
1988 /* Attach event counters. */
1989 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1990 NULL, xname, "txsstall");
1991 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1992 NULL, xname, "txdstall");
1993 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1994 NULL, xname, "txfifo_stall");
1995 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1996 NULL, xname, "txdw");
1997 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1998 NULL, xname, "txqe");
1999 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2000 NULL, xname, "rxintr");
2001 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2002 NULL, xname, "linkintr");
2003
2004 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2005 NULL, xname, "rxipsum");
2006 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2007 NULL, xname, "rxtusum");
2008 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2009 NULL, xname, "txipsum");
2010 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2011 NULL, xname, "txtusum");
2012 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2013 NULL, xname, "txtusum6");
2014
2015 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2016 NULL, xname, "txtso");
2017 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2018 NULL, xname, "txtso6");
2019 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2020 NULL, xname, "txtsopain");
2021
2022 for (i = 0; i < WM_NTXSEGS; i++) {
2023 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2024 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2025 NULL, xname, wm_txseg_evcnt_names[i]);
2026 }
2027
2028 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2029 NULL, xname, "txdrop");
2030
2031 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2032 NULL, xname, "tu");
2033
2034 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2035 NULL, xname, "tx_xoff");
2036 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2037 NULL, xname, "tx_xon");
2038 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2039 NULL, xname, "rx_xoff");
2040 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2041 NULL, xname, "rx_xon");
2042 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2043 NULL, xname, "rx_macctl");
2044 #endif /* WM_EVENT_COUNTERS */
2045
2046 if (pmf_device_register(self, wm_suspend, wm_resume))
2047 pmf_class_network_register(self, ifp);
2048 else
2049 aprint_error_dev(self, "couldn't establish power handler\n");
2050
2051 return;
2052
2053 /*
2054 * Free any resources we've allocated during the failed attach
2055 * attempt. Do this in reverse order and fall through.
2056 */
2057 fail_5:
2058 for (i = 0; i < WM_NRXDESC; i++) {
2059 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2060 bus_dmamap_destroy(sc->sc_dmat,
2061 sc->sc_rxsoft[i].rxs_dmamap);
2062 }
2063 fail_4:
2064 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2065 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2066 bus_dmamap_destroy(sc->sc_dmat,
2067 sc->sc_txsoft[i].txs_dmamap);
2068 }
2069 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2070 fail_3:
2071 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2072 fail_2:
2073 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2074 sc->sc_cd_size);
2075 fail_1:
2076 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2077 fail_0:
2078 return;
2079 }
2080
2081 static int
2082 wm_detach(device_t self, int flags __unused)
2083 {
2084 struct wm_softc *sc = device_private(self);
2085 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2086 int i, s;
2087
2088 s = splnet();
2089 	/* Stop the interface; callouts are stopped inside wm_stop() */
2090 wm_stop(ifp, 1);
2091 splx(s);
2092
2093 pmf_device_deregister(self);
2094
2095 /* Tell the firmware about the release */
2096 wm_release_manageability(sc);
2097 wm_release_hw_control(sc);
2098
2099 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2100
2101 /* Delete all remaining media. */
2102 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2103
2104 ether_ifdetach(ifp);
2105 if_detach(ifp);
2106 
2108 /* Unload RX dmamaps and free mbufs */
2109 wm_rxdrain(sc);
2110
2111 	/* Free DMA maps; this mirrors the failure path at the end of wm_attach() */
2112 for (i = 0; i < WM_NRXDESC; i++) {
2113 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2114 bus_dmamap_destroy(sc->sc_dmat,
2115 sc->sc_rxsoft[i].rxs_dmamap);
2116 }
2117 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2118 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2119 bus_dmamap_destroy(sc->sc_dmat,
2120 sc->sc_txsoft[i].txs_dmamap);
2121 }
2122 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2123 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2124 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2125 sc->sc_cd_size);
2126 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2127
2128 /* Disestablish the interrupt handler */
2129 if (sc->sc_ih != NULL) {
2130 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2131 sc->sc_ih = NULL;
2132 }
2133
2134 /* Unmap the registers */
2135 if (sc->sc_ss) {
2136 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2137 sc->sc_ss = 0;
2138 }
2139
2140 if (sc->sc_ios) {
2141 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2142 sc->sc_ios = 0;
2143 }
2144
2145 return 0;
2146 }
2147
2148 /*
2149 * wm_tx_offload:
2150 *
2151 * Set up TCP/IP checksumming parameters for the
2152 * specified packet.
2153 */
2154 static int
2155 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2156 uint8_t *fieldsp)
2157 {
2158 struct mbuf *m0 = txs->txs_mbuf;
2159 struct livengood_tcpip_ctxdesc *t;
2160 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2161 uint32_t ipcse;
2162 struct ether_header *eh;
2163 int offset, iphl;
2164 uint8_t fields;
2165
2166 /*
2167 * XXX It would be nice if the mbuf pkthdr had offset
2168 * fields for the protocol headers.
2169 */
2170
2171 eh = mtod(m0, struct ether_header *);
2172 switch (htons(eh->ether_type)) {
2173 case ETHERTYPE_IP:
2174 case ETHERTYPE_IPV6:
2175 offset = ETHER_HDR_LEN;
2176 break;
2177
2178 case ETHERTYPE_VLAN:
2179 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2180 break;
2181
2182 default:
2183 /*
2184 * Don't support this protocol or encapsulation.
2185 */
2186 *fieldsp = 0;
2187 *cmdp = 0;
2188 return 0;
2189 }
2190
2191 if ((m0->m_pkthdr.csum_flags &
2192 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2193 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2194 } else {
2195 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2196 }
2197 ipcse = offset + iphl - 1;
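	/*
	 * ipcse is the inclusive offset of the last byte of the IP
	 * header.  For example, a standard 20-byte IPv4 header behind a
	 * 14-byte Ethernet header gives ipcse = 14 + 20 - 1 = 33.
	 */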
2198
2199 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2200 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2201 seg = 0;
2202 fields = 0;
2203
2204 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2205 int hlen = offset + iphl;
2206 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2207
2208 if (__predict_false(m0->m_len <
2209 (hlen + sizeof(struct tcphdr)))) {
2210 /*
2211 * TCP/IP headers are not in the first mbuf; we need
2212 * to do this the slow and painful way. Let's just
2213 * hope this doesn't happen very often.
2214 */
2215 struct tcphdr th;
2216
2217 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2218
2219 m_copydata(m0, hlen, sizeof(th), &th);
2220 if (v4) {
2221 struct ip ip;
2222
2223 m_copydata(m0, offset, sizeof(ip), &ip);
2224 ip.ip_len = 0;
2225 m_copyback(m0,
2226 offset + offsetof(struct ip, ip_len),
2227 sizeof(ip.ip_len), &ip.ip_len);
2228 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2229 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2230 } else {
2231 struct ip6_hdr ip6;
2232
2233 m_copydata(m0, offset, sizeof(ip6), &ip6);
2234 ip6.ip6_plen = 0;
2235 m_copyback(m0,
2236 offset + offsetof(struct ip6_hdr, ip6_plen),
2237 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2238 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2239 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2240 }
2241 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2242 sizeof(th.th_sum), &th.th_sum);
2243
2244 hlen += th.th_off << 2;
2245 } else {
2246 /*
2247 * TCP/IP headers are in the first mbuf; we can do
2248 * this the easy way.
2249 */
2250 struct tcphdr *th;
2251
2252 if (v4) {
2253 struct ip *ip =
2254 (void *)(mtod(m0, char *) + offset);
2255 th = (void *)(mtod(m0, char *) + hlen);
2256
2257 ip->ip_len = 0;
2258 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2259 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2260 } else {
2261 struct ip6_hdr *ip6 =
2262 (void *)(mtod(m0, char *) + offset);
2263 th = (void *)(mtod(m0, char *) + hlen);
2264
2265 ip6->ip6_plen = 0;
2266 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2267 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2268 }
2269 hlen += th->th_off << 2;
2270 }
2271
2272 if (v4) {
2273 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2274 cmdlen |= WTX_TCPIP_CMD_IP;
2275 } else {
2276 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2277 ipcse = 0;
2278 }
2279 cmd |= WTX_TCPIP_CMD_TSE;
2280 cmdlen |= WTX_TCPIP_CMD_TSE |
2281 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2282 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2283 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
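		/*
		 * For example (values for illustration only): 54 bytes of
		 * Ethernet + IPv4 + TCP headers with an MSS of 1460 would
		 * yield WTX_TCPIP_SEG_HDRLEN(54) | WTX_TCPIP_SEG_MSS(1460).
		 */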
2284 }
2285
2286 /*
2287 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2288 * offload feature, if we load the context descriptor, we
2289 * MUST provide valid values for IPCSS and TUCSS fields.
2290 */
2291
2292 ipcs = WTX_TCPIP_IPCSS(offset) |
2293 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2294 WTX_TCPIP_IPCSE(ipcse);
2295 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2296 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2297 fields |= WTX_IXSM;
2298 }
2299
2300 offset += iphl;
2301
2302 if (m0->m_pkthdr.csum_flags &
2303 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2304 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2305 fields |= WTX_TXSM;
2306 tucs = WTX_TCPIP_TUCSS(offset) |
2307 WTX_TCPIP_TUCSO(offset +
2308 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2309 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2310 } else if ((m0->m_pkthdr.csum_flags &
2311 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2312 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2313 fields |= WTX_TXSM;
2314 tucs = WTX_TCPIP_TUCSS(offset) |
2315 WTX_TCPIP_TUCSO(offset +
2316 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2317 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2318 } else {
2319 /* Just initialize it to a valid TCP context. */
2320 tucs = WTX_TCPIP_TUCSS(offset) |
2321 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2322 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2323 }
2324
2325 /* Fill in the context descriptor. */
2326 t = (struct livengood_tcpip_ctxdesc *)
2327 &sc->sc_txdescs[sc->sc_txnext];
2328 t->tcpip_ipcs = htole32(ipcs);
2329 t->tcpip_tucs = htole32(tucs);
2330 t->tcpip_cmdlen = htole32(cmdlen);
2331 t->tcpip_seg = htole32(seg);
2332 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2333
2334 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2335 txs->txs_ndesc++;
2336
2337 *cmdp = cmd;
2338 *fieldsp = fields;
2339
2340 return 0;
2341 }
2342
2343 static void
2344 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2345 {
2346 struct mbuf *m;
2347 int i;
2348
2349 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2350 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2351 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2352 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2353 m->m_data, m->m_len, m->m_flags);
2354 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2355 i, i == 1 ? "" : "s");
2356 }
2357
2358 /*
2359 * wm_82547_txfifo_stall:
2360 *
2361 * Callout used to wait for the 82547 Tx FIFO to drain,
2362 * reset the FIFO pointers, and restart packet transmission.
2363 */
2364 static void
2365 wm_82547_txfifo_stall(void *arg)
2366 {
2367 struct wm_softc *sc = arg;
2368 int s;
2369
2370 s = splnet();
2371
2372 if (sc->sc_txfifo_stall) {
2373 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2374 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2375 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2376 /*
2377 * Packets have drained. Stop transmitter, reset
2378 * FIFO pointers, restart transmitter, and kick
2379 * the packet queue.
2380 */
2381 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2382 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2383 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2384 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2385 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2386 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2387 CSR_WRITE(sc, WMREG_TCTL, tctl);
2388 CSR_WRITE_FLUSH(sc);
2389
2390 sc->sc_txfifo_head = 0;
2391 sc->sc_txfifo_stall = 0;
2392 wm_start(&sc->sc_ethercom.ec_if);
2393 } else {
2394 /*
2395 * Still waiting for packets to drain; try again in
2396 * another tick.
2397 */
2398 callout_schedule(&sc->sc_txfifo_ch, 1);
2399 }
2400 }
2401
2402 splx(s);
2403 }
2404
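/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 *	Set or clear the EXTCNFCTR gate bit, which blocks ("gates") the
 *	hardware's automatic PHY configuration while software is
 *	reconfiguring the PHY.
 */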
2405 static void
2406 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2407 {
2408 uint32_t reg;
2409
2410 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2411
2412 if (on != 0)
2413 reg |= EXTCNFCTR_GATE_PHY_CFG;
2414 else
2415 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2416
2417 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2418 }
2419
2420 /*
2421 * wm_82547_txfifo_bugchk:
2422 *
2423 * Check for bug condition in the 82547 Tx FIFO. We need to
2424 * prevent enqueueing a packet that would wrap around the end
2425  * of the Tx FIFO ring buffer, otherwise the chip will croak.
2426 *
2427 * We do this by checking the amount of space before the end
2428 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2429 * the Tx FIFO, wait for all remaining packets to drain, reset
2430 * the internal FIFO pointers to the beginning, and restart
2431 * transmission on the interface.
2432 */
2433 #define WM_FIFO_HDR 0x10
2434 #define WM_82547_PAD_LEN 0x3e0
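/*
 * A note on the accounting below, for illustration: each frame is
 * charged its length plus a WM_FIFO_HDR (16-byte) FIFO header, rounded
 * up to 16-byte units, so a 64-byte frame consumes
 * roundup(64 + 16, 16) = 80 bytes of FIFO space.  A stall is triggered
 * when that length comes too close to wrapping the end of the FIFO,
 * with WM_82547_PAD_LEN as the slack term in the comparison.
 */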
2435 static int
2436 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2437 {
2438 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2439 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2440
2441 /* Just return if already stalled. */
2442 if (sc->sc_txfifo_stall)
2443 return 1;
2444
2445 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2446 /* Stall only occurs in half-duplex mode. */
2447 goto send_packet;
2448 }
2449
2450 if (len >= WM_82547_PAD_LEN + space) {
2451 sc->sc_txfifo_stall = 1;
2452 callout_schedule(&sc->sc_txfifo_ch, 1);
2453 return 1;
2454 }
2455
2456 send_packet:
2457 sc->sc_txfifo_head += len;
2458 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2459 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2460
2461 return 0;
2462 }
2463
2464 /*
2465 * wm_start: [ifnet interface function]
2466 *
2467 * Start packet transmission on the interface.
2468 */
2469 static void
2470 wm_start(struct ifnet *ifp)
2471 {
2472 struct wm_softc *sc = ifp->if_softc;
2473 struct mbuf *m0;
2474 struct m_tag *mtag;
2475 struct wm_txsoft *txs;
2476 bus_dmamap_t dmamap;
2477 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2478 bus_addr_t curaddr;
2479 bus_size_t seglen, curlen;
2480 uint32_t cksumcmd;
2481 uint8_t cksumfields;
2482
2483 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2484 return;
2485
2486 /*
2487 * Remember the previous number of free descriptors.
2488 */
2489 ofree = sc->sc_txfree;
2490
2491 /*
2492 * Loop through the send queue, setting up transmit descriptors
2493 * until we drain the queue, or use up all available transmit
2494 * descriptors.
2495 */
2496 for (;;) {
2497 /* Grab a packet off the queue. */
2498 IFQ_POLL(&ifp->if_snd, m0);
2499 if (m0 == NULL)
2500 break;
2501
2502 DPRINTF(WM_DEBUG_TX,
2503 ("%s: TX: have packet to transmit: %p\n",
2504 device_xname(sc->sc_dev), m0));
2505
2506 /* Get a work queue entry. */
2507 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2508 wm_txintr(sc);
2509 if (sc->sc_txsfree == 0) {
2510 DPRINTF(WM_DEBUG_TX,
2511 ("%s: TX: no free job descriptors\n",
2512 device_xname(sc->sc_dev)));
2513 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2514 break;
2515 }
2516 }
2517
2518 txs = &sc->sc_txsoft[sc->sc_txsnext];
2519 dmamap = txs->txs_dmamap;
2520
2521 use_tso = (m0->m_pkthdr.csum_flags &
2522 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2523
2524 /*
2525 * So says the Linux driver:
2526 * The controller does a simple calculation to make sure
2527 * there is enough room in the FIFO before initiating the
2528 		 * DMA for each buffer.  The check amounts to requiring
2529 		 *	ceil(buffer len / MSS) <= 4,
2530 		 * so to make sure we don't overrun the FIFO we adjust the
2531 		 * max buffer len if the MSS drops.
2532 */
2533 dmamap->dm_maxsegsz =
2534 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2535 ? m0->m_pkthdr.segsz << 2
2536 : WTX_MAX_LEN;
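		/*
		 * For illustration: with an MSS of 1460, dm_maxsegsz
		 * becomes 1460 << 2 = 5840 bytes (assuming that is below
		 * WTX_MAX_LEN), keeping each segment within the
		 * controller's ceil(len / MSS) limit above.
		 */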
2537
2538 /*
2539 * Load the DMA map. If this fails, the packet either
2540 * didn't fit in the allotted number of segments, or we
2541 * were short on resources. For the too-many-segments
2542 * case, we simply report an error and drop the packet,
2543 * since we can't sanely copy a jumbo packet to a single
2544 * buffer.
2545 */
2546 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2547 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2548 if (error) {
2549 if (error == EFBIG) {
2550 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2551 log(LOG_ERR, "%s: Tx packet consumes too many "
2552 "DMA segments, dropping...\n",
2553 device_xname(sc->sc_dev));
2554 IFQ_DEQUEUE(&ifp->if_snd, m0);
2555 wm_dump_mbuf_chain(sc, m0);
2556 m_freem(m0);
2557 continue;
2558 }
2559 /*
2560 * Short on resources, just stop for now.
2561 */
2562 DPRINTF(WM_DEBUG_TX,
2563 ("%s: TX: dmamap load failed: %d\n",
2564 device_xname(sc->sc_dev), error));
2565 break;
2566 }
2567
2568 segs_needed = dmamap->dm_nsegs;
2569 if (use_tso) {
2570 /* For sentinel descriptor; see below. */
2571 segs_needed++;
2572 }
2573
2574 /*
2575 * Ensure we have enough descriptors free to describe
2576 * the packet. Note, we always reserve one descriptor
2577 * at the end of the ring due to the semantics of the
2578 * TDT register, plus one more in the event we need
2579 * to load offload context.
2580 */
2581 if (segs_needed > sc->sc_txfree - 2) {
2582 /*
2583 * Not enough free descriptors to transmit this
2584 * packet. We haven't committed anything yet,
2585 * so just unload the DMA map, put the packet
2586 			 * back on the queue, and punt.  Notify the upper
2587 * layer that there are no more slots left.
2588 */
2589 DPRINTF(WM_DEBUG_TX,
2590 ("%s: TX: need %d (%d) descriptors, have %d\n",
2591 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2592 segs_needed, sc->sc_txfree - 1));
2593 ifp->if_flags |= IFF_OACTIVE;
2594 bus_dmamap_unload(sc->sc_dmat, dmamap);
2595 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2596 break;
2597 }
2598
2599 /*
2600 * Check for 82547 Tx FIFO bug. We need to do this
2601 * once we know we can transmit the packet, since we
2602 * do some internal FIFO space accounting here.
2603 */
2604 if (sc->sc_type == WM_T_82547 &&
2605 wm_82547_txfifo_bugchk(sc, m0)) {
2606 DPRINTF(WM_DEBUG_TX,
2607 ("%s: TX: 82547 Tx FIFO bug detected\n",
2608 device_xname(sc->sc_dev)));
2609 ifp->if_flags |= IFF_OACTIVE;
2610 bus_dmamap_unload(sc->sc_dmat, dmamap);
2611 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2612 break;
2613 }
2614
2615 IFQ_DEQUEUE(&ifp->if_snd, m0);
2616
2617 /*
2618 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2619 */
2620
2621 DPRINTF(WM_DEBUG_TX,
2622 ("%s: TX: packet has %d (%d) DMA segments\n",
2623 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2624
2625 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2626
2627 /*
2628 * Store a pointer to the packet so that we can free it
2629 * later.
2630 *
2631 * Initially, we consider the number of descriptors the
2632 		 * packet uses to be the number of DMA segments.  This may be
2633 * incremented by 1 if we do checksum offload (a descriptor
2634 * is used to set the checksum context).
2635 */
2636 txs->txs_mbuf = m0;
2637 txs->txs_firstdesc = sc->sc_txnext;
2638 txs->txs_ndesc = segs_needed;
2639
2640 /* Set up offload parameters for this packet. */
2641 if (m0->m_pkthdr.csum_flags &
2642 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2643 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2644 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2645 if (wm_tx_offload(sc, txs, &cksumcmd,
2646 &cksumfields) != 0) {
2647 /* Error message already displayed. */
2648 bus_dmamap_unload(sc->sc_dmat, dmamap);
2649 continue;
2650 }
2651 } else {
2652 cksumcmd = 0;
2653 cksumfields = 0;
2654 }
2655
2656 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2657
2658 /* Sync the DMA map. */
2659 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2660 BUS_DMASYNC_PREWRITE);
2661
2662 /*
2663 		 * Initialize the transmit descriptors.
2664 */
2665 for (nexttx = sc->sc_txnext, seg = 0;
2666 seg < dmamap->dm_nsegs; seg++) {
2667 for (seglen = dmamap->dm_segs[seg].ds_len,
2668 curaddr = dmamap->dm_segs[seg].ds_addr;
2669 seglen != 0;
2670 curaddr += curlen, seglen -= curlen,
2671 nexttx = WM_NEXTTX(sc, nexttx)) {
2672 curlen = seglen;
2673
2674 /*
2675 * So says the Linux driver:
2676 * Work around for premature descriptor
2677 * write-backs in TSO mode. Append a
2678 * 4-byte sentinel descriptor.
2679 */
2680 if (use_tso &&
2681 seg == dmamap->dm_nsegs - 1 &&
2682 curlen > 8)
2683 curlen -= 4;
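				/*
				 * The 4 bytes trimmed here become a
				 * separate sentinel descriptor on the next
				 * pass of this loop; room for it was
				 * reserved above via segs_needed++.
				 */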
2684
2685 wm_set_dma_addr(
2686 &sc->sc_txdescs[nexttx].wtx_addr,
2687 curaddr);
2688 sc->sc_txdescs[nexttx].wtx_cmdlen =
2689 htole32(cksumcmd | curlen);
2690 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2691 0;
2692 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2693 cksumfields;
2694 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2695 lasttx = nexttx;
2696
2697 DPRINTF(WM_DEBUG_TX,
2698 ("%s: TX: desc %d: low %#" PRIxPADDR ", "
2699 "len %#04zx\n",
2700 device_xname(sc->sc_dev), nexttx,
2701 curaddr & 0xffffffffUL, curlen));
2702 }
2703 }
2704
2705 KASSERT(lasttx != -1);
2706
2707 /*
2708 * Set up the command byte on the last descriptor of
2709 * the packet. If we're in the interrupt delay window,
2710 * delay the interrupt.
2711 */
2712 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2713 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2714
2715 /*
2716 * If VLANs are enabled and the packet has a VLAN tag, set
2717 * up the descriptor to encapsulate the packet for us.
2718 *
2719 * This is only valid on the last descriptor of the packet.
2720 */
2721 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2722 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2723 htole32(WTX_CMD_VLE);
2724 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2725 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2726 }
2727
2728 txs->txs_lastdesc = lasttx;
2729
2730 DPRINTF(WM_DEBUG_TX,
2731 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2732 device_xname(sc->sc_dev),
2733 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2734
2735 /* Sync the descriptors we're using. */
2736 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2737 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2738
2739 /* Give the packet to the chip. */
2740 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2741
2742 DPRINTF(WM_DEBUG_TX,
2743 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2744
2745 DPRINTF(WM_DEBUG_TX,
2746 ("%s: TX: finished transmitting packet, job %d\n",
2747 device_xname(sc->sc_dev), sc->sc_txsnext));
2748
2749 /* Advance the tx pointer. */
2750 sc->sc_txfree -= txs->txs_ndesc;
2751 sc->sc_txnext = nexttx;
2752
2753 sc->sc_txsfree--;
2754 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2755
2756 /* Pass the packet to any BPF listeners. */
2757 bpf_mtap(ifp, m0);
2758 }
2759
2760 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2761 /* No more slots; notify upper layer. */
2762 ifp->if_flags |= IFF_OACTIVE;
2763 }
2764
2765 if (sc->sc_txfree != ofree) {
2766 /* Set a watchdog timer in case the chip flakes out. */
2767 ifp->if_timer = 5;
2768 }
2769 }
2770
2771 /*
2772 * wm_nq_tx_offload:
2773 *
2774 * Set up TCP/IP checksumming parameters for the
2775  * specified packet, for NEWQUEUE devices.
2776 */
2777 static int
2778 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2779 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2780 {
2781 struct mbuf *m0 = txs->txs_mbuf;
2782 struct m_tag *mtag;
2783 uint32_t vl_len, mssidx, cmdc;
2784 struct ether_header *eh;
2785 int offset, iphl;
2786
2787 /*
2788 * XXX It would be nice if the mbuf pkthdr had offset
2789 * fields for the protocol headers.
2790 */
2791
2792 eh = mtod(m0, struct ether_header *);
2793 switch (htons(eh->ether_type)) {
2794 case ETHERTYPE_IP:
2795 case ETHERTYPE_IPV6:
2796 offset = ETHER_HDR_LEN;
2797 break;
2798
2799 case ETHERTYPE_VLAN:
2800 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2801 break;
2802
2803 default:
2804 /*
2805 * Don't support this protocol or encapsulation.
2806 */
2807 *do_csum = false;
2808 return 0;
2809 }
2810 *do_csum = true;
2811 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2812 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2813
2814 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2815 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
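	/*
	 * vl_len packs the MAC header length, the IP header length and
	 * the VLAN tag into a single context-descriptor word.  For
	 * example, a plain Ethernet header gives a MACLEN field of 14
	 * here, with IPLEN and any VLAN tag OR'd in below.
	 */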
2816
2817 if ((m0->m_pkthdr.csum_flags &
2818 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2819 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2820 } else {
2821 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2822 }
2823 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2824 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
2825
2826 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2827 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2828 << NQTXC_VLLEN_VLAN_SHIFT);
2829 *cmdlenp |= NQTX_CMD_VLE;
2830 }
2831
2832 *fieldsp = 0;
2833 mssidx = 0;
2834
2835 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2836 int hlen = offset + iphl;
2837 int tcp_hlen;
2838 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2839
2840 if (__predict_false(m0->m_len <
2841 (hlen + sizeof(struct tcphdr)))) {
2842 /*
2843 * TCP/IP headers are not in the first mbuf; we need
2844 * to do this the slow and painful way. Let's just
2845 * hope this doesn't happen very often.
2846 */
2847 struct tcphdr th;
2848
2849 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2850
2851 m_copydata(m0, hlen, sizeof(th), &th);
2852 if (v4) {
2853 struct ip ip;
2854
2855 m_copydata(m0, offset, sizeof(ip), &ip);
2856 ip.ip_len = 0;
2857 m_copyback(m0,
2858 offset + offsetof(struct ip, ip_len),
2859 sizeof(ip.ip_len), &ip.ip_len);
2860 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2861 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2862 } else {
2863 struct ip6_hdr ip6;
2864
2865 m_copydata(m0, offset, sizeof(ip6), &ip6);
2866 ip6.ip6_plen = 0;
2867 m_copyback(m0,
2868 offset + offsetof(struct ip6_hdr, ip6_plen),
2869 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2870 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2871 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2872 }
2873 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2874 sizeof(th.th_sum), &th.th_sum);
2875
2876 tcp_hlen = th.th_off << 2;
2877 } else {
2878 /*
2879 * TCP/IP headers are in the first mbuf; we can do
2880 * this the easy way.
2881 */
2882 struct tcphdr *th;
2883
2884 if (v4) {
2885 struct ip *ip =
2886 (void *)(mtod(m0, char *) + offset);
2887 th = (void *)(mtod(m0, char *) + hlen);
2888
2889 ip->ip_len = 0;
2890 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2891 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2892 } else {
2893 struct ip6_hdr *ip6 =
2894 (void *)(mtod(m0, char *) + offset);
2895 th = (void *)(mtod(m0, char *) + hlen);
2896
2897 ip6->ip6_plen = 0;
2898 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2899 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2900 }
2901 tcp_hlen = th->th_off << 2;
2902 }
2903 hlen += tcp_hlen;
2904 *cmdlenp |= NQTX_CMD_TSE;
2905
2906 if (v4) {
2907 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2908 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2909 } else {
2910 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2911 *fieldsp |= NQTXD_FIELDS_TUXSM;
2912 }
2913 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2914 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2915 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2916 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2917 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2918 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2919 } else {
2920 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2921 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2922 }
2923
2924 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2925 *fieldsp |= NQTXD_FIELDS_IXSM;
2926 cmdc |= NQTXC_CMD_IP4;
2927 }
2928
2929 if (m0->m_pkthdr.csum_flags &
2930 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2931 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2932 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2933 cmdc |= NQTXC_CMD_TCP;
2934 } else {
2935 cmdc |= NQTXC_CMD_UDP;
2936 }
2937 cmdc |= NQTXC_CMD_IP4;
2938 *fieldsp |= NQTXD_FIELDS_TUXSM;
2939 }
2940 if (m0->m_pkthdr.csum_flags &
2941 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2942 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2943 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2944 cmdc |= NQTXC_CMD_TCP;
2945 } else {
2946 cmdc |= NQTXC_CMD_UDP;
2947 }
2948 cmdc |= NQTXC_CMD_IP6;
2949 *fieldsp |= NQTXD_FIELDS_TUXSM;
2950 }
2951
2952 /* Fill in the context descriptor. */
2953 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
2954 htole32(vl_len);
2955 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
2956 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
2957 htole32(cmdc);
2958 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
2959 htole32(mssidx);
2960 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2961 DPRINTF(WM_DEBUG_TX,
2962 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
2963 sc->sc_txnext, 0, vl_len));
2964 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
2965 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2966 txs->txs_ndesc++;
2967 return 0;
2968 }
2969
2970 /*
2971 * wm_nq_start: [ifnet interface function]
2972 *
2973  * Start packet transmission on the interface for NEWQUEUE devices.
2974 */
2975 static void
2976 wm_nq_start(struct ifnet *ifp)
2977 {
2978 struct wm_softc *sc = ifp->if_softc;
2979 struct mbuf *m0;
2980 struct m_tag *mtag;
2981 struct wm_txsoft *txs;
2982 bus_dmamap_t dmamap;
2983 int error, nexttx, lasttx = -1, seg, segs_needed;
2984 bool do_csum, sent;
2985 uint32_t cmdlen, fields, dcmdlen;
2986
2987 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2988 return;
2989
2990 sent = false;
2991
2992 /*
2993 * Loop through the send queue, setting up transmit descriptors
2994 * until we drain the queue, or use up all available transmit
2995 * descriptors.
2996 */
2997 for (;;) {
2998 /* Grab a packet off the queue. */
2999 IFQ_POLL(&ifp->if_snd, m0);
3000 if (m0 == NULL)
3001 break;
3002
3003 DPRINTF(WM_DEBUG_TX,
3004 ("%s: TX: have packet to transmit: %p\n",
3005 device_xname(sc->sc_dev), m0));
3006
3007 /* Get a work queue entry. */
3008 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3009 wm_txintr(sc);
3010 if (sc->sc_txsfree == 0) {
3011 DPRINTF(WM_DEBUG_TX,
3012 ("%s: TX: no free job descriptors\n",
3013 device_xname(sc->sc_dev)));
3014 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3015 break;
3016 }
3017 }
3018
3019 txs = &sc->sc_txsoft[sc->sc_txsnext];
3020 dmamap = txs->txs_dmamap;
3021
3022 /*
3023 * Load the DMA map. If this fails, the packet either
3024 * didn't fit in the allotted number of segments, or we
3025 * were short on resources. For the too-many-segments
3026 * case, we simply report an error and drop the packet,
3027 * since we can't sanely copy a jumbo packet to a single
3028 * buffer.
3029 */
3030 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3031 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3032 if (error) {
3033 if (error == EFBIG) {
3034 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3035 log(LOG_ERR, "%s: Tx packet consumes too many "
3036 "DMA segments, dropping...\n",
3037 device_xname(sc->sc_dev));
3038 IFQ_DEQUEUE(&ifp->if_snd, m0);
3039 wm_dump_mbuf_chain(sc, m0);
3040 m_freem(m0);
3041 continue;
3042 }
3043 /*
3044 * Short on resources, just stop for now.
3045 */
3046 DPRINTF(WM_DEBUG_TX,
3047 ("%s: TX: dmamap load failed: %d\n",
3048 device_xname(sc->sc_dev), error));
3049 break;
3050 }
3051
3052 segs_needed = dmamap->dm_nsegs;
3053
3054 /*
3055 * Ensure we have enough descriptors free to describe
3056 * the packet. Note, we always reserve one descriptor
3057 * at the end of the ring due to the semantics of the
3058 * TDT register, plus one more in the event we need
3059 * to load offload context.
3060 */
3061 if (segs_needed > sc->sc_txfree - 2) {
3062 /*
3063 * Not enough free descriptors to transmit this
3064 * packet. We haven't committed anything yet,
3065 * so just unload the DMA map, put the packet
3066 			 * back on the queue, and punt.  Notify the upper
3067 * layer that there are no more slots left.
3068 */
3069 DPRINTF(WM_DEBUG_TX,
3070 ("%s: TX: need %d (%d) descriptors, have %d\n",
3071 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3072 segs_needed, sc->sc_txfree - 1));
3073 ifp->if_flags |= IFF_OACTIVE;
3074 bus_dmamap_unload(sc->sc_dmat, dmamap);
3075 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3076 break;
3077 }
3078
3079 IFQ_DEQUEUE(&ifp->if_snd, m0);
3080
3081 /*
3082 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3083 */
3084
3085 DPRINTF(WM_DEBUG_TX,
3086 ("%s: TX: packet has %d (%d) DMA segments\n",
3087 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3088
3089 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3090
3091 /*
3092 * Store a pointer to the packet so that we can free it
3093 * later.
3094 *
3095 * Initially, we consider the number of descriptors the
3096 		 * packet uses to be the number of DMA segments.  This may be
3097 * incremented by 1 if we do checksum offload (a descriptor
3098 * is used to set the checksum context).
3099 */
3100 txs->txs_mbuf = m0;
3101 txs->txs_firstdesc = sc->sc_txnext;
3102 txs->txs_ndesc = segs_needed;
3103
3104 /* Set up offload parameters for this packet. */
3105 if (m0->m_pkthdr.csum_flags &
3106 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3107 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3108 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3109 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3110 &do_csum) != 0) {
3111 /* Error message already displayed. */
3112 bus_dmamap_unload(sc->sc_dmat, dmamap);
3113 continue;
3114 }
3115 } else {
3116 do_csum = false;
3117 }
3118
3119 /* Sync the DMA map. */
3120 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3121 BUS_DMASYNC_PREWRITE);
3122
3123 /*
3124 * Initialize the first transmit descriptor.
3125 */
3126 nexttx = sc->sc_txnext;
3127 if (!do_csum) {
3128 			/* Set up a legacy descriptor */
3129 wm_set_dma_addr(
3130 &sc->sc_txdescs[nexttx].wtx_addr,
3131 dmamap->dm_segs[0].ds_addr);
3132 sc->sc_txdescs[nexttx].wtx_cmdlen =
3133 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3134 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3135 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3136 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3137 NULL) {
3138 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3139 htole32(WTX_CMD_VLE);
3140 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3141 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3142 } else {
3143 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3144 }
3145 dcmdlen = 0;
3146 } else {
3147 			/* Set up an advanced data descriptor */
3148 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3149 htole64(dmamap->dm_segs[0].ds_addr);
3150 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3151 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3152 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3153 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3154 htole32(fields);
3155 DPRINTF(WM_DEBUG_TX,
3156 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3157 device_xname(sc->sc_dev), nexttx,
3158 dmamap->dm_segs[0].ds_addr));
3159 DPRINTF(WM_DEBUG_TX,
3160 ("\t 0x%08x%08x\n", fields,
3161 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3162 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3163 }
3164
3165 lasttx = nexttx;
3166 nexttx = WM_NEXTTX(sc, nexttx);
3167 /*
3168 		 * Fill in the remaining descriptors.  The legacy and
3169 		 * advanced formats are identical from here on.
3170 */
3171 for (seg = 1; seg < dmamap->dm_nsegs;
3172 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3173 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3174 htole64(dmamap->dm_segs[seg].ds_addr);
3175 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3176 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3177 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3178 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3179 lasttx = nexttx;
3180
3181 DPRINTF(WM_DEBUG_TX,
3182 ("%s: TX: desc %d: %#" PRIxPADDR ", "
3183 "len %#04zx\n",
3184 device_xname(sc->sc_dev), nexttx,
3185 dmamap->dm_segs[seg].ds_addr,
3186 dmamap->dm_segs[seg].ds_len));
3187 }
3188
3189 KASSERT(lasttx != -1);
3190
3191 /*
3192 * Set up the command byte on the last descriptor of
3193 * the packet. If we're in the interrupt delay window,
3194 * delay the interrupt.
3195 */
3196 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3197 (NQTX_CMD_EOP | NQTX_CMD_RS));
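		/*
		 * The assertion above is what lets us set EOP and RS
		 * through the legacy descriptor view even when the slot
		 * holds an advanced descriptor: the command bits occupy
		 * the same positions in both layouts.
		 */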
3198 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3199 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3200
3201 txs->txs_lastdesc = lasttx;
3202
3203 DPRINTF(WM_DEBUG_TX,
3204 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3205 device_xname(sc->sc_dev),
3206 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3207
3208 /* Sync the descriptors we're using. */
3209 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3210 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3211
3212 /* Give the packet to the chip. */
3213 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3214 sent = true;
3215
3216 DPRINTF(WM_DEBUG_TX,
3217 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3218
3219 DPRINTF(WM_DEBUG_TX,
3220 ("%s: TX: finished transmitting packet, job %d\n",
3221 device_xname(sc->sc_dev), sc->sc_txsnext));
3222
3223 /* Advance the tx pointer. */
3224 sc->sc_txfree -= txs->txs_ndesc;
3225 sc->sc_txnext = nexttx;
3226
3227 sc->sc_txsfree--;
3228 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3229
3230 /* Pass the packet to any BPF listeners. */
3231 bpf_mtap(ifp, m0);
3232 }
3233
3234 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3235 /* No more slots; notify upper layer. */
3236 ifp->if_flags |= IFF_OACTIVE;
3237 }
3238
3239 if (sent) {
3240 /* Set a watchdog timer in case the chip flakes out. */
3241 ifp->if_timer = 5;
3242 }
3243 }
3244
3245 /*
3246 * wm_watchdog: [ifnet interface function]
3247 *
3248 * Watchdog timer handler.
3249 */
3250 static void
3251 wm_watchdog(struct ifnet *ifp)
3252 {
3253 struct wm_softc *sc = ifp->if_softc;
3254
3255 /*
3256 * Since we're using delayed interrupts, sweep up
3257 * before we report an error.
3258 */
3259 wm_txintr(sc);
3260
3261 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3262 #ifdef WM_DEBUG
3263 int i, j;
3264 struct wm_txsoft *txs;
3265 #endif
3266 log(LOG_ERR,
3267 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3268 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3269 sc->sc_txnext);
3270 ifp->if_oerrors++;
3271 #ifdef WM_DEBUG
3272 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3273 i = WM_NEXTTXS(sc, i)) {
3274 txs = &sc->sc_txsoft[i];
3275 printf("txs %d tx %d -> %d\n",
3276 i, txs->txs_firstdesc, txs->txs_lastdesc);
3277 for (j = txs->txs_firstdesc; ;
3278 j = WM_NEXTTX(sc, j)) {
3279 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3280 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3281 printf("\t %#08x%08x\n",
3282 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3283 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3284 if (j == txs->txs_lastdesc)
3285 break;
3286 }
3287 }
3288 #endif
3289 /* Reset the interface. */
3290 (void) wm_init(ifp);
3291 }
3292
3293 /* Try to get more packets going. */
3294 ifp->if_start(ifp);
3295 }
3296
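/*
 * wm_ifflags_cb:
 *
 *	Ethercom callback, invoked when interface flags change.  Handles
 *	PROMISC/ALLMULTI changes in place and returns ENETRESET when a
 *	full re-initialization is required.
 */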
3297 static int
3298 wm_ifflags_cb(struct ethercom *ec)
3299 {
3300 struct ifnet *ifp = &ec->ec_if;
3301 struct wm_softc *sc = ifp->if_softc;
3302 int change = ifp->if_flags ^ sc->sc_if_flags;
3303
3304 if (change != 0)
3305 sc->sc_if_flags = ifp->if_flags;
3306
3307 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3308 return ENETRESET;
3309
3310 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3311 wm_set_filter(sc);
3312
3313 wm_set_vlan(sc);
3314
3315 return 0;
3316 }
3317
3318 /*
3319 * wm_ioctl: [ifnet interface function]
3320 *
3321 * Handle control requests from the operator.
3322 */
3323 static int
3324 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3325 {
3326 struct wm_softc *sc = ifp->if_softc;
3327 struct ifreq *ifr = (struct ifreq *) data;
3328 struct ifaddr *ifa = (struct ifaddr *)data;
3329 struct sockaddr_dl *sdl;
3330 int s, error;
3331
3332 s = splnet();
3333
3334 switch (cmd) {
3335 case SIOCSIFMEDIA:
3336 case SIOCGIFMEDIA:
3337 /* Flow control requires full-duplex mode. */
3338 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3339 (ifr->ifr_media & IFM_FDX) == 0)
3340 ifr->ifr_media &= ~IFM_ETH_FMASK;
3341 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3342 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3343 /* We can do both TXPAUSE and RXPAUSE. */
3344 ifr->ifr_media |=
3345 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3346 }
3347 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3348 }
3349 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3350 break;
3351 case SIOCINITIFADDR:
3352 if (ifa->ifa_addr->sa_family == AF_LINK) {
3353 sdl = satosdl(ifp->if_dl->ifa_addr);
3354 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3355 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3356 /* unicast address is first multicast entry */
3357 wm_set_filter(sc);
3358 error = 0;
3359 break;
3360 }
3361 /*FALLTHROUGH*/
3362 default:
3363 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3364 break;
3365
3366 error = 0;
3367
3368 if (cmd == SIOCSIFCAP)
3369 error = (*ifp->if_init)(ifp);
3370 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3371 ;
3372 else if (ifp->if_flags & IFF_RUNNING) {
3373 /*
3374 * Multicast list has changed; set the hardware filter
3375 * accordingly.
3376 */
3377 wm_set_filter(sc);
3378 }
3379 break;
3380 }
3381
3382 /* Try to get more packets going. */
3383 ifp->if_start(ifp);
3384
3385 splx(s);
3386 return error;
3387 }
3388
3389 /*
3390 * wm_intr:
3391 *
3392 * Interrupt service routine.
3393 */
3394 static int
3395 wm_intr(void *arg)
3396 {
3397 struct wm_softc *sc = arg;
3398 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3399 uint32_t icr;
3400 int handled = 0;
3401
3402 while (1 /* CONSTCOND */) {
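		/*
		 * Reading ICR acknowledges (clears) the latched interrupt
		 * causes, so we loop until none of the causes we care
		 * about remain asserted.
		 */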
3403 icr = CSR_READ(sc, WMREG_ICR);
3404 if ((icr & sc->sc_icr) == 0)
3405 break;
3406 rnd_add_uint32(&sc->rnd_source, icr);
3407
3408 handled = 1;
3409
3410 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3411 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3412 DPRINTF(WM_DEBUG_RX,
3413 ("%s: RX: got Rx intr 0x%08x\n",
3414 device_xname(sc->sc_dev),
3415 icr & (ICR_RXDMT0|ICR_RXT0)));
3416 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3417 }
3418 #endif
3419 wm_rxintr(sc);
3420
3421 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3422 if (icr & ICR_TXDW) {
3423 DPRINTF(WM_DEBUG_TX,
3424 ("%s: TX: got TXDW interrupt\n",
3425 device_xname(sc->sc_dev)));
3426 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3427 }
3428 #endif
3429 wm_txintr(sc);
3430
3431 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3432 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3433 wm_linkintr(sc, icr);
3434 }
3435
3436 if (icr & ICR_RXO) {
3437 #if defined(WM_DEBUG)
3438 log(LOG_WARNING, "%s: Receive overrun\n",
3439 device_xname(sc->sc_dev));
3440 #endif /* defined(WM_DEBUG) */
3441 }
3442 }
3443
3444 if (handled) {
3445 /* Try to get more packets going. */
3446 ifp->if_start(ifp);
3447 }
3448
3449 return handled;
3450 }
3451
3452 /*
3453 * wm_txintr:
3454 *
3455 * Helper; handle transmit interrupts.
3456 */
3457 static void
3458 wm_txintr(struct wm_softc *sc)
3459 {
3460 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3461 struct wm_txsoft *txs;
3462 uint8_t status;
3463 int i;
3464
3465 ifp->if_flags &= ~IFF_OACTIVE;
3466
3467 /*
3468 * Go through the Tx list and free mbufs for those
3469 * frames which have been transmitted.
3470 */
3471 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3472 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3473 txs = &sc->sc_txsoft[i];
3474
3475 DPRINTF(WM_DEBUG_TX,
3476 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3477
3478 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3479 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3480
3481 status =
3482 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3483 if ((status & WTX_ST_DD) == 0) {
3484 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3485 BUS_DMASYNC_PREREAD);
3486 break;
3487 }
3488
3489 DPRINTF(WM_DEBUG_TX,
3490 ("%s: TX: job %d done: descs %d..%d\n",
3491 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3492 txs->txs_lastdesc));
3493
3494 /*
3495 * XXX We should probably be using the statistics
3496 * XXX registers, but I don't know if they exist
3497 * XXX on chips before the i82544.
3498 */
3499
3500 #ifdef WM_EVENT_COUNTERS
3501 if (status & WTX_ST_TU)
3502 WM_EVCNT_INCR(&sc->sc_ev_tu);
3503 #endif /* WM_EVENT_COUNTERS */
3504
3505 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3506 ifp->if_oerrors++;
3507 if (status & WTX_ST_LC)
3508 log(LOG_WARNING, "%s: late collision\n",
3509 device_xname(sc->sc_dev));
3510 else if (status & WTX_ST_EC) {
3511 ifp->if_collisions += 16;
3512 log(LOG_WARNING, "%s: excessive collisions\n",
3513 device_xname(sc->sc_dev));
3514 }
3515 } else
3516 ifp->if_opackets++;
3517
3518 sc->sc_txfree += txs->txs_ndesc;
3519 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3520 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3521 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3522 m_freem(txs->txs_mbuf);
3523 txs->txs_mbuf = NULL;
3524 }
3525
3526 /* Update the dirty transmit buffer pointer. */
3527 sc->sc_txsdirty = i;
3528 DPRINTF(WM_DEBUG_TX,
3529 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3530
3531 /*
3532 * If there are no more pending transmissions, cancel the watchdog
3533 * timer.
3534 */
3535 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3536 ifp->if_timer = 0;
3537 }
3538
3539 /*
3540 * wm_rxintr:
3541 *
3542 * Helper; handle receive interrupts.
3543 */
3544 static void
3545 wm_rxintr(struct wm_softc *sc)
3546 {
3547 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3548 struct wm_rxsoft *rxs;
3549 struct mbuf *m;
3550 int i, len;
3551 uint8_t status, errors;
3552 uint16_t vlantag;
3553
3554 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3555 rxs = &sc->sc_rxsoft[i];
3556
3557 DPRINTF(WM_DEBUG_RX,
3558 ("%s: RX: checking descriptor %d\n",
3559 device_xname(sc->sc_dev), i));
3560
3561 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3562
3563 status = sc->sc_rxdescs[i].wrx_status;
3564 errors = sc->sc_rxdescs[i].wrx_errors;
3565 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3566 vlantag = sc->sc_rxdescs[i].wrx_special;
3567
3568 if ((status & WRX_ST_DD) == 0) {
3569 /*
3570 * We have processed all of the receive descriptors.
3571 */
3572 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3573 break;
3574 }
3575
3576 if (__predict_false(sc->sc_rxdiscard)) {
3577 DPRINTF(WM_DEBUG_RX,
3578 ("%s: RX: discarding contents of descriptor %d\n",
3579 device_xname(sc->sc_dev), i));
3580 WM_INIT_RXDESC(sc, i);
3581 if (status & WRX_ST_EOP) {
3582 /* Reset our state. */
3583 DPRINTF(WM_DEBUG_RX,
3584 ("%s: RX: resetting rxdiscard -> 0\n",
3585 device_xname(sc->sc_dev)));
3586 sc->sc_rxdiscard = 0;
3587 }
3588 continue;
3589 }
3590
3591 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3592 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3593
3594 m = rxs->rxs_mbuf;
3595
3596 /*
3597 * Add a new receive buffer to the ring, unless of
3598 * course the length is zero. Treat the latter as a
3599 * failed mapping.
3600 */
3601 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3602 /*
3603 * Failed, throw away what we've done so
3604 * far, and discard the rest of the packet.
3605 */
3606 ifp->if_ierrors++;
3607 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3608 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3609 WM_INIT_RXDESC(sc, i);
3610 if ((status & WRX_ST_EOP) == 0)
3611 sc->sc_rxdiscard = 1;
3612 if (sc->sc_rxhead != NULL)
3613 m_freem(sc->sc_rxhead);
3614 WM_RXCHAIN_RESET(sc);
3615 DPRINTF(WM_DEBUG_RX,
3616 ("%s: RX: Rx buffer allocation failed, "
3617 "dropping packet%s\n", device_xname(sc->sc_dev),
3618 sc->sc_rxdiscard ? " (discard)" : ""));
3619 continue;
3620 }
3621
3622 m->m_len = len;
3623 sc->sc_rxlen += len;
3624 DPRINTF(WM_DEBUG_RX,
3625 ("%s: RX: buffer at %p len %d\n",
3626 device_xname(sc->sc_dev), m->m_data, len));
3627
3628 /*
3629 * If this is not the end of the packet, keep
3630 * looking.
3631 */
3632 if ((status & WRX_ST_EOP) == 0) {
3633 WM_RXCHAIN_LINK(sc, m);
3634 DPRINTF(WM_DEBUG_RX,
3635 ("%s: RX: not yet EOP, rxlen -> %d\n",
3636 device_xname(sc->sc_dev), sc->sc_rxlen));
3637 continue;
3638 }
3639
3640 /*
3641 * Okay, we have the entire packet now. The chip is
3642 * configured to include the FCS except on the I350
3643 * (not all chips can be configured to strip it), so
3644 * we need to trim it. We may also need to adjust the
3645 * length of the previous mbuf in the chain if the
3646 * current mbuf is too short to hold the whole FCS.
3647 * Due to a hardware erratum, RCTL_SECRC is always set
3648 * on the I350, so it strips the FCS and we don't.
3649 */
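/*
 * Example: with ETHER_CRC_LEN == 4, if the final mbuf holds
 * only 2 bytes, both are FCS; we zero that mbuf's length and
 * trim the remaining 2 FCS bytes from the tail of the
 * previous mbuf (sc_rxtail).
 */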
3650 if (sc->sc_type != WM_T_I350) {
3651 if (m->m_len < ETHER_CRC_LEN) {
3652 sc->sc_rxtail->m_len
3653 -= (ETHER_CRC_LEN - m->m_len);
3654 m->m_len = 0;
3655 } else
3656 m->m_len -= ETHER_CRC_LEN;
3657 len = sc->sc_rxlen - ETHER_CRC_LEN;
3658 } else
3659 len = sc->sc_rxlen;
3660
3661 WM_RXCHAIN_LINK(sc, m);
3662
3663 *sc->sc_rxtailp = NULL;
3664 m = sc->sc_rxhead;
3665
3666 WM_RXCHAIN_RESET(sc);
3667
3668 DPRINTF(WM_DEBUG_RX,
3669 ("%s: RX: have entire packet, len -> %d\n",
3670 device_xname(sc->sc_dev), len));
3671
3672 /*
3673 * If an error occurred, update stats and drop the packet.
3674 */
3675 if (errors &
3676 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3677 if (errors & WRX_ER_SE)
3678 log(LOG_WARNING, "%s: symbol error\n",
3679 device_xname(sc->sc_dev));
3680 else if (errors & WRX_ER_SEQ)
3681 log(LOG_WARNING, "%s: receive sequence error\n",
3682 device_xname(sc->sc_dev));
3683 else if (errors & WRX_ER_CE)
3684 log(LOG_WARNING, "%s: CRC error\n",
3685 device_xname(sc->sc_dev));
3686 m_freem(m);
3687 continue;
3688 }
3689
3690 /*
3691 * No errors. Receive the packet.
3692 */
3693 m->m_pkthdr.rcvif = ifp;
3694 m->m_pkthdr.len = len;
3695
3696 /*
3697 * If VLANs are enabled, VLAN packets have been unwrapped
3698 * for us. Associate the tag with the packet.
3699 */
3700 if ((status & WRX_ST_VP) != 0) {
3701 VLAN_INPUT_TAG(ifp, m,
3702 le16toh(vlantag),
3703 continue);
3704 }
3705
3706 /*
3707 * Set up checksum info for this packet.
3708 */
3709 if ((status & WRX_ST_IXSM) == 0) {
3710 if (status & WRX_ST_IPCS) {
3711 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3712 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3713 if (errors & WRX_ER_IPE)
3714 m->m_pkthdr.csum_flags |=
3715 M_CSUM_IPv4_BAD;
3716 }
3717 if (status & WRX_ST_TCPCS) {
3718 /*
3719 * Note: we don't know if this was TCP or UDP,
3720 * so we just set both bits, and expect the
3721 * upper layers to deal.
3722 */
3723 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3724 m->m_pkthdr.csum_flags |=
3725 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3726 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3727 if (errors & WRX_ER_TCPE)
3728 m->m_pkthdr.csum_flags |=
3729 M_CSUM_TCP_UDP_BAD;
3730 }
3731 }
3732
3733 ifp->if_ipackets++;
3734
3735 /* Pass this up to any BPF listeners. */
3736 bpf_mtap(ifp, m);
3737
3738 /* Pass it on. */
3739 (*ifp->if_input)(ifp, m);
3740 }
3741
3742 /* Update the receive pointer. */
3743 sc->sc_rxptr = i;
3744
3745 DPRINTF(WM_DEBUG_RX,
3746 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3747 }
3748
3749 /*
3750 * wm_linkintr_gmii:
3751 *
3752 * Helper; handle link interrupts for GMII.
3753 */
3754 static void
3755 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3756 {
3757
3758 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3759 __func__));
3760
3761 if (icr & ICR_LSC) {
3762 DPRINTF(WM_DEBUG_LINK,
3763 ("%s: LINK: LSC -> mii_tick\n",
3764 device_xname(sc->sc_dev)));
3765 mii_tick(&sc->sc_mii);
3766 if (sc->sc_type == WM_T_82543) {
3767 int miistatus, active;
3768
3769 /*
3770 * On the 82543, the MAC's speed and duplex must be
3771 * forced to match the speed and duplex the PHY has
3772 * negotiated.
3773 */
3774 miistatus = sc->sc_mii.mii_media_status;
3775
3776 if (miistatus & IFM_ACTIVE) {
3777 active = sc->sc_mii.mii_media_active;
3778 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3779 switch (IFM_SUBTYPE(active)) {
3780 case IFM_10_T:
3781 sc->sc_ctrl |= CTRL_SPEED_10;
3782 break;
3783 case IFM_100_TX:
3784 sc->sc_ctrl |= CTRL_SPEED_100;
3785 break;
3786 case IFM_1000_T:
3787 sc->sc_ctrl |= CTRL_SPEED_1000;
3788 break;
3789 default:
3790 /*
3791 * fiber?
3792 * Should not enter here.
3793 */
3794 printf("unknown media (%x)\n",
3795 active);
3796 break;
3797 }
3798 if (active & IFM_FDX)
3799 sc->sc_ctrl |= CTRL_FD;
3800 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3801 }
3802 } else if ((sc->sc_type == WM_T_ICH8)
3803 && (sc->sc_phytype == WMPHY_IGP_3)) {
3804 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3805 } else if (sc->sc_type == WM_T_PCH) {
3806 wm_k1_gig_workaround_hv(sc,
3807 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3808 }
3809
3810 if ((sc->sc_phytype == WMPHY_82578)
3811 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3812 == IFM_1000_T)) {
3813
3814 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3815 delay(200*1000); /* XXX too big */
3816
3817 /* Link stall fix for link up */
3818 wm_gmii_hv_writereg(sc->sc_dev, 1,
3819 HV_MUX_DATA_CTRL,
3820 HV_MUX_DATA_CTRL_GEN_TO_MAC
3821 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3822 wm_gmii_hv_writereg(sc->sc_dev, 1,
3823 HV_MUX_DATA_CTRL,
3824 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3825 }
3826 }
3827 } else if (icr & ICR_RXSEQ) {
3828 DPRINTF(WM_DEBUG_LINK,
3829 ("%s: LINK Receive sequence error\n",
3830 device_xname(sc->sc_dev)));
3831 }
3832 }
3833
3834 /*
3835 * wm_linkintr_tbi:
3836 *
3837 * Helper; handle link interrupts for TBI mode.
3838 */
3839 static void
3840 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3841 {
3842 uint32_t status;
3843
3844 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3845 __func__));
3846
3847 status = CSR_READ(sc, WMREG_STATUS);
3848 if (icr & ICR_LSC) {
3849 if (status & STATUS_LU) {
3850 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3851 device_xname(sc->sc_dev),
3852 (status & STATUS_FD) ? "FDX" : "HDX"));
3853 /*
3854 * NOTE: the hardware updates TFCE and RFCE in CTRL
3855 * automatically, so re-read CTRL to refresh sc->sc_ctrl.
3856 */
3857
3858 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3859 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3860 sc->sc_fcrtl &= ~FCRTL_XONE;
3861 if (status & STATUS_FD)
3862 sc->sc_tctl |=
3863 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3864 else
3865 sc->sc_tctl |=
3866 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3867 if (sc->sc_ctrl & CTRL_TFCE)
3868 sc->sc_fcrtl |= FCRTL_XONE;
3869 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3870 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3871 WMREG_OLD_FCRTL : WMREG_FCRTL,
3872 sc->sc_fcrtl);
3873 sc->sc_tbi_linkup = 1;
3874 } else {
3875 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3876 device_xname(sc->sc_dev)));
3877 sc->sc_tbi_linkup = 0;
3878 }
3879 wm_tbi_set_linkled(sc);
3880 } else if (icr & ICR_RXCFG) {
3881 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3882 device_xname(sc->sc_dev)));
3883 sc->sc_tbi_nrxcfg++;
3884 wm_check_for_link(sc);
3885 } else if (icr & ICR_RXSEQ) {
3886 DPRINTF(WM_DEBUG_LINK,
3887 ("%s: LINK: Receive sequence error\n",
3888 device_xname(sc->sc_dev)));
3889 }
3890 }
3891
3892 /*
3893 * wm_linkintr:
3894 *
3895 * Helper; handle link interrupts.
3896 */
3897 static void
3898 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3899 {
3900
3901 if (sc->sc_flags & WM_F_HAS_MII)
3902 wm_linkintr_gmii(sc, icr);
3903 else
3904 wm_linkintr_tbi(sc, icr);
3905 }
3906
3907 /*
3908 * wm_tick:
3909 *
3910 * One second timer, used to check link status, sweep up
3911 * completed transmit jobs, etc.
3912 */
3913 static void
3914 wm_tick(void *arg)
3915 {
3916 struct wm_softc *sc = arg;
3917 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3918 int s;
3919
3920 s = splnet();
3921
3922 if (sc->sc_type >= WM_T_82542_2_1) {
3923 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3924 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3925 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3926 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3927 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3928 }
3929
3930 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3931 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3932 + CSR_READ(sc, WMREG_CRCERRS)
3933 + CSR_READ(sc, WMREG_ALGNERRC)
3934 + CSR_READ(sc, WMREG_SYMERRC)
3935 + CSR_READ(sc, WMREG_RXERRC)
3936 + CSR_READ(sc, WMREG_SEC)
3937 + CSR_READ(sc, WMREG_CEXTERR)
3938 + CSR_READ(sc, WMREG_RLEC);
3939 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3940
3941 if (sc->sc_flags & WM_F_HAS_MII)
3942 mii_tick(&sc->sc_mii);
3943 else
3944 wm_tbi_check_link(sc);
3945
3946 splx(s);
3947
3948 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3949 }
3950
3951 /*
3952 * wm_reset:
3953 *
3954 * Reset the i82542 chip.
3955 */
3956 static void
3957 wm_reset(struct wm_softc *sc)
3958 {
3959 int phy_reset = 0;
3960 uint32_t reg, mask;
3961 int i;
3962
3963 /*
3964 * Allocate on-chip memory according to the MTU size.
3965 * The Packet Buffer Allocation register must be written
3966 * before the chip is reset.
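 *
 * For example, on the 82547 the 40KB packet buffer is split
 * between Rx and Tx: with a jumbo MTU the Rx side gets 22KB
 * (PBA_22K) and the remainder becomes the Tx FIFO, computed
 * from (PBA_40K - sc_pba) below.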
3967 */
3968 switch (sc->sc_type) {
3969 case WM_T_82547:
3970 case WM_T_82547_2:
3971 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3972 PBA_22K : PBA_30K;
3973 sc->sc_txfifo_head = 0;
3974 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3975 sc->sc_txfifo_size =
3976 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3977 sc->sc_txfifo_stall = 0;
3978 break;
3979 case WM_T_82571:
3980 case WM_T_82572:
3981 case WM_T_82575: /* XXX need special handling for jumbo frames */
3982 case WM_T_I350:
3983 case WM_T_80003:
3984 sc->sc_pba = PBA_32K;
3985 break;
3986 case WM_T_82580:
3987 case WM_T_82580ER:
3988 sc->sc_pba = PBA_35K;
3989 break;
3990 case WM_T_82576:
3991 sc->sc_pba = PBA_64K;
3992 break;
3993 case WM_T_82573:
3994 sc->sc_pba = PBA_12K;
3995 break;
3996 case WM_T_82574:
3997 case WM_T_82583:
3998 sc->sc_pba = PBA_20K;
3999 break;
4000 case WM_T_ICH8:
4001 sc->sc_pba = PBA_8K;
4002 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4003 break;
4004 case WM_T_ICH9:
4005 case WM_T_ICH10:
4006 sc->sc_pba = PBA_10K;
4007 break;
4008 case WM_T_PCH:
4009 case WM_T_PCH2:
4010 sc->sc_pba = PBA_26K;
4011 break;
4012 default:
4013 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4014 PBA_40K : PBA_48K;
4015 break;
4016 }
4017 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4018
4019 /* Prevent the PCI-E bus from sticking */
4020 if (sc->sc_flags & WM_F_PCIE) {
4021 int timeout = 800;
4022
4023 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4024 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4025
4026 while (timeout--) {
4027 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
4028 break;
4029 delay(100);
4030 }
4031 }
4032
4033 /* Set the completion timeout for interface */
4034 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4035 || (sc->sc_type == WM_T_I350))
4036 wm_set_pcie_completion_timeout(sc);
4037
4038 /* Clear interrupt */
4039 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4040
4041 /* Stop the transmit and receive processes. */
4042 CSR_WRITE(sc, WMREG_RCTL, 0);
4043 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4044 sc->sc_rctl &= ~RCTL_EN;
4045
4046 /* XXX set_tbi_sbp_82543() */
4047
4048 delay(10*1000);
4049
4050 /* Must acquire the MDIO ownership before MAC reset */
4051 switch (sc->sc_type) {
4052 case WM_T_82573:
4053 case WM_T_82574:
4054 case WM_T_82583:
4055 i = 0;
4056 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4057 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4058 do {
4059 CSR_WRITE(sc, WMREG_EXTCNFCTR,
4060 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4061 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4062 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4063 break;
4064 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4065 delay(2*1000);
4066 i++;
4067 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4068 break;
4069 default:
4070 break;
4071 }
4072
4073 /*
4074 * 82541 Errata 29? & 82547 Errata 28?
4075 * See also the description about PHY_RST bit in CTRL register
4076 * in 8254x_GBe_SDM.pdf.
4077 */
4078 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4079 CSR_WRITE(sc, WMREG_CTRL,
4080 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4081 delay(5000);
4082 }
4083
4084 switch (sc->sc_type) {
4085 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4086 case WM_T_82541:
4087 case WM_T_82541_2:
4088 case WM_T_82547:
4089 case WM_T_82547_2:
4090 /*
4091 * On some chipsets, a reset through a memory-mapped write
4092 * cycle can cause the chip to reset before completing the
4093 * write cycle. This causes a major headache that can be
4094 * avoided by issuing the reset via indirect register writes
4095 * through I/O space.
4096 *
4097 * So, if we successfully mapped the I/O BAR at attach time,
4098 * use that. Otherwise, try our luck with a memory-mapped
4099 * reset.
4100 */
4101 if (sc->sc_flags & WM_F_IOH_VALID)
4102 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4103 else
4104 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4105 break;
4106 case WM_T_82545_3:
4107 case WM_T_82546_3:
4108 /* Use the shadow control register on these chips. */
4109 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4110 break;
4111 case WM_T_80003:
4112 mask = swfwphysem[sc->sc_funcid];
4113 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4114 wm_get_swfw_semaphore(sc, mask);
4115 CSR_WRITE(sc, WMREG_CTRL, reg);
4116 wm_put_swfw_semaphore(sc, mask);
4117 break;
4118 case WM_T_ICH8:
4119 case WM_T_ICH9:
4120 case WM_T_ICH10:
4121 case WM_T_PCH:
4122 case WM_T_PCH2:
4123 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4124 if (wm_check_reset_block(sc) == 0) {
4125 /*
4126 * Gate automatic PHY configuration by hardware on
4127 * managed 82579
4128 */
4129 if ((sc->sc_type == WM_T_PCH2)
4130 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4131 != 0))
4132 wm_gate_hw_phy_config_ich8lan(sc, 1);
4133
4135 reg |= CTRL_PHY_RESET;
4136 phy_reset = 1;
4137 }
4138 wm_get_swfwhw_semaphore(sc);
4139 CSR_WRITE(sc, WMREG_CTRL, reg);
4140 delay(20*1000);
4141 wm_put_swfwhw_semaphore(sc);
4142 break;
4143 case WM_T_82542_2_0:
4144 case WM_T_82542_2_1:
4145 case WM_T_82543:
4146 case WM_T_82540:
4147 case WM_T_82545:
4148 case WM_T_82546:
4149 case WM_T_82571:
4150 case WM_T_82572:
4151 case WM_T_82573:
4152 case WM_T_82574:
4153 case WM_T_82575:
4154 case WM_T_82576:
4155 case WM_T_82580:
4156 case WM_T_82580ER:
4157 case WM_T_82583:
4158 case WM_T_I350:
4159 default:
4160 /* Everything else can safely use the documented method. */
4161 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4162 break;
4163 }
4164
4165 if (phy_reset != 0)
4166 wm_get_cfg_done(sc);
4167
4168 /* reload EEPROM */
4169 switch (sc->sc_type) {
4170 case WM_T_82542_2_0:
4171 case WM_T_82542_2_1:
4172 case WM_T_82543:
4173 case WM_T_82544:
4174 delay(10);
4175 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4176 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4177 delay(2000);
4178 break;
4179 case WM_T_82540:
4180 case WM_T_82545:
4181 case WM_T_82545_3:
4182 case WM_T_82546:
4183 case WM_T_82546_3:
4184 delay(5*1000);
4185 /* XXX Disable HW ARPs on ASF enabled adapters */
4186 break;
4187 case WM_T_82541:
4188 case WM_T_82541_2:
4189 case WM_T_82547:
4190 case WM_T_82547_2:
4191 delay(20000);
4192 /* XXX Disable HW ARPs on ASF enabled adapters */
4193 break;
4194 case WM_T_82571:
4195 case WM_T_82572:
4196 case WM_T_82573:
4197 case WM_T_82574:
4198 case WM_T_82583:
4199 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4200 delay(10);
4201 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4202 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4203 }
4204 /* check EECD_EE_AUTORD */
4205 wm_get_auto_rd_done(sc);
4206 /*
4207 * PHY configuration from NVM starts just after EECD_AUTO_RD
4208 * is set.
4209 */
4210 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4211 || (sc->sc_type == WM_T_82583))
4212 delay(25*1000);
4213 break;
4214 case WM_T_82575:
4215 case WM_T_82576:
4216 case WM_T_82580:
4217 case WM_T_82580ER:
4218 case WM_T_I350:
4219 case WM_T_80003:
4220 case WM_T_ICH8:
4221 case WM_T_ICH9:
4222 /* check EECD_EE_AUTORD */
4223 wm_get_auto_rd_done(sc);
4224 break;
4225 case WM_T_ICH10:
4226 case WM_T_PCH:
4227 case WM_T_PCH2:
4228 wm_lan_init_done(sc);
4229 break;
4230 default:
4231 panic("%s: unknown type\n", __func__);
4232 }
4233
4234 /* Check whether EEPROM is present or not */
4235 switch (sc->sc_type) {
4236 case WM_T_82575:
4237 case WM_T_82576:
4238 #if 0 /* XXX */
4239 case WM_T_82580:
4240 case WM_T_82580ER:
4241 #endif
4242 case WM_T_I350:
4243 case WM_T_ICH8:
4244 case WM_T_ICH9:
4245 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4246 /* Not found */
4247 sc->sc_flags |= WM_F_EEPROM_INVALID;
4248 if ((sc->sc_type == WM_T_82575)
4249 || (sc->sc_type == WM_T_82576)
4250 || (sc->sc_type == WM_T_82580)
4251 || (sc->sc_type == WM_T_82580ER)
4252 || (sc->sc_type == WM_T_I350))
4253 wm_reset_init_script_82575(sc);
4254 }
4255 break;
4256 default:
4257 break;
4258 }
4259
4260 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4261 || (sc->sc_type == WM_T_I350)) {
4262 /* clear global device reset status bit */
4263 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4264 }
4265
4266 /* Clear any pending interrupt events. */
4267 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4268 reg = CSR_READ(sc, WMREG_ICR);
4269
4270 /* reload sc_ctrl */
4271 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4272
4273 if (sc->sc_type == WM_T_I350)
4274 wm_set_eee_i350(sc);
4275
4276 /* dummy read from WUC */
4277 if (sc->sc_type == WM_T_PCH)
4278 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4279 /*
4280 * For PCH, this write will make sure that any noise will be detected
4281 * as a CRC error and be dropped rather than show up as a bad packet
4282 * to the DMA engine
4283 */
4284 if (sc->sc_type == WM_T_PCH)
4285 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4286
4287 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4288 CSR_WRITE(sc, WMREG_WUC, 0);
4289
4290 /* XXX need special handling for 82580 */
4291 }
4292
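/*
 * wm_set_vlan:
 *
 * Helper; set or clear CTRL_VME according to whether any
 * VLANs are attached, and write the control register.
 */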
4293 static void
4294 wm_set_vlan(struct wm_softc *sc)
4295 {
4296 /* Deal with VLAN enables. */
4297 if (VLAN_ATTACHED(&sc->sc_ethercom))
4298 sc->sc_ctrl |= CTRL_VME;
4299 else
4300 sc->sc_ctrl &= ~CTRL_VME;
4301
4302 /* Write the control registers. */
4303 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4304 }
4305
4306 /*
4307 * wm_init: [ifnet interface function]
4308 *
4309 * Initialize the interface. Must be called at splnet().
4310 */
4311 static int
4312 wm_init(struct ifnet *ifp)
4313 {
4314 struct wm_softc *sc = ifp->if_softc;
4315 struct wm_rxsoft *rxs;
4316 int i, j, trynum, error = 0;
4317 uint32_t reg;
4318
4319 /*
4320 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4321 * There is a small but measurable benefit to avoiding the adjustment
4322 * of the descriptor so that the headers are aligned, for normal mtu,
4323 * on such platforms. One possibility is that the DMA itself is
4324 * slightly more efficient if the front of the entire packet (instead
4325 * of the front of the headers) is aligned.
4326 *
4327 * Note we must always set align_tweak to 0 if we are using
4328 * jumbo frames.
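 *
 * With align_tweak = 2, the 14-byte Ethernet header leaves
 * the IP header 4-byte aligned, which strict-alignment
 * platforms require.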
4329 */
4330 #ifdef __NO_STRICT_ALIGNMENT
4331 sc->sc_align_tweak = 0;
4332 #else
4333 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4334 sc->sc_align_tweak = 0;
4335 else
4336 sc->sc_align_tweak = 2;
4337 #endif /* __NO_STRICT_ALIGNMENT */
4338
4339 /* Cancel any pending I/O. */
4340 wm_stop(ifp, 0);
4341
4342 /* update statistics before reset */
4343 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4344 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4345
4346 /* Reset the chip to a known state. */
4347 wm_reset(sc);
4348
4349 switch (sc->sc_type) {
4350 case WM_T_82571:
4351 case WM_T_82572:
4352 case WM_T_82573:
4353 case WM_T_82574:
4354 case WM_T_82583:
4355 case WM_T_80003:
4356 case WM_T_ICH8:
4357 case WM_T_ICH9:
4358 case WM_T_ICH10:
4359 case WM_T_PCH:
4360 case WM_T_PCH2:
4361 if (wm_check_mng_mode(sc) != 0)
4362 wm_get_hw_control(sc);
4363 break;
4364 default:
4365 break;
4366 }
4367
4368 /* Reset the PHY. */
4369 if (sc->sc_flags & WM_F_HAS_MII)
4370 wm_gmii_reset(sc);
4371
4372 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4373 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4374 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
4375 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4376
4377 /* Initialize the transmit descriptor ring. */
4378 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4379 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4380 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4381 sc->sc_txfree = WM_NTXDESC(sc);
4382 sc->sc_txnext = 0;
4383
4384 if (sc->sc_type < WM_T_82543) {
4385 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4386 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4387 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4388 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4389 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4390 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4391 } else {
4392 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4393 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4394 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4395 CSR_WRITE(sc, WMREG_TDH, 0);
4396 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4397 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4398
4399 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4400 /*
4401 * Don't write TDT before TCTL.EN is set.
4402 * See the chip documentation.
4403 */
4404 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4405 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4406 | TXDCTL_WTHRESH(0));
4407 else {
4408 CSR_WRITE(sc, WMREG_TDT, 0);
4409 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4410 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4411 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4412 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4413 }
4414 }
4415 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4416 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4417
4418 /* Initialize the transmit job descriptors. */
4419 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4420 sc->sc_txsoft[i].txs_mbuf = NULL;
4421 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4422 sc->sc_txsnext = 0;
4423 sc->sc_txsdirty = 0;
4424
4425 /*
4426 * Initialize the receive descriptor and receive job
4427 * descriptor rings.
4428 */
4429 if (sc->sc_type < WM_T_82543) {
4430 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4431 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4432 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4433 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4434 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4435 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4436
4437 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4438 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4439 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4440 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4441 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4442 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4443 } else {
4444 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4445 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4446 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4447 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4448 CSR_WRITE(sc, WMREG_EITR(0), 450);
4449 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4450 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4451 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4452 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4453 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4454 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4455 | RXDCTL_WTHRESH(1));
4456 } else {
4457 CSR_WRITE(sc, WMREG_RDH, 0);
4458 CSR_WRITE(sc, WMREG_RDT, 0);
4459 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4460 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4461 }
4462 }
4463 for (i = 0; i < WM_NRXDESC; i++) {
4464 rxs = &sc->sc_rxsoft[i];
4465 if (rxs->rxs_mbuf == NULL) {
4466 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4467 log(LOG_ERR, "%s: unable to allocate or map rx "
4468 "buffer %d, error = %d\n",
4469 device_xname(sc->sc_dev), i, error);
4470 /*
4471 * XXX Should attempt to run with fewer receive
4472 * XXX buffers instead of just failing.
4473 */
4474 wm_rxdrain(sc);
4475 goto out;
4476 }
4477 } else {
4478 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4479 WM_INIT_RXDESC(sc, i);
4480 /*
4481 * On 82575 and newer devices, the RX descriptors
4482 * must be initialized after RCTL.EN is set in
4483 * wm_set_filter().
4484 */
4485 }
4486 }
4487 sc->sc_rxptr = 0;
4488 sc->sc_rxdiscard = 0;
4489 WM_RXCHAIN_RESET(sc);
4490
4491 /*
4492 * Clear out the VLAN table -- we don't use it (yet).
4493 */
4494 CSR_WRITE(sc, WMREG_VET, 0);
4495 if (sc->sc_type == WM_T_I350)
4496 trynum = 10; /* Due to hw errata */
4497 else
4498 trynum = 1;
4499 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4500 for (j = 0; j < trynum; j++)
4501 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4502
4503 /*
4504 * Set up flow-control parameters.
4505 *
4506 * XXX Values could probably stand some tuning.
4507 */
4508 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4509 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4510 && (sc->sc_type != WM_T_PCH2)) {
4511 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4512 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4513 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4514 }
4515
4516 sc->sc_fcrtl = FCRTL_DFLT;
4517 if (sc->sc_type < WM_T_82543) {
4518 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4519 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4520 } else {
4521 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4522 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4523 }
4524
4525 if (sc->sc_type == WM_T_80003)
4526 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4527 else
4528 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4529
4530 /* Writes the control register. */
4531 wm_set_vlan(sc);
4532
4533 if (sc->sc_flags & WM_F_HAS_MII) {
4534 int val;
4535
4536 switch (sc->sc_type) {
4537 case WM_T_80003:
4538 case WM_T_ICH8:
4539 case WM_T_ICH9:
4540 case WM_T_ICH10:
4541 case WM_T_PCH:
4542 case WM_T_PCH2:
4543 /*
4544 * Set the MAC to wait the maximum time between each
4545 * iteration and increase the max iterations when
4546 * polling the PHY; this fixes erroneous timeouts at
4547 * 10Mbps.
4548 */
4549 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4550 0xFFFF);
4551 val = wm_kmrn_readreg(sc,
4552 KUMCTRLSTA_OFFSET_INB_PARAM);
4553 val |= 0x3F;
4554 wm_kmrn_writereg(sc,
4555 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4556 break;
4557 default:
4558 break;
4559 }
4560
4561 if (sc->sc_type == WM_T_80003) {
4562 val = CSR_READ(sc, WMREG_CTRL_EXT);
4563 val &= ~CTRL_EXT_LINK_MODE_MASK;
4564 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4565
4566 /* Bypass RX and TX FIFOs */
4567 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4568 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4569 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4570 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4571 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4572 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4573 }
4574 }
4575 #if 0
4576 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4577 #endif
4578
4579 /*
4580 * Set up checksum offload parameters.
4581 */
4582 reg = CSR_READ(sc, WMREG_RXCSUM);
4583 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4584 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4585 reg |= RXCSUM_IPOFL;
4586 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4587 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4588 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4589 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4590 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4591
4592 /* Reset TBI's RXCFG count */
4593 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4594
4595 /*
4596 * Set up the interrupt registers.
4597 */
4598 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4599 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4600 ICR_RXO | ICR_RXT0;
4601 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4602 sc->sc_icr |= ICR_RXCFG;
4603 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4604
4605 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4606 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4607 || (sc->sc_type == WM_T_PCH2)) {
4608 reg = CSR_READ(sc, WMREG_KABGTXD);
4609 reg |= KABGTXD_BGSQLBIAS;
4610 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4611 }
4612
4613 /* Set up the inter-packet gap. */
4614 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4615
4616 if (sc->sc_type >= WM_T_82543) {
4617 /*
4618 * Set up the interrupt throttling register (units of 256ns)
4619 * Note that a footnote in Intel's documentation says this
4620 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4621 * or 10Mbit mode. Empirically, this also appears to be
4622 * true for the 1024ns units of the other interrupt-related
4623 * timer registers -- so, really, we ought to divide this
4624 * value by 4 when the link speed is low.
4625 *
4626 * XXX implement this division at link speed change!
4627 */
4628
4629 /*
4630 * For N interrupts/sec, set this value to:
4631 * 1000000000 / (N * 256). Note that we set the
4632 * absolute and packet timer values to this value
4633 * divided by 4 to get "simple timer" behavior.
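 *
 * Worked example: the value 1500 programmed below gives
 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.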
4634 */
4635
4636 sc->sc_itr = 1500; /* 2604 ints/sec */
4637 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4638 }
4639
4640 /* Set the VLAN ethernetype. */
4641 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4642
4643 /*
4644 * Set up the transmit control register; we start out with
4645 * a collision distance suitable for FDX, but update it when
4646 * we resolve the media type.
4647 */
4648 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4649 | TCTL_CT(TX_COLLISION_THRESHOLD)
4650 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4651 if (sc->sc_type >= WM_T_82571)
4652 sc->sc_tctl |= TCTL_MULR;
4653 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4654
4655 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4656 /*
4657 * Write TDT after TCTL.EN is set.
4658 * See the chip documentation.
4659 */
4660 CSR_WRITE(sc, WMREG_TDT, 0);
4661 }
4662
4663 if (sc->sc_type == WM_T_80003) {
4664 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4665 reg &= ~TCTL_EXT_GCEX_MASK;
4666 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4667 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4668 }
4669
4670 /* Set the media. */
4671 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4672 goto out;
4673
4674 /* Configure for OS presence */
4675 wm_init_manageability(sc);
4676
4677 /*
4678 * Set up the receive control register; we actually program
4679 * the register when we set the receive filter. Use multicast
4680 * address offset type 0.
4681 *
4682 * Only the i82544 has the ability to strip the incoming
4683 * CRC, so we don't enable that feature.
4684 */
4685 sc->sc_mchash_type = 0;
4686 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4687 | RCTL_MO(sc->sc_mchash_type);
4688
4689 /*
4690 * The I350 has a bug where it always strips the CRC whether
4691 * asked to or not. So ask for stripped CRC here and cope in rxeof
4692 */
4693 if (sc->sc_type == WM_T_I350)
4694 sc->sc_rctl |= RCTL_SECRC;
4695
4696 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4697 && (ifp->if_mtu > ETHERMTU)) {
4698 sc->sc_rctl |= RCTL_LPE;
4699 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4700 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4701 }
4702
4703 if (MCLBYTES == 2048) {
4704 sc->sc_rctl |= RCTL_2k;
4705 } else {
4706 if (sc->sc_type >= WM_T_82543) {
4707 switch (MCLBYTES) {
4708 case 4096:
4709 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4710 break;
4711 case 8192:
4712 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4713 break;
4714 case 16384:
4715 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4716 break;
4717 default:
4718 panic("wm_init: MCLBYTES %d unsupported",
4719 MCLBYTES);
4720 break;
4721 }
4722 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4723 }
4724
4725 /* Set the receive filter. */
4726 wm_set_filter(sc);
4727
4728 /* On 82575 and later, set RDT only once RX is enabled */
4729 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4730 for (i = 0; i < WM_NRXDESC; i++)
4731 WM_INIT_RXDESC(sc, i);
4732
4733 /* Start the one second link check clock. */
4734 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4735
4736 /* ...all done! */
4737 ifp->if_flags |= IFF_RUNNING;
4738 ifp->if_flags &= ~IFF_OACTIVE;
4739
4740 out:
4741 sc->sc_if_flags = ifp->if_flags;
4742 if (error)
4743 log(LOG_ERR, "%s: interface not running\n",
4744 device_xname(sc->sc_dev));
4745 return error;
4746 }
4747
4748 /*
4749 * wm_rxdrain:
4750 *
4751 * Drain the receive queue.
4752 */
4753 static void
4754 wm_rxdrain(struct wm_softc *sc)
4755 {
4756 struct wm_rxsoft *rxs;
4757 int i;
4758
4759 for (i = 0; i < WM_NRXDESC; i++) {
4760 rxs = &sc->sc_rxsoft[i];
4761 if (rxs->rxs_mbuf != NULL) {
4762 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4763 m_freem(rxs->rxs_mbuf);
4764 rxs->rxs_mbuf = NULL;
4765 }
4766 }
4767 }
4768
4769 /*
4770 * wm_stop: [ifnet interface function]
4771 *
4772 * Stop transmission on the interface.
4773 */
4774 static void
4775 wm_stop(struct ifnet *ifp, int disable)
4776 {
4777 struct wm_softc *sc = ifp->if_softc;
4778 struct wm_txsoft *txs;
4779 int i;
4780
4781 /* Stop the one second clock. */
4782 callout_stop(&sc->sc_tick_ch);
4783
4784 /* Stop the 82547 Tx FIFO stall check timer. */
4785 if (sc->sc_type == WM_T_82547)
4786 callout_stop(&sc->sc_txfifo_ch);
4787
4788 if (sc->sc_flags & WM_F_HAS_MII) {
4789 /* Down the MII. */
4790 mii_down(&sc->sc_mii);
4791 } else {
4792 #if 0
4793 /* Should we clear PHY's status properly? */
4794 wm_reset(sc);
4795 #endif
4796 }
4797
4798 /* Stop the transmit and receive processes. */
4799 CSR_WRITE(sc, WMREG_TCTL, 0);
4800 CSR_WRITE(sc, WMREG_RCTL, 0);
4801 sc->sc_rctl &= ~RCTL_EN;
4802
4803 /*
4804 * Clear the interrupt mask to ensure the device cannot assert its
4805 * interrupt line.
4806 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4807 * any currently pending or shared interrupt.
4808 */
4809 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4810 sc->sc_icr = 0;
4811
4812 /* Release any queued transmit buffers. */
4813 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4814 txs = &sc->sc_txsoft[i];
4815 if (txs->txs_mbuf != NULL) {
4816 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4817 m_freem(txs->txs_mbuf);
4818 txs->txs_mbuf = NULL;
4819 }
4820 }
4821
4822 /* Mark the interface as down and cancel the watchdog timer. */
4823 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4824 ifp->if_timer = 0;
4825
4826 if (disable)
4827 wm_rxdrain(sc);
4828
4829 #if 0 /* notyet */
4830 if (sc->sc_type >= WM_T_82544)
4831 CSR_WRITE(sc, WMREG_WUC, 0);
4832 #endif
4833 }
4834
4835 void
4836 wm_get_auto_rd_done(struct wm_softc *sc)
4837 {
4838 int i;
4839
4840 /* wait for eeprom to reload */
4841 switch (sc->sc_type) {
4842 case WM_T_82571:
4843 case WM_T_82572:
4844 case WM_T_82573:
4845 case WM_T_82574:
4846 case WM_T_82583:
4847 case WM_T_82575:
4848 case WM_T_82576:
4849 case WM_T_82580:
4850 case WM_T_82580ER:
4851 case WM_T_I350:
4852 case WM_T_80003:
4853 case WM_T_ICH8:
4854 case WM_T_ICH9:
4855 for (i = 0; i < 10; i++) {
4856 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4857 break;
4858 delay(1000);
4859 }
4860 if (i == 10) {
4861 log(LOG_ERR, "%s: auto read from eeprom failed to "
4862 "complete\n", device_xname(sc->sc_dev));
4863 }
4864 break;
4865 default:
4866 break;
4867 }
4868 }
4869
4870 void
4871 wm_lan_init_done(struct wm_softc *sc)
4872 {
4873 uint32_t reg = 0;
4874 int i;
4875
4876 /* wait for eeprom to reload */
4877 switch (sc->sc_type) {
4878 case WM_T_ICH10:
4879 case WM_T_PCH:
4880 case WM_T_PCH2:
4881 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4882 reg = CSR_READ(sc, WMREG_STATUS);
4883 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4884 break;
4885 delay(100);
4886 }
4887 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4888 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4889 "complete\n", device_xname(sc->sc_dev), __func__);
4890 }
4891 break;
4892 default:
4893 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4894 __func__);
4895 break;
4896 }
4897
4898 reg &= ~STATUS_LAN_INIT_DONE;
4899 CSR_WRITE(sc, WMREG_STATUS, reg);
4900 }
4901
4902 void
4903 wm_get_cfg_done(struct wm_softc *sc)
4904 {
4905 int mask;
4906 uint32_t reg;
4907 int i;
4908
4909 /* wait for eeprom to reload */
4910 switch (sc->sc_type) {
4911 case WM_T_82542_2_0:
4912 case WM_T_82542_2_1:
4913 /* null */
4914 break;
4915 case WM_T_82543:
4916 case WM_T_82544:
4917 case WM_T_82540:
4918 case WM_T_82545:
4919 case WM_T_82545_3:
4920 case WM_T_82546:
4921 case WM_T_82546_3:
4922 case WM_T_82541:
4923 case WM_T_82541_2:
4924 case WM_T_82547:
4925 case WM_T_82547_2:
4926 case WM_T_82573:
4927 case WM_T_82574:
4928 case WM_T_82583:
4929 /* generic */
4930 delay(10*1000);
4931 break;
4932 case WM_T_80003:
4933 case WM_T_82571:
4934 case WM_T_82572:
4935 case WM_T_82575:
4936 case WM_T_82576:
4937 case WM_T_82580:
4938 case WM_T_82580ER:
4939 case WM_T_I350:
4940 if (sc->sc_type == WM_T_82571) {
4941 /* Both ports of the 82571 share the port 0 bit */
4942 mask = EEMNGCTL_CFGDONE_0;
4943 } else
4944 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4945 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4946 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4947 break;
4948 delay(1000);
4949 }
4950 if (i >= WM_PHY_CFG_TIMEOUT) {
4951 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4952 device_xname(sc->sc_dev), __func__));
4953 }
4954 break;
4955 case WM_T_ICH8:
4956 case WM_T_ICH9:
4957 case WM_T_ICH10:
4958 case WM_T_PCH:
4959 case WM_T_PCH2:
4960 if (sc->sc_type >= WM_T_PCH) {
4961 reg = CSR_READ(sc, WMREG_STATUS);
4962 if ((reg & STATUS_PHYRA) != 0)
4963 CSR_WRITE(sc, WMREG_STATUS,
4964 reg & ~STATUS_PHYRA);
4965 }
4966 delay(10*1000);
4967 break;
4968 default:
4969 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4970 __func__);
4971 break;
4972 }
4973 }
4974
4975 /*
4976 * wm_acquire_eeprom:
4977 *
4978 * Perform the EEPROM handshake required on some chips.
4979 */
4980 static int
4981 wm_acquire_eeprom(struct wm_softc *sc)
4982 {
4983 uint32_t reg;
4984 int x;
4985 int ret = 0;
4986
4987 /* Flash-type EEPROM needs no handshake; always succeeds */
4988 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4989 return 0;
4990
4991 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4992 ret = wm_get_swfwhw_semaphore(sc);
4993 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4994 /* this will also do wm_get_swsm_semaphore() if needed */
4995 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4996 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4997 ret = wm_get_swsm_semaphore(sc);
4998 }
4999
5000 if (ret) {
5001 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5002 __func__);
5003 return 1;
5004 }
5005
5006 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5007 reg = CSR_READ(sc, WMREG_EECD);
5008
5009 /* Request EEPROM access. */
5010 reg |= EECD_EE_REQ;
5011 CSR_WRITE(sc, WMREG_EECD, reg);
5012
5013 /* ..and wait for it to be granted. */
5014 for (x = 0; x < 1000; x++) {
5015 reg = CSR_READ(sc, WMREG_EECD);
5016 if (reg & EECD_EE_GNT)
5017 break;
5018 delay(5);
5019 }
5020 if ((reg & EECD_EE_GNT) == 0) {
5021 aprint_error_dev(sc->sc_dev,
5022 "could not acquire EEPROM GNT\n");
5023 reg &= ~EECD_EE_REQ;
5024 CSR_WRITE(sc, WMREG_EECD, reg);
5025 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5026 wm_put_swfwhw_semaphore(sc);
5027 if (sc->sc_flags & WM_F_SWFW_SYNC)
5028 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5029 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5030 wm_put_swsm_semaphore(sc);
5031 return 1;
5032 }
5033 }
5034
5035 return 0;
5036 }
5037
5038 /*
5039 * wm_release_eeprom:
5040 *
5041 * Release the EEPROM mutex.
5042 */
5043 static void
5044 wm_release_eeprom(struct wm_softc *sc)
5045 {
5046 uint32_t reg;
5047
5048 /* Flash-type EEPROM needs no handshake; nothing to release */
5049 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5050 return;
5051
5052 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5053 reg = CSR_READ(sc, WMREG_EECD);
5054 reg &= ~EECD_EE_REQ;
5055 CSR_WRITE(sc, WMREG_EECD, reg);
5056 }
5057
5058 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5059 wm_put_swfwhw_semaphore(sc);
5060 if (sc->sc_flags & WM_F_SWFW_SYNC)
5061 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5062 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5063 wm_put_swsm_semaphore(sc);
5064 }
5065
5066 /*
5067 * wm_eeprom_sendbits:
5068 *
5069 * Send a series of bits to the EEPROM.
5070 */
5071 static void
5072 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5073 {
5074 uint32_t reg;
5075 int x;
5076
5077 reg = CSR_READ(sc, WMREG_EECD);
5078
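/*
 * Clock the bits out MSB first: present each bit on DI,
 * then pulse SK high and back low, with a 2us delay for
 * setup and hold at each step.
 */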
5079 for (x = nbits; x > 0; x--) {
5080 if (bits & (1U << (x - 1)))
5081 reg |= EECD_DI;
5082 else
5083 reg &= ~EECD_DI;
5084 CSR_WRITE(sc, WMREG_EECD, reg);
5085 delay(2);
5086 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5087 delay(2);
5088 CSR_WRITE(sc, WMREG_EECD, reg);
5089 delay(2);
5090 }
5091 }
5092
5093 /*
5094 * wm_eeprom_recvbits:
5095 *
5096 * Receive a series of bits from the EEPROM.
5097 */
5098 static void
5099 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5100 {
5101 uint32_t reg, val;
5102 int x;
5103
5104 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5105
5106 val = 0;
5107 for (x = nbits; x > 0; x--) {
5108 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5109 delay(2);
5110 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5111 val |= (1U << (x - 1));
5112 CSR_WRITE(sc, WMREG_EECD, reg);
5113 delay(2);
5114 }
5115 *valp = val;
5116 }
5117
5118 /*
5119 * wm_read_eeprom_uwire:
5120 *
5121 * Read a word from the EEPROM using the MicroWire protocol.
5122 */
5123 static int
5124 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5125 {
5126 uint32_t reg, val;
5127 int i;
5128
5129 for (i = 0; i < wordcnt; i++) {
5130 /* Clear SK and DI. */
5131 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5132 CSR_WRITE(sc, WMREG_EECD, reg);
5133
5134 /*
5135 * XXX: workaround for a bug in qemu-0.12.x and prior
5136 * and Xen.
5137 *
5138 * We use this workaround only for the 82540 because qemu's
5139 * e1000 acts as an 82540.
5140 */
5141 if (sc->sc_type == WM_T_82540) {
5142 reg |= EECD_SK;
5143 CSR_WRITE(sc, WMREG_EECD, reg);
5144 reg &= ~EECD_SK;
5145 CSR_WRITE(sc, WMREG_EECD, reg);
5146 delay(2);
5147 }
5148 /* XXX: end of workaround */
5149
5150 /* Set CHIP SELECT. */
5151 reg |= EECD_CS;
5152 CSR_WRITE(sc, WMREG_EECD, reg);
5153 delay(2);
5154
5155 /* Shift in the READ command. */
5156 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5157
5158 /* Shift in address. */
5159 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5160
5161 /* Shift out the data. */
5162 wm_eeprom_recvbits(sc, &val, 16);
5163 data[i] = val & 0xffff;
5164
5165 /* Clear CHIP SELECT. */
5166 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5167 CSR_WRITE(sc, WMREG_EECD, reg);
5168 delay(2);
5169 }
5170
5171 return 0;
5172 }
5173
5174 /*
5175 * wm_spi_eeprom_ready:
5176 *
5177 * Wait for a SPI EEPROM to be ready for commands.
5178 */
5179 static int
5180 wm_spi_eeprom_ready(struct wm_softc *sc)
5181 {
5182 uint32_t val;
5183 int usec;
5184
5185 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5186 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5187 wm_eeprom_recvbits(sc, &val, 8);
5188 if ((val & SPI_SR_RDY) == 0)
5189 break;
5190 }
5191 if (usec >= SPI_MAX_RETRIES) {
5192 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5193 return 1;
5194 }
5195 return 0;
5196 }
5197
5198 /*
5199 * wm_read_eeprom_spi:
5200 *
5201 * Read a word from the EEPROM using the SPI protocol.
5202 */
5203 static int
5204 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5205 {
5206 uint32_t reg, val;
5207 int i;
5208 uint8_t opc;
5209
5210 /* Clear SK and CS. */
5211 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5212 CSR_WRITE(sc, WMREG_EECD, reg);
5213 delay(2);
5214
5215 if (wm_spi_eeprom_ready(sc))
5216 return 1;
5217
5218 /* Toggle CS to flush commands. */
5219 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5220 delay(2);
5221 CSR_WRITE(sc, WMREG_EECD, reg);
5222 delay(2);
5223
5224 opc = SPI_OPC_READ;
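/*
 * Small SPI parts with 8 address bits typically encode the
 * ninth address bit (A8) in the opcode itself.
 */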
5225 if (sc->sc_ee_addrbits == 8 && word >= 128)
5226 opc |= SPI_OPC_A8;
5227
5228 wm_eeprom_sendbits(sc, opc, 8);
5229 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5230
5231 for (i = 0; i < wordcnt; i++) {
5232 wm_eeprom_recvbits(sc, &val, 16);
5233 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5234 }
5235
5236 /* Raise CS and clear SK. */
5237 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5238 CSR_WRITE(sc, WMREG_EECD, reg);
5239 delay(2);
5240
5241 return 0;
5242 }
5243
5244 #define EEPROM_CHECKSUM 0xBABA
5245 #define EEPROM_SIZE 0x0040
5246
5247 /*
5248 * wm_validate_eeprom_checksum
5249 *
5250 * The checksum is defined as the sum of the first 64 (16 bit) words.
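 *
 * The vendor programs the checksum word so that the 16-bit sum
 * of words 0x00-0x3f, including the checksum word itself, comes
 * out to EEPROM_CHECKSUM (0xBABA).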
5251 */
5252 static int
5253 wm_validate_eeprom_checksum(struct wm_softc *sc)
5254 {
5255 uint16_t checksum;
5256 uint16_t eeprom_data;
5257 int i;
5258
5259 checksum = 0;
5260
5261 for (i = 0; i < EEPROM_SIZE; i++) {
5262 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5263 return 1;
5264 checksum += eeprom_data;
5265 }
5266
5267 if (checksum != (uint16_t) EEPROM_CHECKSUM)
5268 return 1;
5269
5270 return 0;
5271 }
5272
5273 /*
5274 * wm_read_eeprom:
5275 *
5276 * Read data from the serial EEPROM.
5277 */
5278 static int
5279 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5280 {
5281 int rv;
5282
5283 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5284 return 1;
5285
5286 if (wm_acquire_eeprom(sc))
5287 return 1;
5288
5289 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5290 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5291 || (sc->sc_type == WM_T_PCH2))
5292 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5293 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5294 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5295 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5296 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5297 else
5298 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5299
5300 wm_release_eeprom(sc);
5301 return rv;
5302 }
5303
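/*
 * wm_read_eeprom_eerd:
 *
 * Read data from the EEPROM using the EERD register: for each
 * word, write the address with EERD_START set, poll for
 * completion, then read the data from the high bits of EERD.
 */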
5304 static int
5305 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5306 uint16_t *data)
5307 {
5308 int i, eerd = 0;
5309 int error = 0;
5310
5311 for (i = 0; i < wordcnt; i++) {
5312 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5313
5314 CSR_WRITE(sc, WMREG_EERD, eerd);
5315 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5316 if (error != 0)
5317 break;
5318
5319 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5320 }
5321
5322 return error;
5323 }
5324
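/*
 * wm_poll_eerd_eewr_done:
 *
 * Poll the given EERD/EEWR register until its DONE bit is set.
 * Returns 0 on success, -1 on timeout.
 */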
5325 static int
5326 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5327 {
5328 uint32_t attempts = 100000;
5329 uint32_t i, reg = 0;
5330 int32_t done = -1;
5331
5332 for (i = 0; i < attempts; i++) {
5333 reg = CSR_READ(sc, rw);
5334
5335 if (reg & EERD_DONE) {
5336 done = 0;
5337 break;
5338 }
5339 delay(5);
5340 }
5341
5342 return done;
5343 }
5344
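/*
 * wm_check_alt_mac_addr:
 *
 * Check whether the EEPROM contains a valid alternative MAC
 * address. Returns 0 if one was found, -1 otherwise.
 */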
5345 static int
5346 wm_check_alt_mac_addr(struct wm_softc *sc)
5347 {
5348 uint16_t myea[ETHER_ADDR_LEN / 2];
5349 uint16_t offset = EEPROM_OFF_MACADDR;
5350
5351 /* Try to read alternative MAC address pointer */
5352 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5353 return -1;
5354
5355 /* Check pointer */
5356 if (offset == 0xffff)
5357 return -1;
5358
5359 /*
5360 * Check whether alternative MAC address is valid or not.
5361 * Some cards have a non-0xffff pointer but do not actually
5362 * use an alternative MAC address.
5363 *
5364 * Check that the multicast (group) bit is clear.
5365 */
5366 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5367 if (((myea[0] & 0xff) & 0x01) == 0)
5368 return 0; /* found! */
5369
5370 /* not found */
5371 return -1;
5372 }
5373
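/*
 * wm_read_mac_addr:
 *
 * Read the station address from the EEPROM, selecting the
 * per-function offset on multi-port devices.
 */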
5374 static int
5375 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5376 {
5377 uint16_t myea[ETHER_ADDR_LEN / 2];
5378 uint16_t offset = EEPROM_OFF_MACADDR;
5379 int do_invert = 0;
5380
5381 switch (sc->sc_type) {
5382 case WM_T_82580:
5383 case WM_T_82580ER:
5384 case WM_T_I350:
5385 switch (sc->sc_funcid) {
5386 case 0:
5387 /* default value (== EEPROM_OFF_MACADDR) */
5388 break;
5389 case 1:
5390 offset = EEPROM_OFF_LAN1;
5391 break;
5392 case 2:
5393 offset = EEPROM_OFF_LAN2;
5394 break;
5395 case 3:
5396 offset = EEPROM_OFF_LAN3;
5397 break;
5398 default:
5399 goto bad;
5400 /* NOTREACHED */
5401 break;
5402 }
5403 break;
5404 case WM_T_82571:
5405 case WM_T_82575:
5406 case WM_T_82576:
5407 case WM_T_80003:
5408 if (wm_check_alt_mac_addr(sc) != 0) {
5409 /* reset the offset to LAN0 */
5410 offset = EEPROM_OFF_MACADDR;
5411 if ((sc->sc_funcid & 0x01) == 1)
5412 do_invert = 1;
5413 goto do_read;
5414 }
5415 switch (sc->sc_funcid) {
5416 case 0:
5417 /*
5418 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5419 * itself.
5420 */
5421 break;
5422 case 1:
5423 offset += EEPROM_OFF_MACADDR_LAN1;
5424 break;
5425 case 2:
5426 offset += EEPROM_OFF_MACADDR_LAN2;
5427 break;
5428 case 3:
5429 offset += EEPROM_OFF_MACADDR_LAN3;
5430 break;
5431 default:
5432 goto bad;
5433 /* NOTREACHED */
5434 break;
5435 }
5436 break;
5437 default:
5438 if ((sc->sc_funcid & 0x01) == 1)
5439 do_invert = 1;
5440 break;
5441 }
5442
5443 do_read:
5444 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5445 myea) != 0) {
5446 goto bad;
5447 }
5448
5449 enaddr[0] = myea[0] & 0xff;
5450 enaddr[1] = myea[0] >> 8;
5451 enaddr[2] = myea[1] & 0xff;
5452 enaddr[3] = myea[1] >> 8;
5453 enaddr[4] = myea[2] & 0xff;
5454 enaddr[5] = myea[2] >> 8;
5455
5456 /*
5457 * Toggle the LSB of the MAC address on the second port
5458 * of some dual port cards.
5459 */
5460 if (do_invert != 0)
5461 enaddr[5] ^= 1;
5462
5463 return 0;
5464
5465 bad:
5466 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5467
5468 return -1;
5469 }
5470
5471 /*
5472 * wm_add_rxbuf:
5473 *
5474 * Add a receive buffer to the indicated descriptor.
5475 */
5476 static int
5477 wm_add_rxbuf(struct wm_softc *sc, int idx)
5478 {
5479 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5480 struct mbuf *m;
5481 int error;
5482
5483 MGETHDR(m, M_DONTWAIT, MT_DATA);
5484 if (m == NULL)
5485 return ENOBUFS;
5486
5487 MCLGET(m, M_DONTWAIT);
5488 if ((m->m_flags & M_EXT) == 0) {
5489 m_freem(m);
5490 return ENOBUFS;
5491 }
5492
5493 if (rxs->rxs_mbuf != NULL)
5494 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5495
5496 rxs->rxs_mbuf = m;
5497
5498 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5499 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5500 BUS_DMA_READ|BUS_DMA_NOWAIT);
5501 if (error) {
5502 /* XXX XXX XXX */
5503 aprint_error_dev(sc->sc_dev,
5504 "unable to load rx DMA map %d, error = %d\n",
5505 idx, error);
5506 panic("wm_add_rxbuf");
5507 }
5508
5509 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5510 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5511
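/*
 * On 82575 and newer (WM_F_NEWQUEUE), hand the descriptor to
 * the chip only once RCTL_EN has been set; see wm_init().
 */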
5512 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5513 if ((sc->sc_rctl & RCTL_EN) != 0)
5514 WM_INIT_RXDESC(sc, idx);
5515 } else
5516 WM_INIT_RXDESC(sc, idx);
5517
5518 return 0;
5519 }
5520
5521 /*
5522 * wm_set_ral:
5523 *
5524 * Set an entry in the receive address list.
5525 */
5526 static void
5527 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5528 {
5529 uint32_t ral_lo, ral_hi;
5530
5531 if (enaddr != NULL) {
5532 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5533 (enaddr[3] << 24);
5534 ral_hi = enaddr[4] | (enaddr[5] << 8);
5535 ral_hi |= RAL_AV;
5536 } else {
5537 ral_lo = 0;
5538 ral_hi = 0;
5539 }
5540
5541 if (sc->sc_type >= WM_T_82544) {
5542 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5543 ral_lo);
5544 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5545 ral_hi);
5546 } else {
5547 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5548 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5549 }
5550 }
5551
5552 /*
5553 * wm_mchash:
5554 *
5555 * Compute the hash of the multicast address for the 4096-bit
5556 * multicast filter.
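 *
 * The hash is built from bits of the two high-order bytes of
 * the address; sc_mchash_type (the multicast offset type
 * programmed via RCTL_MO) selects which bit window is used,
 * and ICH/PCH parts use a narrower 10-bit hash.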
5557 */
5558 static uint32_t
5559 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5560 {
5561 static const int lo_shift[4] = { 4, 3, 2, 0 };
5562 static const int hi_shift[4] = { 4, 5, 6, 8 };
5563 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5564 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5565 uint32_t hash;
5566
5567 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5568 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5569 || (sc->sc_type == WM_T_PCH2)) {
5570 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5571 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5572 return (hash & 0x3ff);
5573 }
5574 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5575 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5576
5577 return (hash & 0xfff);
5578 }
5579
5580 /*
5581 * wm_set_filter:
5582 *
5583 * Set up the receive filter.
5584 */
5585 static void
5586 wm_set_filter(struct wm_softc *sc)
5587 {
5588 struct ethercom *ec = &sc->sc_ethercom;
5589 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5590 struct ether_multi *enm;
5591 struct ether_multistep step;
5592 bus_addr_t mta_reg;
5593 uint32_t hash, reg, bit;
5594 int i, size;
5595
5596 if (sc->sc_type >= WM_T_82544)
5597 mta_reg = WMREG_CORDOVA_MTA;
5598 else
5599 mta_reg = WMREG_MTA;
5600
5601 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5602
5603 if (ifp->if_flags & IFF_BROADCAST)
5604 sc->sc_rctl |= RCTL_BAM;
5605 if (ifp->if_flags & IFF_PROMISC) {
5606 sc->sc_rctl |= RCTL_UPE;
5607 goto allmulti;
5608 }
5609
5610 /*
5611 * Set the station address in the first RAL slot, and
5612 * clear the remaining slots.
5613 */
5614 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5615 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5616 || (sc->sc_type == WM_T_PCH2))
5617 size = WM_ICH8_RAL_TABSIZE;
5618 else
5619 size = WM_RAL_TABSIZE;
5620 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5621 for (i = 1; i < size; i++)
5622 wm_set_ral(sc, NULL, i);
5623
5624 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5625 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5626 || (sc->sc_type == WM_T_PCH2))
5627 size = WM_ICH8_MC_TABSIZE;
5628 else
5629 size = WM_MC_TABSIZE;
5630 /* Clear out the multicast table. */
5631 for (i = 0; i < size; i++)
5632 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5633
5634 ETHER_FIRST_MULTI(step, ec, enm);
5635 while (enm != NULL) {
5636 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5637 /*
5638 * We must listen to a range of multicast addresses.
5639 * For now, just accept all multicasts, rather than
5640 * trying to set only those filter bits needed to match
5641 * the range. (At this time, the only use of address
5642 * ranges is for IP multicast routing, for which the
5643 * range is big enough to require all bits set.)
5644 */
5645 goto allmulti;
5646 }
5647
5648 hash = wm_mchash(sc, enm->enm_addrlo);
5649
5650 reg = (hash >> 5);
5651 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5652 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5653 || (sc->sc_type == WM_T_PCH2))
5654 reg &= 0x1f;
5655 else
5656 reg &= 0x7f;
5657 bit = hash & 0x1f;
5658
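		/*
		 * The multicast table is an array of 32-bit words (128
		 * words for the 4096-bit filter, 32 on the ICH and PCH
		 * variants); "reg" selects a word and "bit" a bit within
		 * it.
		 */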
5659 hash = CSR_READ(sc, mta_reg + (reg << 2));
5660 hash |= 1U << bit;
5661
5662 /* XXX Hardware bug?? */
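		/*
		 * A sketch of an explanation: on the 82544, writing some
		 * MTA words apparently clobbers the adjacent word, so the
		 * previous word is read first and written back after the
		 * update, which is exactly what the workaround below does.
		 */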
5663 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5664 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5665 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5666 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5667 } else
5668 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5669
5670 ETHER_NEXT_MULTI(step, enm);
5671 }
5672
5673 ifp->if_flags &= ~IFF_ALLMULTI;
5674 goto setit;
5675
5676 allmulti:
5677 ifp->if_flags |= IFF_ALLMULTI;
5678 sc->sc_rctl |= RCTL_MPE;
5679
5680 setit:
5681 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5682 }
5683
5684 /*
5685 * wm_tbi_mediainit:
5686 *
5687 * Initialize media for use on 1000BASE-X devices.
5688 */
5689 static void
5690 wm_tbi_mediainit(struct wm_softc *sc)
5691 {
5692 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5693 const char *sep = "";
5694
5695 if (sc->sc_type < WM_T_82543)
5696 sc->sc_tipg = TIPG_WM_DFLT;
5697 else
5698 sc->sc_tipg = TIPG_LG_DFLT;
5699
5700 sc->sc_tbi_anegticks = 5;
5701
5702 /* Initialize our media structures */
5703 sc->sc_mii.mii_ifp = ifp;
5704
5705 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5706 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5707 wm_tbi_mediastatus);
5708
5709 /*
5710 * SWD Pins:
5711 *
5712 * 0 = Link LED (output)
5713 * 1 = Loss Of Signal (input)
5714 */
5715 sc->sc_ctrl |= CTRL_SWDPIO(0);
5716 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5717
5718 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5719
5720 #define ADD(ss, mm, dd) \
5721 do { \
5722 aprint_normal("%s%s", sep, ss); \
5723 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5724 sep = ", "; \
5725 } while (/*CONSTCOND*/0)
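/*
 * ADD() both prints the medium name as part of the attach banner and
 * registers the medium with ifmedia; "sep" makes every entry after
 * the first comma-separated.
 */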
5726
5727 aprint_normal_dev(sc->sc_dev, "");
5728 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5729 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5730 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5731 aprint_normal("\n");
5732
5733 #undef ADD
5734
5735 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5736 }
5737
5738 /*
5739 * wm_tbi_mediastatus: [ifmedia interface function]
5740 *
5741 * Get the current interface media status on a 1000BASE-X device.
5742 */
5743 static void
5744 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5745 {
5746 struct wm_softc *sc = ifp->if_softc;
5747 uint32_t ctrl, status;
5748
5749 ifmr->ifm_status = IFM_AVALID;
5750 ifmr->ifm_active = IFM_ETHER;
5751
5752 status = CSR_READ(sc, WMREG_STATUS);
5753 if ((status & STATUS_LU) == 0) {
5754 ifmr->ifm_active |= IFM_NONE;
5755 return;
5756 }
5757
5758 ifmr->ifm_status |= IFM_ACTIVE;
5759 ifmr->ifm_active |= IFM_1000_SX;
5760 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5761 ifmr->ifm_active |= IFM_FDX;
5762 ctrl = CSR_READ(sc, WMREG_CTRL);
5763 if (ctrl & CTRL_RFCE)
5764 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5765 if (ctrl & CTRL_TFCE)
5766 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5767 }
5768
5769 /*
5770 * wm_tbi_mediachange: [ifmedia interface function]
5771 *
5772 * Set hardware to newly-selected media on a 1000BASE-X device.
5773 */
5774 static int
5775 wm_tbi_mediachange(struct ifnet *ifp)
5776 {
5777 struct wm_softc *sc = ifp->if_softc;
5778 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5779 uint32_t status;
5780 int i;
5781
5782 sc->sc_txcw = 0;
5783 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5784 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5785 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5786 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5787 sc->sc_txcw |= TXCW_ANE;
5788 } else {
5789 /*
5790 * If autonegotiation is turned off, force link up and turn on
5791 * full duplex
5792 */
5793 sc->sc_txcw &= ~TXCW_ANE;
5794 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5795 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5796 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5797 delay(1000);
5798 }
5799
5800 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5801 device_xname(sc->sc_dev),sc->sc_txcw));
5802 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5803 delay(10000);
5804
5805 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5806 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5807
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal.  On the 82544 and earlier
	 * the sense is inverted: the pin reads 0 when a signal is
	 * present (compare the sig computation in wm_check_for_link()).
	 */
5812 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5813 /* Have signal; wait for the link to come up. */
5814
5815 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5816 /*
5817 * Reset the link, and let autonegotiation do its thing
5818 */
5819 sc->sc_ctrl |= CTRL_LRST;
5820 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5821 delay(1000);
5822 sc->sc_ctrl &= ~CTRL_LRST;
5823 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5824 delay(1000);
5825 }
5826
5827 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5828 delay(10000);
5829 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5830 break;
5831 }
5832
5833 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5834 device_xname(sc->sc_dev),i));
5835
5836 status = CSR_READ(sc, WMREG_STATUS);
5837 DPRINTF(WM_DEBUG_LINK,
5838 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5839 device_xname(sc->sc_dev),status, STATUS_LU));
5840 if (status & STATUS_LU) {
5841 /* Link is up. */
5842 DPRINTF(WM_DEBUG_LINK,
5843 ("%s: LINK: set media -> link up %s\n",
5844 device_xname(sc->sc_dev),
5845 (status & STATUS_FD) ? "FDX" : "HDX"));
5846
5847 /*
5848 * NOTE: CTRL will update TFCE and RFCE automatically,
5849 * so we should update sc->sc_ctrl
5850 */
5851 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5852 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5853 sc->sc_fcrtl &= ~FCRTL_XONE;
5854 if (status & STATUS_FD)
5855 sc->sc_tctl |=
5856 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5857 else
5858 sc->sc_tctl |=
5859 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5860 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5861 sc->sc_fcrtl |= FCRTL_XONE;
5862 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5863 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5864 WMREG_OLD_FCRTL : WMREG_FCRTL,
5865 sc->sc_fcrtl);
5866 sc->sc_tbi_linkup = 1;
5867 } else {
5868 if (i == WM_LINKUP_TIMEOUT)
5869 wm_check_for_link(sc);
5870 /* Link is down. */
5871 DPRINTF(WM_DEBUG_LINK,
5872 ("%s: LINK: set media -> link down\n",
5873 device_xname(sc->sc_dev)));
5874 sc->sc_tbi_linkup = 0;
5875 }
5876 } else {
5877 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5878 device_xname(sc->sc_dev)));
5879 sc->sc_tbi_linkup = 0;
5880 }
5881
5882 wm_tbi_set_linkled(sc);
5883
5884 return 0;
5885 }
5886
5887 /*
5888 * wm_tbi_set_linkled:
5889 *
5890 * Update the link LED on 1000BASE-X devices.
5891 */
5892 static void
5893 wm_tbi_set_linkled(struct wm_softc *sc)
5894 {
5895
5896 if (sc->sc_tbi_linkup)
5897 sc->sc_ctrl |= CTRL_SWDPIN(0);
5898 else
5899 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5900
5901 /* 82540 or newer devices are active low */
5902 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5903
5904 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5905 }
5906
5907 /*
5908 * wm_tbi_check_link:
5909 *
5910 * Check the link on 1000BASE-X devices.
5911 */
5912 static void
5913 wm_tbi_check_link(struct wm_softc *sc)
5914 {
5915 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5916 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5917 uint32_t rxcw, ctrl, status;
5918
5919 status = CSR_READ(sc, WMREG_STATUS);
5920
5921 rxcw = CSR_READ(sc, WMREG_RXCW);
5922 ctrl = CSR_READ(sc, WMREG_CTRL);
5923
5924 /* set link status */
5925 if ((status & STATUS_LU) == 0) {
5926 DPRINTF(WM_DEBUG_LINK,
5927 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5928 sc->sc_tbi_linkup = 0;
5929 } else if (sc->sc_tbi_linkup == 0) {
5930 DPRINTF(WM_DEBUG_LINK,
5931 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5932 (status & STATUS_FD) ? "FDX" : "HDX"));
5933 sc->sc_tbi_linkup = 1;
5934 }
5935
5936 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5937 && ((status & STATUS_LU) == 0)) {
5938 sc->sc_tbi_linkup = 0;
5939 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5940 /* RXCFG storm! */
5941 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5942 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5943 wm_init(ifp);
5944 ifp->if_start(ifp);
5945 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5946 /* If the timer expired, retry autonegotiation */
5947 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5948 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5949 sc->sc_tbi_ticks = 0;
5950 /*
5951 * Reset the link, and let autonegotiation do
5952 * its thing
5953 */
5954 sc->sc_ctrl |= CTRL_LRST;
5955 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5956 delay(1000);
5957 sc->sc_ctrl &= ~CTRL_LRST;
5958 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5959 delay(1000);
5960 CSR_WRITE(sc, WMREG_TXCW,
5961 sc->sc_txcw & ~TXCW_ANE);
5962 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5963 }
5964 }
5965 }
5966
5967 wm_tbi_set_linkled(sc);
5968 }
5969
5970 /*
5971 * wm_gmii_reset:
5972 *
5973 * Reset the PHY.
5974 */
5975 static void
5976 wm_gmii_reset(struct wm_softc *sc)
5977 {
5978 uint32_t reg;
5979 int rv;
5980
5981 /* get phy semaphore */
5982 switch (sc->sc_type) {
5983 case WM_T_82571:
5984 case WM_T_82572:
5985 case WM_T_82573:
5986 case WM_T_82574:
5987 case WM_T_82583:
5988 /* XXX should get sw semaphore, too */
5989 rv = wm_get_swsm_semaphore(sc);
5990 break;
5991 case WM_T_82575:
5992 case WM_T_82576:
5993 case WM_T_82580:
5994 case WM_T_82580ER:
5995 case WM_T_I350:
5996 case WM_T_80003:
5997 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5998 break;
5999 case WM_T_ICH8:
6000 case WM_T_ICH9:
6001 case WM_T_ICH10:
6002 case WM_T_PCH:
6003 case WM_T_PCH2:
6004 rv = wm_get_swfwhw_semaphore(sc);
6005 break;
6006 default:
		/* nothing to do */
6008 rv = 0;
6009 break;
6010 }
6011 if (rv != 0) {
6012 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6013 __func__);
6014 return;
6015 }
6016
6017 switch (sc->sc_type) {
6018 case WM_T_82542_2_0:
6019 case WM_T_82542_2_1:
6020 /* null */
6021 break;
6022 case WM_T_82543:
6023 /*
6024 * With 82543, we need to force speed and duplex on the MAC
6025 * equal to what the PHY speed and duplex configuration is.
6026 * In addition, we need to perform a hardware reset on the PHY
6027 * to take it out of reset.
6028 */
6029 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6030 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6031
6032 /* The PHY reset pin is active-low. */
6033 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6034 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6035 CTRL_EXT_SWDPIN(4));
6036 reg |= CTRL_EXT_SWDPIO(4);
6037
6038 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6039 delay(10*1000);
6040
6041 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6042 delay(150);
6043 #if 0
6044 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6045 #endif
6046 delay(20*1000); /* XXX extra delay to get PHY ID? */
6047 break;
6048 case WM_T_82544: /* reset 10000us */
6049 case WM_T_82540:
6050 case WM_T_82545:
6051 case WM_T_82545_3:
6052 case WM_T_82546:
6053 case WM_T_82546_3:
6054 case WM_T_82541:
6055 case WM_T_82541_2:
6056 case WM_T_82547:
6057 case WM_T_82547_2:
6058 case WM_T_82571: /* reset 100us */
6059 case WM_T_82572:
6060 case WM_T_82573:
6061 case WM_T_82574:
6062 case WM_T_82575:
6063 case WM_T_82576:
6064 case WM_T_82580:
6065 case WM_T_82580ER:
6066 case WM_T_I350:
6067 case WM_T_82583:
6068 case WM_T_80003:
6069 /* generic reset */
6070 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6071 delay(20000);
6072 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6073 delay(20000);
6074
6075 if ((sc->sc_type == WM_T_82541)
6076 || (sc->sc_type == WM_T_82541_2)
6077 || (sc->sc_type == WM_T_82547)
6078 || (sc->sc_type == WM_T_82547_2)) {
			/* workarounds for IGP PHYs are done in igp_reset() */
6080 /* XXX add code to set LED after phy reset */
6081 }
6082 break;
6083 case WM_T_ICH8:
6084 case WM_T_ICH9:
6085 case WM_T_ICH10:
6086 case WM_T_PCH:
6087 case WM_T_PCH2:
6088 /* generic reset */
6089 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6090 delay(100);
6091 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6092 delay(150);
6093 break;
6094 default:
6095 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6096 __func__);
6097 break;
6098 }
6099
6100 /* release PHY semaphore */
6101 switch (sc->sc_type) {
6102 case WM_T_82571:
6103 case WM_T_82572:
6104 case WM_T_82573:
6105 case WM_T_82574:
6106 case WM_T_82583:
6107 /* XXX should put sw semaphore, too */
6108 wm_put_swsm_semaphore(sc);
6109 break;
6110 case WM_T_82575:
6111 case WM_T_82576:
6112 case WM_T_82580:
6113 case WM_T_82580ER:
6114 case WM_T_I350:
6115 case WM_T_80003:
6116 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6117 break;
6118 case WM_T_ICH8:
6119 case WM_T_ICH9:
6120 case WM_T_ICH10:
6121 case WM_T_PCH:
6122 case WM_T_PCH2:
6123 wm_put_swfwhw_semaphore(sc);
6124 break;
6125 default:
		/* nothing to do */
6128 break;
6129 }
6130
6131 /* get_cfg_done */
6132 wm_get_cfg_done(sc);
6133
6134 /* extra setup */
6135 switch (sc->sc_type) {
6136 case WM_T_82542_2_0:
6137 case WM_T_82542_2_1:
6138 case WM_T_82543:
6139 case WM_T_82544:
6140 case WM_T_82540:
6141 case WM_T_82545:
6142 case WM_T_82545_3:
6143 case WM_T_82546:
6144 case WM_T_82546_3:
6145 case WM_T_82541_2:
6146 case WM_T_82547_2:
6147 case WM_T_82571:
6148 case WM_T_82572:
6149 case WM_T_82573:
6150 case WM_T_82574:
6151 case WM_T_82575:
6152 case WM_T_82576:
6153 case WM_T_82580:
6154 case WM_T_82580ER:
6155 case WM_T_I350:
6156 case WM_T_82583:
6157 case WM_T_80003:
6158 /* null */
6159 break;
6160 case WM_T_82541:
6161 case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
6163 break;
6164 case WM_T_ICH8:
6165 case WM_T_ICH9:
6166 case WM_T_ICH10:
6167 case WM_T_PCH:
6168 case WM_T_PCH2:
		/* Allow time for h/w to get to a quiescent state after reset */
6170 delay(10*1000);
6171
6172 if (sc->sc_type == WM_T_PCH)
6173 wm_hv_phy_workaround_ich8lan(sc);
6174
6175 if (sc->sc_type == WM_T_PCH2)
6176 wm_lv_phy_workaround_ich8lan(sc);
6177
6178 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6179 /*
			 * Dummy read to clear the PHY wakeup bit after LCD
			 * reset
6182 */
6183 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6184 }
6185
6186 /*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
6189 */
6190
6191 /* Configure the LCD with the OEM bits in NVM */
6192 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6193 /*
6194 * Disable LPLU.
6195 * XXX It seems that 82567 has LPLU, too.
6196 */
6197 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6198 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6199 reg |= HV_OEM_BITS_ANEGNOW;
6200 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6201 }
6202 break;
6203 default:
6204 panic("%s: unknown type\n", __func__);
6205 break;
6206 }
6207 }
6208
6209 /*
6210 * wm_gmii_mediainit:
6211 *
6212 * Initialize media for use on 1000BASE-T devices.
6213 */
6214 static void
6215 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6216 {
6217 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6218
6219 /* We have MII. */
6220 sc->sc_flags |= WM_F_HAS_MII;
6221
6222 if (sc->sc_type == WM_T_80003)
6223 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6224 else
6225 sc->sc_tipg = TIPG_1000T_DFLT;
6226
6227 /*
6228 * Let the chip set speed/duplex on its own based on
6229 * signals from the PHY.
6230 * XXXbouyer - I'm not sure this is right for the 80003,
6231 * the em driver only sets CTRL_SLU here - but it seems to work.
6232 */
6233 sc->sc_ctrl |= CTRL_SLU;
6234 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6235
6236 /* Initialize our media structures and probe the GMII. */
6237 sc->sc_mii.mii_ifp = ifp;
6238
6239 switch (prodid) {
6240 case PCI_PRODUCT_INTEL_PCH_M_LM:
6241 case PCI_PRODUCT_INTEL_PCH_M_LC:
6242 /* 82577 */
6243 sc->sc_phytype = WMPHY_82577;
6244 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6245 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6246 break;
6247 case PCI_PRODUCT_INTEL_PCH_D_DM:
6248 case PCI_PRODUCT_INTEL_PCH_D_DC:
6249 /* 82578 */
6250 sc->sc_phytype = WMPHY_82578;
6251 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6252 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6253 break;
6254 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6255 case PCI_PRODUCT_INTEL_PCH2_LV_V:
		/* 82579 */
6257 sc->sc_phytype = WMPHY_82579;
6258 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
6259 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
6260 break;
6261 case PCI_PRODUCT_INTEL_82801I_BM:
6262 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6263 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6264 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6265 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6266 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6267 /* 82567 */
6268 sc->sc_phytype = WMPHY_BM;
6269 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
6270 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
6271 break;
6272 default:
6273 if ((sc->sc_flags & WM_F_SGMII) != 0) {
6274 sc->sc_mii.mii_readreg = wm_sgmii_readreg;
6275 sc->sc_mii.mii_writereg = wm_sgmii_writereg;
6276 } else if (sc->sc_type >= WM_T_80003) {
6277 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
6278 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
6279 } else if (sc->sc_type >= WM_T_82544) {
6280 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
6281 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
6282 } else {
6283 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
6284 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
6285 }
6286 break;
6287 }
6288 sc->sc_mii.mii_statchg = wm_gmii_statchg;
6289
6290 wm_gmii_reset(sc);
6291
6292 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6293 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
6294 wm_gmii_mediastatus);
6295
6296 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6297 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6298 || (sc->sc_type == WM_T_I350)) {
6299 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6300 /* Attach only one port */
6301 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6302 MII_OFFSET_ANY, MIIF_DOPAUSE);
6303 } else {
6304 int i;
6305 uint32_t ctrl_ext;
6306
6307 /* Power on sgmii phy if it is disabled */
6308 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6309 CSR_WRITE(sc, WMREG_CTRL_EXT,
6310 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6311 CSR_WRITE_FLUSH(sc);
6312 delay(300*1000); /* XXX too long */
6313
			/* try PHY addresses 1 through 7 */
6315 for (i = 1; i < 8; i++)
6316 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6317 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6318
6319 /* restore previous sfp cage power state */
6320 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6321 }
6322 } else {
6323 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6324 MII_OFFSET_ANY, MIIF_DOPAUSE);
6325 }
6326
6327 if ((sc->sc_type == WM_T_PCH2) &&
6328 (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL)) {
6329 wm_set_mdio_slow_mode_hv(sc);
6330 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6331 MII_OFFSET_ANY, MIIF_DOPAUSE);
6332 }
6333
6334 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
6335 /* if failed, retry with *_bm_* */
6336 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
6337 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
6338
6339 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6340 MII_OFFSET_ANY, MIIF_DOPAUSE);
6341 }
6342 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
6343 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6344 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
6345 sc->sc_phytype = WMPHY_NONE;
6346 } else {
6347 /* Check PHY type */
6348 uint32_t model;
6349 struct mii_softc *child;
6350
6351 child = LIST_FIRST(&sc->sc_mii.mii_phys);
6352 if (device_is_a(child->mii_dev, "igphy")) {
6353 struct igphy_softc *isc = (struct igphy_softc *)child;
6354
6355 model = isc->sc_mii.mii_mpd_model;
6356 if (model == MII_MODEL_yyINTEL_I82566)
6357 sc->sc_phytype = WMPHY_IGP_3;
6358 }
6359
6360 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
6361 }
6362 }
6363
6364 /*
6365 * wm_gmii_mediastatus: [ifmedia interface function]
6366 *
6367 * Get the current interface media status on a 1000BASE-T device.
6368 */
6369 static void
6370 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6371 {
6372 struct wm_softc *sc = ifp->if_softc;
6373
6374 ether_mediastatus(ifp, ifmr);
6375 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6376 | sc->sc_flowflags;
6377 }
6378
6379 /*
6380 * wm_gmii_mediachange: [ifmedia interface function]
6381 *
6382 * Set hardware to newly-selected media on a 1000BASE-T device.
6383 */
6384 static int
6385 wm_gmii_mediachange(struct ifnet *ifp)
6386 {
6387 struct wm_softc *sc = ifp->if_softc;
6388 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6389 int rc;
6390
6391 if ((ifp->if_flags & IFF_UP) == 0)
6392 return 0;
6393
6394 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6395 sc->sc_ctrl |= CTRL_SLU;
6396 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6397 || (sc->sc_type > WM_T_82543)) {
6398 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6399 } else {
6400 sc->sc_ctrl &= ~CTRL_ASDE;
6401 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6402 if (ife->ifm_media & IFM_FDX)
6403 sc->sc_ctrl |= CTRL_FD;
6404 switch (IFM_SUBTYPE(ife->ifm_media)) {
6405 case IFM_10_T:
6406 sc->sc_ctrl |= CTRL_SPEED_10;
6407 break;
6408 case IFM_100_TX:
6409 sc->sc_ctrl |= CTRL_SPEED_100;
6410 break;
6411 case IFM_1000_T:
6412 sc->sc_ctrl |= CTRL_SPEED_1000;
6413 break;
6414 default:
6415 panic("wm_gmii_mediachange: bad media 0x%x",
6416 ife->ifm_media);
6417 }
6418 }
6419 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6420 if (sc->sc_type <= WM_T_82543)
6421 wm_gmii_reset(sc);
6422
6423 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6424 return 0;
6425 return rc;
6426 }
6427
6428 #define MDI_IO CTRL_SWDPIN(2)
6429 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6430 #define MDI_CLK CTRL_SWDPIN(3)
6431
6432 static void
6433 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6434 {
6435 uint32_t i, v;
6436
6437 v = CSR_READ(sc, WMREG_CTRL);
6438 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6439 v |= MDI_DIR | CTRL_SWDPIO(3);
6440
6441 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6442 if (data & i)
6443 v |= MDI_IO;
6444 else
6445 v &= ~MDI_IO;
6446 CSR_WRITE(sc, WMREG_CTRL, v);
6447 delay(10);
6448 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6449 delay(10);
6450 CSR_WRITE(sc, WMREG_CTRL, v);
6451 delay(10);
6452 }
6453 }
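/*
 * The 82543 has no MDIC register, so MII management frames are
 * bit-banged through the software-definable pins: each bit is
 * presented on MDI_IO and latched by pulsing MDI_CLK low-high-low
 * with 10us hold times.
 */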
6454
6455 static uint32_t
6456 i82543_mii_recvbits(struct wm_softc *sc)
6457 {
6458 uint32_t v, i, data = 0;
6459
6460 v = CSR_READ(sc, WMREG_CTRL);
6461 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6462 v |= CTRL_SWDPIO(3);
6463
6464 CSR_WRITE(sc, WMREG_CTRL, v);
6465 delay(10);
6466 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6467 delay(10);
6468 CSR_WRITE(sc, WMREG_CTRL, v);
6469 delay(10);
6470
6471 for (i = 0; i < 16; i++) {
6472 data <<= 1;
6473 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6474 delay(10);
6475 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6476 data |= 1;
6477 CSR_WRITE(sc, WMREG_CTRL, v);
6478 delay(10);
6479 }
6480
6481 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6482 delay(10);
6483 CSR_WRITE(sc, WMREG_CTRL, v);
6484 delay(10);
6485
6486 return data;
6487 }
6488
6489 #undef MDI_IO
6490 #undef MDI_DIR
6491 #undef MDI_CLK
6492
6493 /*
6494 * wm_gmii_i82543_readreg: [mii interface function]
6495 *
6496 * Read a PHY register on the GMII (i82543 version).
6497 */
6498 static int
6499 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6500 {
6501 struct wm_softc *sc = device_private(self);
6502 int rv;
6503
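	/*
	 * A read is a 32-bit preamble of all ones followed by a 14-bit
	 * command (start bits, read opcode, PHY and register address)
	 * sent MSB first; i82543_mii_recvbits() then clocks a turnaround
	 * cycle and 16 data bits back in from the PHY.
	 */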
6504 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6505 i82543_mii_sendbits(sc, reg | (phy << 5) |
6506 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6507 rv = i82543_mii_recvbits(sc) & 0xffff;
6508
6509 DPRINTF(WM_DEBUG_GMII,
6510 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6511 device_xname(sc->sc_dev), phy, reg, rv));
6512
6513 return rv;
6514 }
6515
6516 /*
6517 * wm_gmii_i82543_writereg: [mii interface function]
6518 *
6519 * Write a PHY register on the GMII (i82543 version).
6520 */
6521 static void
6522 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6523 {
6524 struct wm_softc *sc = device_private(self);
6525
6526 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6527 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6528 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6529 (MII_COMMAND_START << 30), 32);
6530 }
6531
6532 /*
6533 * wm_gmii_i82544_readreg: [mii interface function]
6534 *
6535 * Read a PHY register on the GMII.
6536 */
6537 static int
6538 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6539 {
6540 struct wm_softc *sc = device_private(self);
6541 uint32_t mdic = 0;
6542 int i, rv;
6543
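	/*
	 * Chips from the 82544 on run the MII frame in hardware: the
	 * opcode, PHY address and register address go into the single
	 * 32-bit MDIC register, and MDIC_READY is set once the serial
	 * transaction completes.
	 */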
6544 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6545 MDIC_REGADD(reg));
6546
6547 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6548 mdic = CSR_READ(sc, WMREG_MDIC);
6549 if (mdic & MDIC_READY)
6550 break;
6551 delay(50);
6552 }
6553
6554 if ((mdic & MDIC_READY) == 0) {
6555 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6556 device_xname(sc->sc_dev), phy, reg);
6557 rv = 0;
6558 } else if (mdic & MDIC_E) {
6559 #if 0 /* This is normal if no PHY is present. */
6560 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6561 device_xname(sc->sc_dev), phy, reg);
6562 #endif
6563 rv = 0;
6564 } else {
6565 rv = MDIC_DATA(mdic);
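		/*
		 * All-ones data usually means no PHY responded and the
		 * bus floated high, so treat it as "no data".
		 */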
6566 if (rv == 0xffff)
6567 rv = 0;
6568 }
6569
6570 return rv;
6571 }
6572
6573 /*
6574 * wm_gmii_i82544_writereg: [mii interface function]
6575 *
6576 * Write a PHY register on the GMII.
6577 */
6578 static void
6579 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6580 {
6581 struct wm_softc *sc = device_private(self);
6582 uint32_t mdic = 0;
6583 int i;
6584
6585 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6586 MDIC_REGADD(reg) | MDIC_DATA(val));
6587
6588 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6589 mdic = CSR_READ(sc, WMREG_MDIC);
6590 if (mdic & MDIC_READY)
6591 break;
6592 delay(50);
6593 }
6594
6595 if ((mdic & MDIC_READY) == 0)
6596 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6597 device_xname(sc->sc_dev), phy, reg);
6598 else if (mdic & MDIC_E)
6599 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6600 device_xname(sc->sc_dev), phy, reg);
6601 }
6602
6603 /*
6604 * wm_gmii_i80003_readreg: [mii interface function]
6605 *
 *	Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6609 */
6610 static int
6611 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6612 {
6613 struct wm_softc *sc = device_private(self);
6614 int sem;
6615 int rv;
6616
6617 if (phy != 1) /* only one PHY on kumeran bus */
6618 return 0;
6619
6620 sem = swfwphysem[sc->sc_funcid];
6621 if (wm_get_swfw_semaphore(sc, sem)) {
6622 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6623 __func__);
6624 return 0;
6625 }
6626
6627 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6628 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6629 reg >> GG82563_PAGE_SHIFT);
6630 } else {
6631 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6632 reg >> GG82563_PAGE_SHIFT);
6633 }
	/*
	 * Wait an extra 200us to work around a bug with the ready bit in
	 * the MDIC register.
	 */
6635 delay(200);
6636 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6637 delay(200);
6638
6639 wm_put_swfw_semaphore(sc, sem);
6640 return rv;
6641 }
6642
6643 /*
6644 * wm_gmii_i80003_writereg: [mii interface function]
6645 *
 *	Write a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6649 */
6650 static void
6651 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6652 {
6653 struct wm_softc *sc = device_private(self);
6654 int sem;
6655
6656 if (phy != 1) /* only one PHY on kumeran bus */
6657 return;
6658
6659 sem = swfwphysem[sc->sc_funcid];
6660 if (wm_get_swfw_semaphore(sc, sem)) {
6661 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6662 __func__);
6663 return;
6664 }
6665
6666 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6667 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6668 reg >> GG82563_PAGE_SHIFT);
6669 } else {
6670 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6671 reg >> GG82563_PAGE_SHIFT);
6672 }
	/*
	 * Wait an extra 200us to work around a bug with the ready bit in
	 * the MDIC register.
	 */
6674 delay(200);
6675 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6676 delay(200);
6677
6678 wm_put_swfw_semaphore(sc, sem);
6679 }
6680
6681 /*
6682 * wm_gmii_bm_readreg: [mii interface function]
6683 *
 *	Read a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6687 */
6688 static int
6689 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6690 {
6691 struct wm_softc *sc = device_private(self);
6692 int sem;
6693 int rv;
6694
6695 sem = swfwphysem[sc->sc_funcid];
6696 if (wm_get_swfw_semaphore(sc, sem)) {
6697 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6698 __func__);
6699 return 0;
6700 }
6701
6702 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6703 if (phy == 1)
6704 wm_gmii_i82544_writereg(self, phy, 0x1f,
6705 reg);
6706 else
6707 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6708 reg >> GG82563_PAGE_SHIFT);
6709
6710 }
6711
6712 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6713 wm_put_swfw_semaphore(sc, sem);
6714 return rv;
6715 }
6716
6717 /*
6718 * wm_gmii_bm_writereg: [mii interface function]
6719 *
 *	Write a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6723 */
6724 static void
6725 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6726 {
6727 struct wm_softc *sc = device_private(self);
6728 int sem;
6729
6730 sem = swfwphysem[sc->sc_funcid];
6731 if (wm_get_swfw_semaphore(sc, sem)) {
6732 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6733 __func__);
6734 return;
6735 }
6736
6737 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6738 if (phy == 1)
6739 wm_gmii_i82544_writereg(self, phy, 0x1f,
6740 reg);
6741 else
6742 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6743 reg >> GG82563_PAGE_SHIFT);
6744
6745 }
6746
6747 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6748 wm_put_swfw_semaphore(sc, sem);
6749 }
6750
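/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  The wakeup
 *	window has to be opened through the enable register on page 769
 *	first; the enable register is written back afterwards.
 */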
6751 static void
6752 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6753 {
6754 struct wm_softc *sc = device_private(self);
6755 uint16_t regnum = BM_PHY_REG_NUM(offset);
6756 uint16_t wuce;
6757
6758 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6759 if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
6761 }
6762
6763 /* Set page 769 */
6764 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6765 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6766
6767 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6768
6769 wuce &= ~BM_WUC_HOST_WU_BIT;
6770 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6771 wuce | BM_WUC_ENABLE_BIT);
6772
6773 /* Select page 800 */
6774 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6775 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6776
6777 /* Write page 800 */
6778 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6779
6780 if (rd)
6781 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6782 else
6783 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6784
6785 /* Set page 769 */
6786 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6787 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6788
6789 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6790 }
6791
6792 /*
6793 * wm_gmii_hv_readreg: [mii interface function]
6794 *
 *	Read a PHY register on the HV PHY (82577/82578/82579).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6798 */
6799 static int
6800 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6801 {
6802 struct wm_softc *sc = device_private(self);
6803 uint16_t page = BM_PHY_REG_PAGE(reg);
6804 uint16_t regnum = BM_PHY_REG_NUM(reg);
6805 uint16_t val;
6806 int rv;
6807
6808 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6809 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6810 __func__);
6811 return 0;
6812 }
6813
6814 /* XXX Workaround failure in MDIO access while cable is disconnected */
6815 if (sc->sc_phytype == WMPHY_82577) {
6816 /* XXX must write */
6817 }
6818
6819 /* Page 800 works differently than the rest so it has its own func */
6820 if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return val;
6823 }
6824
	/*
	 * Pages lower than 768 work differently from the rest and are
	 * not handled here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return 0;
	}
6833
6834 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6835 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6836 page << BME1000_PAGE_SHIFT);
6837 }
6838
6839 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6840 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6841 return rv;
6842 }
6843
6844 /*
6845 * wm_gmii_hv_writereg: [mii interface function]
6846 *
 *	Write a PHY register on the HV PHY (82577/82578/82579).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6850 */
6851 static void
6852 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6853 {
6854 struct wm_softc *sc = device_private(self);
6855 uint16_t page = BM_PHY_REG_PAGE(reg);
6856 uint16_t regnum = BM_PHY_REG_NUM(reg);
6857
6858 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6859 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6860 __func__);
6861 return;
6862 }
6863
6864 /* XXX Workaround failure in MDIO access while cable is disconnected */
6865
6866 /* Page 800 works differently than the rest so it has its own func */
6867 if (page == BM_WUC_PAGE) {
6868 uint16_t tmp;
6869
6870 tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
6873 }
6874
	/*
	 * Pages lower than 768 work differently from the rest and are
	 * not handled here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6883
6884 /*
6885 * XXX Workaround MDIO accesses being disabled after entering IEEE
6886 * Power Down (whenever bit 11 of the PHY control register is set)
6887 */
6888
6889 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6890 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6891 page << BME1000_PAGE_SHIFT);
6892 }
6893
6894 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6895 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6896 }
6897
6898 /*
 * wm_sgmii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the SGMII (via the I2C command interface).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6904 */
6905 static int
6906 wm_sgmii_readreg(device_t self, int phy, int reg)
6907 {
6908 struct wm_softc *sc = device_private(self);
6909 uint32_t i2ccmd;
6910 int i, rv;
6911
6912 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6913 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6914 __func__);
6915 return 0;
6916 }
6917
6918 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6919 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6920 | I2CCMD_OPCODE_READ;
6921 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6922
6923 /* Poll the ready bit */
6924 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6925 delay(50);
6926 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6927 if (i2ccmd & I2CCMD_READY)
6928 break;
6929 }
6930 if ((i2ccmd & I2CCMD_READY) == 0)
6931 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6932 if ((i2ccmd & I2CCMD_ERROR) != 0)
6933 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6934
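	/*
	 * The data in the I2CCMD register arrives byte-swapped
	 * (presumably the PHY sends the high byte first over I2C), so
	 * swap it back into host order.
	 */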
6935 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6936
6937 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6938 return rv;
6939 }
6940
6941 /*
 * wm_sgmii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the SGMII (via the I2C command interface).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6947 */
6948 static void
6949 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6950 {
6951 struct wm_softc *sc = device_private(self);
6952 uint32_t i2ccmd;
6953 int i;
6954
6955 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6956 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6957 __func__);
6958 return;
6959 }
6960
6961 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6962 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6963 | I2CCMD_OPCODE_WRITE;
6964 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6965
6966 /* Poll the ready bit */
6967 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6968 delay(50);
6969 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6970 if (i2ccmd & I2CCMD_READY)
6971 break;
6972 }
6973 if ((i2ccmd & I2CCMD_READY) == 0)
6974 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6975 if ((i2ccmd & I2CCMD_ERROR) != 0)
6976 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6977
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6979 }
6980
6981 /*
6982 * wm_gmii_statchg: [mii interface function]
6983 *
6984 * Callback from MII layer when media changes.
6985 */
6986 static void
6987 wm_gmii_statchg(struct ifnet *ifp)
6988 {
6989 struct wm_softc *sc = ifp->if_softc;
6990 struct mii_data *mii = &sc->sc_mii;
6991
6992 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6993 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6994 sc->sc_fcrtl &= ~FCRTL_XONE;
6995
6996 /*
6997 * Get flow control negotiation result.
6998 */
6999 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7000 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7001 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7002 mii->mii_media_active &= ~IFM_ETH_FMASK;
7003 }
7004
7005 if (sc->sc_flowflags & IFM_FLOW) {
7006 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7007 sc->sc_ctrl |= CTRL_TFCE;
7008 sc->sc_fcrtl |= FCRTL_XONE;
7009 }
7010 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7011 sc->sc_ctrl |= CTRL_RFCE;
7012 }
7013
7014 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7015 DPRINTF(WM_DEBUG_LINK,
7016 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7017 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7018 } else {
7019 DPRINTF(WM_DEBUG_LINK,
7020 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7021 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7022 }
7023
7024 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7025 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7026 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7027 : WMREG_FCRTL, sc->sc_fcrtl);
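	/*
	 * The 80003 reaches its PHY over the Kumeran interface, and its
	 * half-duplex control and inter-packet gap have to track the
	 * negotiated speed: 1000BASE-T and 10/100 use different defaults.
	 */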
7028 if (sc->sc_type == WM_T_80003) {
7029 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7030 case IFM_1000_T:
7031 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7032 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7033 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7034 break;
7035 default:
7036 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7037 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7038 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7039 break;
7040 }
7041 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7042 }
7043 }
7044
7045 /*
7046 * wm_kmrn_readreg:
7047 *
7048 * Read a kumeran register
7049 */
7050 static int
7051 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7052 {
7053 int rv;
7054
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7056 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7057 aprint_error_dev(sc->sc_dev,
7058 "%s: failed to get semaphore\n", __func__);
7059 return 0;
7060 }
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7062 if (wm_get_swfwhw_semaphore(sc)) {
7063 aprint_error_dev(sc->sc_dev,
7064 "%s: failed to get semaphore\n", __func__);
7065 return 0;
7066 }
7067 }
7068
7069 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7070 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7071 KUMCTRLSTA_REN);
7072 delay(2);
7073
7074 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7075
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7079 wm_put_swfwhw_semaphore(sc);
7080
7081 return rv;
7082 }
7083
7084 /*
7085 * wm_kmrn_writereg:
7086 *
7087 * Write a kumeran register
7088 */
7089 static void
7090 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7091 {
7092
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7094 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7095 aprint_error_dev(sc->sc_dev,
7096 "%s: failed to get semaphore\n", __func__);
7097 return;
7098 }
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7100 if (wm_get_swfwhw_semaphore(sc)) {
7101 aprint_error_dev(sc->sc_dev,
7102 "%s: failed to get semaphore\n", __func__);
7103 return;
7104 }
7105 }
7106
7107 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7108 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7109 (val & KUMCTRLSTA_MASK));
7110
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7114 wm_put_swfwhw_semaphore(sc);
7115 }
7116
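/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM.  On the 82573, 82574
 *	and 82583, EECD bits 15 and 16 both set mean the part uses Flash
 *	instead, so return 0.
 */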
7117 static int
7118 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7119 {
7120 uint32_t eecd = 0;
7121
7122 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7123 || sc->sc_type == WM_T_82583) {
7124 eecd = CSR_READ(sc, WMREG_EECD);
7125
7126 /* Isolate bits 15 & 16 */
7127 eecd = ((eecd >> 15) & 0x03);
7128
7129 /* If both bits are set, device is Flash type */
7130 if (eecd == 0x03)
7131 return 0;
7132 }
7133 return 1;
7134 }
7135
7136 static int
7137 wm_get_swsm_semaphore(struct wm_softc *sc)
7138 {
7139 int32_t timeout;
7140 uint32_t swsm;
7141
7142 /* Get the FW semaphore. */
7143 timeout = 1000 + 1; /* XXX */
7144 while (timeout) {
7145 swsm = CSR_READ(sc, WMREG_SWSM);
7146 swsm |= SWSM_SWESMBI;
7147 CSR_WRITE(sc, WMREG_SWSM, swsm);
7148 /* if we managed to set the bit we got the semaphore. */
7149 swsm = CSR_READ(sc, WMREG_SWSM);
7150 if (swsm & SWSM_SWESMBI)
7151 break;
7152
7153 delay(50);
7154 timeout--;
7155 }
7156
7157 if (timeout == 0) {
7158 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7159 /* Release semaphores */
7160 wm_put_swsm_semaphore(sc);
7161 return 1;
7162 }
7163 return 0;
7164 }
7165
7166 static void
7167 wm_put_swsm_semaphore(struct wm_softc *sc)
7168 {
7169 uint32_t swsm;
7170
7171 swsm = CSR_READ(sc, WMREG_SWSM);
7172 swsm &= ~(SWSM_SWESMBI);
7173 CSR_WRITE(sc, WMREG_SWSM, swsm);
7174 }
7175
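/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software half of a SW/FW sync bit.  The SWSM
 *	hardware semaphore (when present) guards the SW_FW_SYNC register
 *	itself; a resource is free only when both its software and
 *	firmware bits are clear.
 */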
7176 static int
7177 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7178 {
7179 uint32_t swfw_sync;
7180 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7181 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
7183
7184 for (timeout = 0; timeout < 200; timeout++) {
7185 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7186 if (wm_get_swsm_semaphore(sc)) {
7187 aprint_error_dev(sc->sc_dev,
7188 "%s: failed to get semaphore\n",
7189 __func__);
7190 return 1;
7191 }
7192 }
7193 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7194 if ((swfw_sync & (swmask | fwmask)) == 0) {
7195 swfw_sync |= swmask;
7196 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7197 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7198 wm_put_swsm_semaphore(sc);
7199 return 0;
7200 }
7201 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7202 wm_put_swsm_semaphore(sc);
7203 delay(5000);
7204 }
7205 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7206 device_xname(sc->sc_dev), mask, swfw_sync);
7207 return 1;
7208 }
7209
7210 static void
7211 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7212 {
7213 uint32_t swfw_sync;
7214
7215 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7216 while (wm_get_swsm_semaphore(sc) != 0)
7217 continue;
7218 }
7219 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7220 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7221 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7222 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7223 wm_put_swsm_semaphore(sc);
7224 }
7225
7226 static int
7227 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7228 {
7229 uint32_t ext_ctrl;
	int timeout;
7231
7232 for (timeout = 0; timeout < 200; timeout++) {
7233 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7234 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7235 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7236
7237 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7238 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7239 return 0;
7240 delay(5000);
7241 }
7242 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7243 device_xname(sc->sc_dev), ext_ctrl);
7244 return 1;
7245 }
7246
7247 static void
7248 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7249 {
7250 uint32_t ext_ctrl;
7251 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7252 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7253 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7254 }
7255
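/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Work out which of the two NVM banks is the valid one.  Most
 *	chips expose this as the EECD SEC1VAL bit; on ICH10 and PCH the
 *	signature byte of each bank is read instead, and 10b in its top
 *	two bits marks the valid bank.
 */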
7256 static int
7257 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7258 {
7259 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7260 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7261
7262 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
7263 /* Value of bit 22 corresponds to the flash bank we're on. */
7264 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
7265 } else {
7266 uint8_t bank_high_byte;
7267 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
7268 if ((bank_high_byte & 0xc0) == 0x80)
7269 *bank = 0;
7270 else {
7271 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7272 &bank_high_byte);
7273 if ((bank_high_byte & 0xc0) == 0x80)
7274 *bank = 1;
7275 else {
7276 aprint_error_dev(sc->sc_dev,
7277 "EEPROM not present\n");
7278 return -1;
7279 }
7280 }
7281 }
7282
7283 return 0;
7284 }
7285
7286 /******************************************************************************
7287 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7288 * register.
7289 *
7290 * sc - Struct containing variables accessed by shared code
7291 * offset - offset of word in the EEPROM to read
7292 * data - word read from the EEPROM
7293 * words - number of words to read
7294 *****************************************************************************/
7295 static int
7296 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7297 {
7298 int32_t error = 0;
7299 uint32_t flash_bank = 0;
7300 uint32_t act_offset = 0;
7301 uint32_t bank_offset = 0;
7302 uint16_t word = 0;
7303 uint16_t i = 0;
7304
7305 /* We need to know which is the valid flash bank. In the event
7306 * that we didn't allocate eeprom_shadow_ram, we may not be
7307 * managing flash_bank. So it cannot be trusted and needs
7308 * to be updated with each read.
7309 */
7310 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7311 if (error) {
7312 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7313 __func__);
7314 return error;
7315 }
7316
	/* Adjust the offset if we're on bank 1, accounting for word size */
7318 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7319
7320 error = wm_get_swfwhw_semaphore(sc);
7321 if (error) {
7322 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7323 __func__);
7324 return error;
7325 }
7326
7327 for (i = 0; i < words; i++) {
7328 /* The NVM part needs a byte offset, hence * 2 */
7329 act_offset = bank_offset + ((offset + i) * 2);
7330 error = wm_read_ich8_word(sc, act_offset, &word);
7331 if (error) {
7332 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
7333 __func__);
7334 break;
7335 }
7336 data[i] = word;
7337 }
7338
7339 wm_put_swfwhw_semaphore(sc);
7340 return error;
7341 }
7342
7343 /******************************************************************************
7344 * This function does initial flash setup so that a new read/write/erase cycle
7345 * can be started.
7346 *
7347 * sc - The pointer to the hw structure
7348 ****************************************************************************/
7349 static int32_t
7350 wm_ich8_cycle_init(struct wm_softc *sc)
7351 {
7352 uint16_t hsfsts;
7353 int32_t error = 1;
7354 int32_t i = 0;
7355
7356 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7357
	/* Maybe we should check the Flash Descriptor Valid bit in HW status */
7359 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7360 return error;
7361 }
7362
	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
7365 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7366
7367 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7368
7369 /*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which could then be used to tell whether a
	 * cycle is in progress or has completed.  We should also have
	 * some software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that access by two threads is
	 * serialized, or some way to keep two threads from starting a
	 * cycle at the same time.
7378 */
7379
7380 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7381 /*
7382 * There is no cycle running at present, so we can start a
7383 * cycle
7384 */
7385
7386 /* Begin by setting Flash Cycle Done. */
7387 hsfsts |= HSFSTS_DONE;
7388 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7389 error = 0;
7390 } else {
7391 /*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
7394 */
7395 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7396 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7397 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7398 error = 0;
7399 break;
7400 }
7401 delay(1);
7402 }
7403 if (error == 0) {
7404 /*
			 * The previous cycle finished within the timeout;
			 * now set the Flash Cycle Done bit.
7407 */
7408 hsfsts |= HSFSTS_DONE;
7409 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7410 }
7411 }
7412 return error;
7413 }
7414
7415 /******************************************************************************
7416 * This function starts a flash cycle and waits for its completion
7417 *
7418 * sc - The pointer to the hw structure
7419 ****************************************************************************/
7420 static int32_t
7421 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7422 {
7423 uint16_t hsflctl;
7424 uint16_t hsfsts;
7425 int32_t error = 1;
7426 uint32_t i = 0;
7427
7428 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7429 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7430 hsflctl |= HSFCTL_GO;
7431 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7432
7433 /* wait till FDONE bit is set to 1 */
7434 do {
7435 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7436 if (hsfsts & HSFSTS_DONE)
7437 break;
7438 delay(1);
7439 i++;
7440 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7442 error = 0;
7443
7444 return error;
7445 }
7446
7447 /******************************************************************************
7448 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7449 *
7450 * sc - The pointer to the hw structure
7451 * index - The index of the byte or word to read.
7452 * size - Size of data to read, 1=byte 2=word
7453 * data - Pointer to the word to store the value read.
7454 *****************************************************************************/
7455 static int32_t
7456 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7457 uint32_t size, uint16_t* data)
7458 {
7459 uint16_t hsfsts;
7460 uint16_t hsflctl;
7461 uint32_t flash_linear_address;
7462 uint32_t flash_data = 0;
7463 int32_t error = 1;
7464 int32_t count = 0;
7465
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
7468 return error;
7469
7470 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7471 sc->sc_ich8_flash_base;
7472
7473 do {
7474 delay(1);
7475 /* Steps */
7476 error = wm_ich8_cycle_init(sc);
7477 if (error)
7478 break;
7479
7480 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7481 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7482 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7483 & HSFCTL_BCOUNT_MASK;
7484 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7485 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7486
7487 /*
7488 * Write the last 24 bits of index into Flash Linear address
7489 * field in Flash Address
7490 */
7491 /* TODO: TBD maybe check the index against the size of flash */
7492
7493 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7494
7495 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7496
7497 /*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read in the Flash Data0
		 * register, least significant byte first.
7502 */
7503 if (error == 0) {
7504 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7505 if (size == 1)
7506 *data = (uint8_t)(flash_data & 0x000000FF);
7507 else if (size == 2)
7508 *data = (uint16_t)(flash_data & 0x0000FFFF);
7509 break;
7510 } else {
7511 /*
7512 * If we've gotten here, then things are probably
7513 * completely hosed, but if the error condition is
7514 * detected, it won't hurt to give it another try...
7515 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7516 */
7517 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7518 if (hsfsts & HSFSTS_ERR) {
7519 /* Repeat for some time before giving up. */
7520 continue;
7521 } else if ((hsfsts & HSFSTS_DONE) == 0)
7522 break;
7523 }
7524 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7525
7526 return error;
7527 }
7528
7529 /******************************************************************************
7530 * Reads a single byte from the NVM using the ICH8 flash access registers.
7531 *
7532 * sc - pointer to wm_hw structure
7533 * index - The index of the byte to read.
7534 * data - Pointer to a byte to store the value read.
7535 *****************************************************************************/
7536 static int32_t
7537 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7538 {
7539 int32_t status;
7540 uint16_t word = 0;
7541
7542 status = wm_read_ich8_data(sc, index, 1, &word);
7543 if (status == 0)
7544 *data = (uint8_t)word;
7545 else
7546 *data = 0;
7547
7548 return status;
7549 }
7550
7551 /******************************************************************************
7552 * Reads a word from the NVM using the ICH8 flash access registers.
7553 *
7554 * sc - pointer to wm_hw structure
7555 * index - The starting byte index of the word to read.
7556 * data - Pointer to a word to store the value read.
7557 *****************************************************************************/
7558 static int32_t
7559 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7560 {
7561 int32_t status;
7562
7563 status = wm_read_ich8_data(sc, index, 2, data);
7564 return status;
7565 }
7566
7567 static int
7568 wm_check_mng_mode(struct wm_softc *sc)
7569 {
7570 int rv;
7571
7572 switch (sc->sc_type) {
7573 case WM_T_ICH8:
7574 case WM_T_ICH9:
7575 case WM_T_ICH10:
7576 case WM_T_PCH:
7577 case WM_T_PCH2:
7578 rv = wm_check_mng_mode_ich8lan(sc);
7579 break;
7580 case WM_T_82574:
7581 case WM_T_82583:
7582 rv = wm_check_mng_mode_82574(sc);
7583 break;
7584 case WM_T_82571:
7585 case WM_T_82572:
7586 case WM_T_82573:
7587 case WM_T_80003:
7588 rv = wm_check_mng_mode_generic(sc);
7589 break;
7590 default:
		/* nothing to do */
7592 rv = 0;
7593 break;
7594 }
7595
7596 return rv;
7597 }
7598
7599 static int
7600 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7601 {
7602 uint32_t fwsm;
7603
7604 fwsm = CSR_READ(sc, WMREG_FWSM);
7605
7606 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7607 return 1;
7608
7609 return 0;
7610 }
7611
7612 static int
7613 wm_check_mng_mode_82574(struct wm_softc *sc)
7614 {
7615 uint16_t data;
7616
7617 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7618
7619 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7620 return 1;
7621
7622 return 0;
7623 }
7624
7625 static int
7626 wm_check_mng_mode_generic(struct wm_softc *sc)
7627 {
7628 uint32_t fwsm;
7629
7630 fwsm = CSR_READ(sc, WMREG_FWSM);
7631
7632 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7633 return 1;
7634
7635 return 0;
7636 }
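
/*
 * Note (illustrative): the FWSM checks above compare the masked
 * mode field against a pre-shifted constant; an equivalent
 * formulation extracts the field first:
 *
 *	mode = (fwsm & FWSM_MODE_MASK) >> FWSM_MODE_SHIFT;
 *	if (mode == MNG_IAMT_MODE)
 *		return 1;
 */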
7637
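/*
 * Decide whether management packets may pass through to the host.
 * This requires ASF firmware to be present and both TCO receive and
 * MAC address filtering to be enabled; beyond that, parts with a
 * valid ARC subsystem must be in iAMT mode with management clock
 * gating off, while other parts must have SMBus enabled and ASF
 * disabled.
 */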
7638 static int
7639 wm_enable_mng_pass_thru(struct wm_softc *sc)
7640 {
7641 uint32_t manc, fwsm, factps;
7642
7643 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7644 return 0;
7645
7646 manc = CSR_READ(sc, WMREG_MANC);
7647
7648 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7649 device_xname(sc->sc_dev), manc));
7650 if (((manc & MANC_RECV_TCO_EN) == 0)
7651 || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7652 return 0;
7653
7654 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7655 fwsm = CSR_READ(sc, WMREG_FWSM);
7656 factps = CSR_READ(sc, WMREG_FACTPS);
7657 if (((factps & FACTPS_MNGCG) == 0)
7658 && ((fwsm & FWSM_MODE_MASK)
7659 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7660 return 1;
7661 } else if (((manc & MANC_SMBUS_EN) != 0)
7662 && ((manc & MANC_ASF_EN) == 0))
7663 return 1;
7664
7665 return 0;
7666 }
7667
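/*
 * Check whether firmware currently blocks PHY resets: returns 0 if
 * a PHY reset is permitted and -1 if it must not be performed
 * (FWSM_RSPCIPHY clear on the ICH/PCH parts, MANC_BLK_PHY_RST_ON_IDE
 * set on the 8257x/80003 parts).
 */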
7668 static int
7669 wm_check_reset_block(struct wm_softc *sc)
7670 {
7671 uint32_t reg;
7672
7673 switch (sc->sc_type) {
7674 case WM_T_ICH8:
7675 case WM_T_ICH9:
7676 case WM_T_ICH10:
7677 case WM_T_PCH:
7678 case WM_T_PCH2:
7679 reg = CSR_READ(sc, WMREG_FWSM);
7680 if ((reg & FWSM_RSPCIPHY) != 0)
7681 return 0;
7682 else
7683 return -1;
7684 break;
7685 case WM_T_82571:
7686 case WM_T_82572:
7687 case WM_T_82573:
7688 case WM_T_82574:
7689 case WM_T_82583:
7690 case WM_T_80003:
7691 reg = CSR_READ(sc, WMREG_MANC);
7692 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7693 return -1;
7694 else
7695 return 0;
7696 break;
7697 default:
7698 /* no problem */
7699 break;
7700 }
7701
7702 return 0;
7703 }
7704
7705 static void
7706 wm_get_hw_control(struct wm_softc *sc)
7707 {
7708 uint32_t reg;
7709
7710 switch (sc->sc_type) {
7711 case WM_T_82573:
7712 reg = CSR_READ(sc, WMREG_SWSM);
7713 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7714 break;
7715 case WM_T_82571:
7716 case WM_T_82572:
7717 case WM_T_82574:
7718 case WM_T_82583:
7719 case WM_T_80003:
7720 case WM_T_ICH8:
7721 case WM_T_ICH9:
7722 case WM_T_ICH10:
7723 case WM_T_PCH:
7724 case WM_T_PCH2:
7725 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7726 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7727 break;
7728 default:
7729 break;
7730 }
7731 }
7732
7733 static void
7734 wm_release_hw_control(struct wm_softc *sc)
7735 {
7736 uint32_t reg;
7737
7738 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7739 return;
7740
7741 if (sc->sc_type == WM_T_82573) {
7742 reg = CSR_READ(sc, WMREG_SWSM);
7743 reg &= ~SWSM_DRV_LOAD;
7744 CSR_WRITE(sc, WMREG_SWSM, reg);
7745 } else {
7746 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7747 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7748 }
7749 }
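
/*
 * Illustrative pairing: wm_get_hw_control() and
 * wm_release_hw_control() bracket driver ownership of the device;
 * the DRV_LOAD bit tells the management firmware whether the host
 * driver is loaded:
 *
 *	wm_get_hw_control(sc);
 *	... driver attach / normal operation ...
 *	wm_release_hw_control(sc);
 */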
7750
7751 /* XXX Currently TBI only */
7752 static int
7753 wm_check_for_link(struct wm_softc *sc)
7754 {
7755 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7756 uint32_t rxcw;
7757 uint32_t ctrl;
7758 uint32_t status;
7759 uint32_t sig;
7760
7761 rxcw = CSR_READ(sc, WMREG_RXCW);
7762 ctrl = CSR_READ(sc, WMREG_CTRL);
7763 status = CSR_READ(sc, WMREG_STATUS);
7764
7765 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7766
7767 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7768 device_xname(sc->sc_dev), __func__,
7769 ((ctrl & CTRL_SWDPIN(1)) == sig),
7770 ((status & STATUS_LU) != 0),
7771 ((rxcw & RXCW_C) != 0)
7772 ));
7773
7774 /*
7775 * SWDPIN LU RXCW
7776 * 0 0 0
7777 * 0 0 1 (should not happen)
7778 * 0 1 0 (should not happen)
7779 * 0 1 1 (should not happen)
7780 * 1 0 0 Disable autonego and force linkup
7781 * 1 0 1 got /C/ but not linkup yet
7782 * 1 1 0 (linkup)
7783 * 1 1 1 If IFM_AUTO, back to autonego
7784 *
7785 */
7786 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7787 && ((status & STATUS_LU) == 0)
7788 && ((rxcw & RXCW_C) == 0)) {
7789 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7790 __func__));
7791 sc->sc_tbi_linkup = 0;
7792 /* Disable auto-negotiation in the TXCW register */
7793 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7794
7795 /*
7796 * Force link-up and also force full-duplex.
7797 *
7798 * NOTE: the hardware updates the TFCE and RFCE bits in
7799 * CTRL automatically, so keep sc->sc_ctrl in sync.
7800 */
7801 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7802 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7803 } else if (((status & STATUS_LU) != 0)
7804 && ((rxcw & RXCW_C) != 0)
7805 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7806 sc->sc_tbi_linkup = 1;
7807 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7808 __func__));
7809 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7810 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7811 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7812 && ((rxcw & RXCW_C) != 0)) {
7813 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7814 } else {
7815 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7816 status));
7817 }
7818
7819 return 0;
7820 }
7821
7822 /* Work-around for 82566 Kumeran PCS lock loss */
7823 static void
7824 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7825 {
7826 int miistatus, active, i;
7827 int reg;
7828
7829 miistatus = sc->sc_mii.mii_media_status;
7830
7831 /* If the link is not up, do nothing */
7832 if ((miistatus & IFM_ACTIVE) == 0)
7833 return;
7834
7835 active = sc->sc_mii.mii_media_active;
7836
7837 /* Nothing to do if the link speed is other than 1Gbps */
7838 if (IFM_SUBTYPE(active) != IFM_1000_T)
7839 return;
7840
7841 for (i = 0; i < 10; i++) {
7842 /* read twice */
7843 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7844 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7845 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7846 goto out; /* GOOD! */
7847
7848 /* Reset the PHY */
7849 wm_gmii_reset(sc);
7850 delay(5*1000);
7851 }
7852
7853 /* Disable GigE link negotiation */
7854 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7855 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7856 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7857
7858 /*
7859 * Call gig speed drop workaround on Gig disable before accessing
7860 * any PHY registers.
7861 */
7862 wm_gig_downshift_workaround_ich8lan(sc);
7863
7864 out:
7865 return;
7866 }
7867
7868 /* WOL from S5 stops working */
7869 static void
7870 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7871 {
7872 uint16_t kmrn_reg;
7873
7874 /* Only for igp3 */
7875 if (sc->sc_phytype == WMPHY_IGP_3) {
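/* Bounce the near-end loopback diagnostic bit: set it, then clear it */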
7876 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7877 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7878 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7879 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7880 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7881 }
7882 }
7883
7884 #ifdef WM_WOL
7885 /* Power down workaround on D3 */
7886 static void
7887 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7888 {
7889 uint32_t reg;
7890 int i;
7891
7892 for (i = 0; i < 2; i++) {
7893 /* Disable link */
7894 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7895 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7896 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7897
7898 /*
7899 * Call gig speed drop workaround on Gig disable before
7900 * accessing any PHY registers
7901 */
7902 if (sc->sc_type == WM_T_ICH8)
7903 wm_gig_downshift_workaround_ich8lan(sc);
7904
7905 /* Write VR power-down enable */
7906 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7907 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7908 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7909 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7910
7911 /* Read it back and test */
7912 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7913 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7914 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7915 break;
7916
7917 /* Issue PHY reset and repeat at most one more time */
7918 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7919 }
7920 }
7921 #endif /* WM_WOL */
7922
7923 /*
7924 * Workaround for the PCH PHYs
7925 * XXX should be moved to new PHY driver?
7926 */
7927 static void
7928 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7929 {
7930 if (sc->sc_phytype == WMPHY_82577)
7931 wm_set_mdio_slow_mode_hv(sc);
7932
7933 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7934
7935 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7936
7937 /* 82578 */
7938 if (sc->sc_phytype == WMPHY_82578) {
7939 /* PCH rev. < 3 */
7940 if (sc->sc_rev < 3) {
7941 /* XXX 6 bit shift? Why? Is it page2? */
7942 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7943 0x66c0);
7944 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7945 0xffff);
7946 }
7947
7948 /* XXX phy rev. < 2 */
7949 }
7950
7951 /* Select page 0 */
7952
7953 /* XXX acquire semaphore */
7954 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7955 /* XXX release semaphore */
7956
7957 /*
7958 * Configure the K1 Si workaround during PHY reset, assuming
7959 * link is up, so that K1 is disabled while the link runs at 1Gbps.
7960 */
7961 wm_k1_gig_workaround_hv(sc, 1);
7962 }
7963
7964 static void
7965 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
7966 {
7967
7968 wm_set_mdio_slow_mode_hv(sc);
7969 }
7970
7971 static void
7972 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7973 {
7974 int k1_enable = sc->sc_nvm_k1_enabled;
7975
7976 /* XXX acquire semaphore */
7977
7978 if (link) {
7979 k1_enable = 0;
7980
7981 /* Link stall fix for link up */
7982 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7983 } else {
7984 /* Link stall fix for link down */
7985 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7986 }
7987
7988 wm_configure_k1_ich8lan(sc, k1_enable);
7989
7990 /* XXX release semaphore */
7991 }
7992
7993 static void
7994 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
7995 {
7996 uint32_t reg;
7997
7998 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
7999 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8000 reg | HV_KMRN_MDIO_SLOW);
8001 }
8002
8003 static void
8004 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8005 {
8006 uint32_t ctrl, ctrl_ext, tmp;
8007 uint16_t kmrn_reg;
8008
8009 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8010
8011 if (k1_enable)
8012 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8013 else
8014 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8015
8016 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8017
8018 delay(20);
8019
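/*
 * Bounce the MAC through a forced-speed state with the speed-bypass
 * bit set so that the K1 configuration change takes effect, then
 * restore the original CTRL and CTRL_EXT values.
 */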
8020 ctrl = CSR_READ(sc, WMREG_CTRL);
8021 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8022
8023 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8024 tmp |= CTRL_FRCSPD;
8025
8026 CSR_WRITE(sc, WMREG_CTRL, tmp);
8027 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8028 delay(20);
8029
8030 CSR_WRITE(sc, WMREG_CTRL, ctrl);
8031 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8032 delay(20);
8033 }
8034
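/*
 * Switch the PHY's management interface from SMBus back to PCIe by
 * toggling the LANPHYPC value override; only done when no valid
 * firmware is present and the PHY is not reset-blocked.
 */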
8035 static void
8036 wm_smbustopci(struct wm_softc *sc)
8037 {
8038 uint32_t fwsm;
8039
8040 fwsm = CSR_READ(sc, WMREG_FWSM);
8041 if (((fwsm & FWSM_FW_VALID) == 0)
8042 && (wm_check_reset_block(sc) == 0)) {
8043 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8044 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8045 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8046 delay(10);
8047 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8048 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8049 delay(50*1000);
8050
8051 /*
8052 * Gate automatic PHY configuration by hardware on non-managed
8053 * 82579
8054 */
8055 if (sc->sc_type == WM_T_PCH2)
8056 wm_gate_hw_phy_config_ich8lan(sc, 1);
8057 }
8058 }
8059
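/*
 * Set the PCIe completion timeout if it is still at its zero
 * default: through GCR (10ms) on parts without the version-2
 * capability, otherwise through Device Control 2 (16ms).
 * Completion timeout resend is disabled in either case.
 */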
8060 static void
8061 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8062 {
8063 uint32_t gcr;
8064 pcireg_t ctrl2;
8065
8066 gcr = CSR_READ(sc, WMREG_GCR);
8067
8068 /* Only take action if the timeout value is still at its 0 default */
8069 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8070 goto out;
8071
8072 if ((gcr & GCR_CAP_VER2) == 0) {
8073 gcr |= GCR_CMPL_TMOUT_10MS;
8074 goto out;
8075 }
8076
8077 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8078 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
8079 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
8080 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8081 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
8082
8083 out:
8084 /* Disable completion timeout resend */
8085 gcr &= ~GCR_CMPL_TMOUT_RESEND;
8086
8087 CSR_WRITE(sc, WMREG_GCR, gcr);
8088 }
8089
8090 /* Special case - the 82575 needs manual initialization ... */
8091 static void
8092 wm_reset_init_script_82575(struct wm_softc *sc)
8093 {
8094 /*
8095 * Remark: this is untested code - we have no board without EEPROM.
8096 * Same setup as mentioned in the FreeBSD driver for the i82575.
8097 */
8098
8099 /* SerDes configuration via SERDESCTRL */
8100 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8101 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8102 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8103 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8104
8105 /* CCM configuration via CCMCTL register */
8106 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8107 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8108
8109 /* PCIe lanes configuration */
8110 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8111 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8112 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8113 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8114
8115 /* PCIe PLL Configuration */
8116 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8117 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8118 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8119 }
8120
8121 static void
8122 wm_init_manageability(struct wm_softc *sc)
8123 {
8124
8125 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8126 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8127 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8128
8129 /* Disable hardware interception of ARP */
8130 manc &= ~MANC_ARP_EN;
8131
8132 /* enable receiving management packets to the host */
8133 if (sc->sc_type >= WM_T_82571) {
8134 manc |= MANC_EN_MNG2HOST;
8135 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8136 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8138 }
8139
8140 CSR_WRITE(sc, WMREG_MANC, manc);
8141 }
8142 }
8143
8144 static void
8145 wm_release_manageability(struct wm_softc *sc)
8146 {
8147
8148 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8149 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8150
8151 if (sc->sc_type >= WM_T_82571)
8152 manc &= ~MANC_EN_MNG2HOST;
8153
8154 CSR_WRITE(sc, WMREG_MANC, manc);
8155 }
8156 }
8157
8158 static void
8159 wm_get_wakeup(struct wm_softc *sc)
8160 {
8161
8162 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8163 switch (sc->sc_type) {
8164 case WM_T_82573:
8165 case WM_T_82583:
8166 sc->sc_flags |= WM_F_HAS_AMT;
8167 /* FALLTHROUGH */
8168 case WM_T_80003:
8169 case WM_T_82541:
8170 case WM_T_82547:
8171 case WM_T_82571:
8172 case WM_T_82572:
8173 case WM_T_82574:
8174 case WM_T_82575:
8175 case WM_T_82576:
8176 #if 0 /* XXX */
8177 case WM_T_82580:
8178 case WM_T_82580ER:
8179 case WM_T_I350:
8180 #endif
8181 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8182 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8183 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8184 break;
8185 case WM_T_ICH8:
8186 case WM_T_ICH9:
8187 case WM_T_ICH10:
8188 case WM_T_PCH:
8189 case WM_T_PCH2:
8190 sc->sc_flags |= WM_F_HAS_AMT;
8191 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8192 break;
8193 default:
8194 break;
8195 }
8196
8197 /* 1: HAS_MANAGE */
8198 if (wm_enable_mng_pass_thru(sc) != 0)
8199 sc->sc_flags |= WM_F_HAS_MANAGE;
8200
8201 #ifdef WM_DEBUG
8202 printf("\n");
8203 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8204 printf("HAS_AMT,");
8205 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8206 printf("ARC_SUBSYS_VALID,");
8207 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8208 printf("ASF_FIRMWARE_PRES,");
8209 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8210 printf("HAS_MANAGE,");
8211 printf("\n");
8212 #endif
8213 /*
8214 * Note that the WOL flag is set after the EEPROM
8215 * reset code has run.
8216 */
8217 }
8218
8219 #ifdef WM_WOL
8220 /* WOL in the newer chipset interfaces (pchlan) */
8221 static void
8222 wm_enable_phy_wakeup(struct wm_softc *sc)
8223 {
8224 #if 0
8225 uint16_t preg;
8226
8227 /* Copy MAC RARs to PHY RARs */
8228
8229 /* Copy MAC MTA to PHY MTA */
8230
8231 /* Configure PHY Rx Control register */
8232
8233 /* Enable PHY wakeup in MAC register */
8234
8235 /* Configure and enable PHY wakeup in PHY registers */
8236
8237 /* Activate PHY wakeup */
8238
8239 /* XXX */
8240 #endif
8241 }
8242
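/*
 * Arm the device for wakeup: advertise APM enable, apply the
 * ICH/PCH workarounds, keep the laser running on fiber/serdes
 * parts, enable magic-packet reception and finally set PME status
 * and enable in the PCI power-management capability.
 */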
8243 static void
8244 wm_enable_wakeup(struct wm_softc *sc)
8245 {
8246 uint32_t reg, pmreg;
8247 pcireg_t pmode;
8248
8249 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8250 &pmreg, NULL) == 0)
8251 return;
8252
8253 /* Advertise the wakeup capability */
8254 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8255 | CTRL_SWDPIN(3));
8256 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8257
8258 /* ICH workaround */
8259 switch (sc->sc_type) {
8260 case WM_T_ICH8:
8261 case WM_T_ICH9:
8262 case WM_T_ICH10:
8263 case WM_T_PCH:
8264 case WM_T_PCH2:
8265 /* Disable gig during WOL */
8266 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8267 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8268 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8269 if (sc->sc_type == WM_T_PCH)
8270 wm_gmii_reset(sc);
8271
8272 /* Power down workaround */
8273 if (sc->sc_phytype == WMPHY_82577) {
8274 struct mii_softc *child;
8275
8276 /* Assume that the PHY is copper */
8277 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8278 if (child->mii_mpd_rev <= 2)
8279 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8280 (768 << 5) | 25, 0x0444); /* magic num */
8281 }
8282 break;
8283 default:
8284 break;
8285 }
8286
8287 /* Keep the laser running on fiber adapters */
8288 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8289 || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
8290 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8291 reg |= CTRL_EXT_SWDPIN(3);
8292 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8293 }
8294
8295 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8296 #if 0 /* for the multicast packet */
8297 reg |= WUFC_MC;
8298 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8299 #endif
8300
8301 if (sc->sc_type == WM_T_PCH) {
8302 wm_enable_phy_wakeup(sc);
8303 } else {
8304 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8305 CSR_WRITE(sc, WMREG_WUFC, reg);
8306 }
8307
8308 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8309 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8310 || (sc->sc_type == WM_T_PCH2))
8311 && (sc->sc_phytype == WMPHY_IGP_3))
8312 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8313
8314 /* Request PME */
8315 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8316 #if 0
8317 /* Disable WOL */
8318 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8319 #else
8320 /* For WOL */
8321 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8322 #endif
8323 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8324 }
8325 #endif /* WM_WOL */
8326
8327 static bool
8328 wm_suspend(device_t self, const pmf_qual_t *qual)
8329 {
8330 struct wm_softc *sc = device_private(self);
8331
8332 wm_release_manageability(sc);
8333 wm_release_hw_control(sc);
8334 #ifdef WM_WOL
8335 wm_enable_wakeup(sc);
8336 #endif
8337
8338 return true;
8339 }
8340
8341 static bool
8342 wm_resume(device_t self, const pmf_qual_t *qual)
8343 {
8344 struct wm_softc *sc = device_private(self);
8345
8346 wm_init_manageability(sc);
8347
8348 return true;
8349 }
8350
8351 static void
8352 wm_set_eee_i350(struct wm_softc *sc)
8353 {
8354 uint32_t ipcnfg, eeer;
8355
8356 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8357 eeer = CSR_READ(sc, WMREG_EEER);
8358
8359 if ((sc->sc_flags & WM_F_EEE) != 0) {
8360 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8361 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8362 | EEER_LPI_FC);
8363 } else {
8364 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8365 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8366 | EEER_LPI_FC);
8367 }
8368
8369 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8370 CSR_WRITE(sc, WMREG_EEER, eeer);
8371 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8372 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8373 }
8374