/*	$NetBSD: if_wm.c,v 1.281 2014/07/23 10:48:16 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.281 2014/07/23 10:48:16 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
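
/*
 * Example (illustrative only): the second DPRINTF() argument carries its
 * own parentheses so that an entire printf() argument list can be passed
 * through the macro, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */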

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
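
/*
 * Example (illustrative only): because the ring sizes above are powers
 * of two, index wrap-around is a mask, not a modulo.  With
 * WM_NTXDESC(sc) == 256, WM_NEXTTX(sc, 255) == (255 + 1) & 255 == 0,
 * so the producer index rolls over to the start of the ring.
 */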

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
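
/*
 * Example (illustrative only): WM_CDRXOFF(4) is the byte offset of the
 * fifth Rx descriptor within struct wm_control_data_82544, i.e.
 * 4 * sizeof(wiseman_rxdesc_t) from the start of the control block.
 * WM_CDTXADDR()/WM_CDRXADDR() below turn these offsets into bus
 * addresses by adding the control block's DMA base address.
 */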

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
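
/*
 * Descriptive note (assumption, based on how this table is indexed
 * elsewhere in the driver): sc_funcid (0 to 3) selects the SW/FW
 * semaphore bit for the PHY owned by this chip function, e.g.
 * swfwphysem[1] is SWFW_PHY1_SM for function 1 on a multi-port chip.
 */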

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_txrx_lock;		/* lock for tx/rx operations */
					/* XXX need separation? */
};

#define WM_LOCK(_sc)	if ((_sc)->sc_txrx_lock) mutex_enter((_sc)->sc_txrx_lock)
#define WM_UNLOCK(_sc)	if ((_sc)->sc_txrx_lock) mutex_exit((_sc)->sc_txrx_lock)
#define WM_LOCKED(_sc)	(!(_sc)->sc_txrx_lock || mutex_owned((_sc)->sc_txrx_lock))
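
/*
 * Typical usage (illustrative only, matching the *_locked function
 * pairs declared below): an entry point takes the lock and calls its
 * locked variant, which may assert ownership:
 *
 *	WM_LOCK(sc);
 *	wm_start_locked(ifp);		(asserts WM_LOCKED(sc))
 *	WM_UNLOCK(sc);
 */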

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
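
/*
 * Sketch (illustrative only): a jumbo frame spanning three 2K buffers
 * is reassembled by the Rx path roughly as
 *
 *	WM_RXCHAIN_RESET(sc);
 *	WM_RXCHAIN_LINK(sc, m0);
 *	WM_RXCHAIN_LINK(sc, m1);
 *	WM_RXCHAIN_LINK(sc, m2);
 *
 * after which sc_rxhead points at m0 with m0->m_next == m1 and
 * m1->m_next == m2, and sc_rxtailp is positioned for the next link.
 */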

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
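
/*
 * Descriptive note: CSR_WRITE_FLUSH() reads STATUS purely for its side
 * effect -- the read cannot complete until earlier posted writes have
 * reached the chip, so it is used where a register write must land
 * before a subsequent delay() or dependent access.
 */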

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
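
/*
 * Example (illustrative only): on a platform with a 64-bit bus_addr_t,
 * a descriptor address of 0x123456789 splits into
 * WM_CDTXADDR_LO() == 0x23456789 and WM_CDTXADDR_HI() == 0x1; on
 * 32-bit platforms the high half is a constant 0.  This matches the
 * chip's low/high descriptor base address register pairs.
 */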

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
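
/*
 * Worked example (illustrative only): with WM_NTXDESC(sc) == 256,
 * WM_CDTXSYNC(sc, 254, 4, ops) crosses the end of the ring, so it is
 * split into two bus_dmamap_sync() calls: one for descriptors 254-255
 * and one for descriptors 0-1.
 */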

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI (and SERDES)
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Both spi and uwire */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
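
/*
 * Descriptive note (assumption about the indirect-access layout): the
 * first longword of the I/O BAR acts as an address window and the
 * second as a data window, so each indirect access is a write of the
 * register offset followed by a read or write of the data longword.
 */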

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
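
/*
 * Worked example (illustrative only): the loop above polls every 5us,
 * so a full timeout costs SCTL_CTL_POLL_TIMEOUT * 5 microseconds of
 * busy-wait before the "did not indicate ready" warning fires.
 */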

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's fine, since those newer chips don't have
			 * this bug in the first place.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
				0, &sc->sc_iot, &sc->sc_ioh,
				NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
1444 sc->sc_flags |= WM_F_CSA;
1445 sc->sc_bus_speed = 66;
1446 aprint_verbose_dev(sc->sc_dev,
1447 "Communication Streaming Architecture\n");
1448 if (sc->sc_type == WM_T_82547) {
1449 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1450 callout_setfunc(&sc->sc_txfifo_ch,
1451 wm_82547_txfifo_stall, sc);
1452 aprint_verbose_dev(sc->sc_dev,
1453 "using 82547 Tx FIFO stall work-around\n");
1454 }
1455 } else if (sc->sc_type >= WM_T_82571) {
1456 sc->sc_flags |= WM_F_PCIE;
1457 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1458 && (sc->sc_type != WM_T_ICH10)
1459 && (sc->sc_type != WM_T_PCH)
1460 && (sc->sc_type != WM_T_PCH2)
1461 && (sc->sc_type != WM_T_PCH_LPT)) {
1462 /* ICH* and PCH* have no PCIe capability registers */
1463 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1464 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1465 NULL) == 0)
1466 aprint_error_dev(sc->sc_dev,
1467 "unable to find PCIe capability\n");
1468 }
1469 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1470 } else {
1471 reg = CSR_READ(sc, WMREG_STATUS);
1472 if (reg & STATUS_BUS64)
1473 sc->sc_flags |= WM_F_BUS64;
1474 if ((reg & STATUS_PCIX_MODE) != 0) {
1475 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1476
1477 sc->sc_flags |= WM_F_PCIX;
1478 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1479 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1480 aprint_error_dev(sc->sc_dev,
1481 "unable to find PCIX capability\n");
1482 else if (sc->sc_type != WM_T_82545_3 &&
1483 sc->sc_type != WM_T_82546_3) {
1484 /*
1485 * Work around a problem caused by the BIOS
1486 * setting the max memory read byte count
1487 * incorrectly.
1488 */
1489 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1490 sc->sc_pcixe_capoff + PCIX_CMD);
1491 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1492 sc->sc_pcixe_capoff + PCIX_STATUS);
1493
1494 bytecnt =
1495 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1496 PCIX_CMD_BYTECNT_SHIFT;
1497 maxb =
1498 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1499 PCIX_STATUS_MAXB_SHIFT;
1500 if (bytecnt > maxb) {
1501 aprint_verbose_dev(sc->sc_dev,
1502 "resetting PCI-X MMRBC: %d -> %d\n",
1503 512 << bytecnt, 512 << maxb);
1504 pcix_cmd = (pcix_cmd &
1505 ~PCIX_CMD_BYTECNT_MASK) |
1506 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1507 pci_conf_write(pa->pa_pc, pa->pa_tag,
1508 sc->sc_pcixe_capoff + PCIX_CMD,
1509 pcix_cmd);
1510 }
1511 }
1512 }
1513 /*
1514 * The quad port adapter is special; it has a PCIX-PCIX
1515 * bridge on the board, and can run the secondary bus at
1516 * a higher speed.
1517 */
1518 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1519 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1520 : 66;
1521 } else if (sc->sc_flags & WM_F_PCIX) {
1522 switch (reg & STATUS_PCIXSPD_MASK) {
1523 case STATUS_PCIXSPD_50_66:
1524 sc->sc_bus_speed = 66;
1525 break;
1526 case STATUS_PCIXSPD_66_100:
1527 sc->sc_bus_speed = 100;
1528 break;
1529 case STATUS_PCIXSPD_100_133:
1530 sc->sc_bus_speed = 133;
1531 break;
1532 default:
1533 aprint_error_dev(sc->sc_dev,
1534 "unknown PCIXSPD %d; assuming 66MHz\n",
1535 reg & STATUS_PCIXSPD_MASK);
1536 sc->sc_bus_speed = 66;
1537 break;
1538 }
1539 } else
1540 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1541 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1542 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1543 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1544 }
1545
1546 /*
1547 * Allocate the control data structures, and create and load the
1548 * DMA map for it.
1549 *
1550 * NOTE: All Tx descriptors must be in the same 4G segment of
1551 * memory. So must Rx descriptors. We simplify by allocating
1552 * both sets within the same 4G segment.
1553 */
1554 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1555 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1556 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1557 sizeof(struct wm_control_data_82542) :
1558 sizeof(struct wm_control_data_82544);
1559 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1560 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1561 &sc->sc_cd_rseg, 0)) != 0) {
1562 aprint_error_dev(sc->sc_dev,
1563 "unable to allocate control data, error = %d\n",
1564 error);
1565 goto fail_0;
1566 }
1567
1568 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1569 sc->sc_cd_rseg, sc->sc_cd_size,
1570 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1571 aprint_error_dev(sc->sc_dev,
1572 "unable to map control data, error = %d\n", error);
1573 goto fail_1;
1574 }
1575
1576 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1577 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1578 aprint_error_dev(sc->sc_dev,
1579 "unable to create control data DMA map, error = %d\n",
1580 error);
1581 goto fail_2;
1582 }
1583
1584 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1585 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1586 aprint_error_dev(sc->sc_dev,
1587 "unable to load control data DMA map, error = %d\n",
1588 error);
1589 goto fail_3;
1590 }
1591
1592 /* Create the transmit buffer DMA maps. */
1593 WM_TXQUEUELEN(sc) =
1594 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1595 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1596 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1597 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1598 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1599 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1600 aprint_error_dev(sc->sc_dev,
1601 "unable to create Tx DMA map %d, error = %d\n",
1602 i, error);
1603 goto fail_4;
1604 }
1605 }
1606
1607 /* Create the receive buffer DMA maps. */
1608 for (i = 0; i < WM_NRXDESC; i++) {
1609 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1610 MCLBYTES, 0, 0,
1611 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1612 aprint_error_dev(sc->sc_dev,
1613 "unable to create Rx DMA map %d error = %d\n",
1614 i, error);
1615 goto fail_5;
1616 }
1617 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1618 }
1619
1620 /* clear interesting stat counters */
1621 CSR_READ(sc, WMREG_COLC);
1622 CSR_READ(sc, WMREG_RXERRC);
1623
1624 /* Switch PHY control from SMBus to PCIe */
1625 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1626 || (sc->sc_type == WM_T_PCH_LPT))
1627 wm_smbustopci(sc);
1628
1629 /* Reset the chip to a known state. */
1630 wm_reset(sc);
1631
1632 /* Get some information about the EEPROM. */
1633 switch (sc->sc_type) {
1634 case WM_T_82542_2_0:
1635 case WM_T_82542_2_1:
1636 case WM_T_82543:
1637 case WM_T_82544:
1638 /* Microwire */
1639 sc->sc_ee_addrbits = 6;
1640 break;
1641 case WM_T_82540:
1642 case WM_T_82545:
1643 case WM_T_82545_3:
1644 case WM_T_82546:
1645 case WM_T_82546_3:
1646 /* Microwire */
1647 reg = CSR_READ(sc, WMREG_EECD);
1648 if (reg & EECD_EE_SIZE)
1649 sc->sc_ee_addrbits = 8;
1650 else
1651 sc->sc_ee_addrbits = 6;
1652 sc->sc_flags |= WM_F_LOCK_EECD;
1653 break;
1654 case WM_T_82541:
1655 case WM_T_82541_2:
1656 case WM_T_82547:
1657 case WM_T_82547_2:
1658 reg = CSR_READ(sc, WMREG_EECD);
1659 if (reg & EECD_EE_TYPE) {
1660 /* SPI */
1661 wm_set_spiaddrbits(sc);
1662 } else
1663 /* Microwire */
1664 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1665 sc->sc_flags |= WM_F_LOCK_EECD;
1666 break;
1667 case WM_T_82571:
1668 case WM_T_82572:
1669 /* SPI */
1670 wm_set_spiaddrbits(sc);
1671 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1672 break;
1673 case WM_T_82573:
1674 sc->sc_flags |= WM_F_LOCK_SWSM;
1675 /* FALLTHROUGH */
1676 case WM_T_82574:
1677 case WM_T_82583:
1678 if (wm_nvm_is_onboard_eeprom(sc) == 0)
1679 sc->sc_flags |= WM_F_EEPROM_FLASH;
1680 else {
1681 /* SPI */
1682 wm_set_spiaddrbits(sc);
1683 }
1684 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1685 break;
1686 case WM_T_82575:
1687 case WM_T_82576:
1688 case WM_T_82580:
1689 case WM_T_82580ER:
1690 case WM_T_I350:
1691 case WM_T_I354:
1692 case WM_T_80003:
1693 /* SPI */
1694 wm_set_spiaddrbits(sc);
1695 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1696 | WM_F_LOCK_SWSM;
1697 break;
1698 case WM_T_ICH8:
1699 case WM_T_ICH9:
1700 case WM_T_ICH10:
1701 case WM_T_PCH:
1702 case WM_T_PCH2:
1703 case WM_T_PCH_LPT:
1704 /* FLASH */
1705 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1706 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1707 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1708 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1709 aprint_error_dev(sc->sc_dev,
1710 "can't map FLASH registers\n");
1711 return;
1712 }
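/*
 * GFPREG describes the gigabit flash region in sector units:
 * the masked low half is the first sector and the masked bits
 * at 16.. give the last sector.  The region holds two NVM
 * banks, so the final bank size below is in 16-bit words.
 */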
1713 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1714 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1715 ICH_FLASH_SECTOR_SIZE;
1716 sc->sc_ich8_flash_bank_size =
1717 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1718 sc->sc_ich8_flash_bank_size -=
1719 (reg & ICH_GFPREG_BASE_MASK);
1720 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1721 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1722 break;
1723 case WM_T_I210:
1724 case WM_T_I211:
1725 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1726 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1727 break;
1728 default:
1729 break;
1730 }
1731
1732 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1733 switch (sc->sc_type) {
1734 case WM_T_82571:
1735 case WM_T_82572:
1736 reg = CSR_READ(sc, WMREG_SWSM2);
1737 if ((reg & SWSM2_LOCK) == 0) {
1738 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1739 force_clear_smbi = true;
1740 } else
1741 force_clear_smbi = false;
1742 break;
1743 default:
1744 force_clear_smbi = true;
1745 break;
1746 }
1747 if (force_clear_smbi) {
1748 reg = CSR_READ(sc, WMREG_SWSM);
1749 if ((reg & SWSM_SMBI) != 0)
1750 aprint_error_dev(sc->sc_dev,
1751 "Please update the Bootagent\n");
1752 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1753 }
1754
1755 /*
1756 * Defer printing the EEPROM type until after verifying the checksum.
1757 * This allows the EEPROM type to be printed correctly in the case
1758 * that no EEPROM is attached.
1759 */
1760 /*
1761 * Validate the EEPROM checksum. If the checksum fails, flag
1762 * this for later, so we can fail future reads from the EEPROM.
1763 */
1764 if (wm_nvm_validate_checksum(sc)) {
1765 /*
1766 * Check the checksum again; some PCIe parts fail the
1767 * first check because the link may be in a sleep state.
1768 */
1769 if (wm_nvm_validate_checksum(sc))
1770 sc->sc_flags |= WM_F_EEPROM_INVALID;
1771 }
1772
1773 /* Set device properties (macflags) */
1774 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1775
1776 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1777 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1778 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1779 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1780 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1781 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1782 } else {
1783 if (sc->sc_flags & WM_F_EEPROM_SPI)
1784 eetype = "SPI";
1785 else
1786 eetype = "MicroWire";
1787 aprint_verbose_dev(sc->sc_dev,
1788 "%u word (%d address bits) %s EEPROM\n",
1789 1U << sc->sc_ee_addrbits,
1790 sc->sc_ee_addrbits, eetype);
1791 }
1792
1793 switch (sc->sc_type) {
1794 case WM_T_82571:
1795 case WM_T_82572:
1796 case WM_T_82573:
1797 case WM_T_82574:
1798 case WM_T_82583:
1799 case WM_T_80003:
1800 case WM_T_ICH8:
1801 case WM_T_ICH9:
1802 case WM_T_ICH10:
1803 case WM_T_PCH:
1804 case WM_T_PCH2:
1805 case WM_T_PCH_LPT:
1806 if (wm_check_mng_mode(sc) != 0)
1807 wm_get_hw_control(sc);
1808 break;
1809 default:
1810 break;
1811 }
1812 wm_get_wakeup(sc);
1813 /*
1814 * Read the Ethernet address from the EEPROM, unless it was
1815 * already found in the device properties.
1816 */
1817 ea = prop_dictionary_get(dict, "mac-address");
1818 if (ea != NULL) {
1819 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1820 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1821 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1822 } else {
1823 if (wm_read_mac_addr(sc, enaddr) != 0) {
1824 aprint_error_dev(sc->sc_dev,
1825 "unable to read Ethernet address\n");
1826 return;
1827 }
1828 }
1829
1830 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1831 ether_sprintf(enaddr));
1832
1833 /*
1834 * Read the config info from the EEPROM, and set up various
1835 * bits in the control registers based on their contents.
1836 */
1837 pn = prop_dictionary_get(dict, "i82543-cfg1");
1838 if (pn != NULL) {
1839 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1840 cfg1 = (uint16_t) prop_number_integer_value(pn);
1841 } else {
1842 if (wm_nvm_read(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1843 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1844 return;
1845 }
1846 }
1847
1848 pn = prop_dictionary_get(dict, "i82543-cfg2");
1849 if (pn != NULL) {
1850 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1851 cfg2 = (uint16_t) prop_number_integer_value(pn);
1852 } else {
1853 if (wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1854 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1855 return;
1856 }
1857 }
1858
1859 /* check for WM_F_WOL */
1860 switch (sc->sc_type) {
1861 case WM_T_82542_2_0:
1862 case WM_T_82542_2_1:
1863 case WM_T_82543:
1864 /* dummy? */
1865 eeprom_data = 0;
1866 apme_mask = EEPROM_CFG3_APME;
1867 break;
1868 case WM_T_82544:
1869 apme_mask = EEPROM_CFG2_82544_APM_EN;
1870 eeprom_data = cfg2;
1871 break;
1872 case WM_T_82546:
1873 case WM_T_82546_3:
1874 case WM_T_82571:
1875 case WM_T_82572:
1876 case WM_T_82573:
1877 case WM_T_82574:
1878 case WM_T_82583:
1879 case WM_T_80003:
1880 default:
1881 apme_mask = EEPROM_CFG3_APME;
1882 wm_nvm_read(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1883 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1884 break;
1885 case WM_T_82575:
1886 case WM_T_82576:
1887 case WM_T_82580:
1888 case WM_T_82580ER:
1889 case WM_T_I350:
1890 case WM_T_I354: /* XXX ok? */
1891 case WM_T_ICH8:
1892 case WM_T_ICH9:
1893 case WM_T_ICH10:
1894 case WM_T_PCH:
1895 case WM_T_PCH2:
1896 case WM_T_PCH_LPT:
1897 /* XXX The funcid should be checked on some devices */
1898 apme_mask = WUC_APME;
1899 eeprom_data = CSR_READ(sc, WMREG_WUC);
1900 break;
1901 }
1902
1903 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1904 if ((eeprom_data & apme_mask) != 0)
1905 sc->sc_flags |= WM_F_WOL;
1906 #ifdef WM_DEBUG
1907 if ((sc->sc_flags & WM_F_WOL) != 0)
1908 printf("WOL\n");
1909 #endif
1910
1911 /*
1912 * XXX need special handling for some multi-port cards
1913 * to disable a particular port.
1914 */
1915
1916 if (sc->sc_type >= WM_T_82544) {
1917 pn = prop_dictionary_get(dict, "i82543-swdpin");
1918 if (pn != NULL) {
1919 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1920 swdpin = (uint16_t) prop_number_integer_value(pn);
1921 } else {
1922 if (wm_nvm_read(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1923 aprint_error_dev(sc->sc_dev,
1924 "unable to read SWDPIN\n");
1925 return;
1926 }
1927 }
1928 }
1929
1930 if (cfg1 & EEPROM_CFG1_ILOS)
1931 sc->sc_ctrl |= CTRL_ILOS;
1932 if (sc->sc_type >= WM_T_82544) {
1933 sc->sc_ctrl |=
1934 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1935 CTRL_SWDPIO_SHIFT;
1936 sc->sc_ctrl |=
1937 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1938 CTRL_SWDPINS_SHIFT;
1939 } else {
1940 sc->sc_ctrl |=
1941 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1942 CTRL_SWDPIO_SHIFT;
1943 }
1944
1945 #if 0
1946 if (sc->sc_type >= WM_T_82544) {
1947 if (cfg1 & EEPROM_CFG1_IPS0)
1948 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1949 if (cfg1 & EEPROM_CFG1_IPS1)
1950 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1951 sc->sc_ctrl_ext |=
1952 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1953 CTRL_EXT_SWDPIO_SHIFT;
1954 sc->sc_ctrl_ext |=
1955 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1956 CTRL_EXT_SWDPINS_SHIFT;
1957 } else {
1958 sc->sc_ctrl_ext |=
1959 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1960 CTRL_EXT_SWDPIO_SHIFT;
1961 }
1962 #endif
1963
1964 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1965 #if 0
1966 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1967 #endif
1968
1969 /*
1970 * Set up some register offsets that are different between
1971 * the i82542 and the i82543 and later chips.
1972 */
1973 if (sc->sc_type < WM_T_82543) {
1974 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1975 sc->sc_tdt_reg = WMREG_OLD_TDT;
1976 } else {
1977 sc->sc_rdt_reg = WMREG_RDT;
1978 sc->sc_tdt_reg = WMREG_TDT;
1979 }
1980
1981 if (sc->sc_type == WM_T_PCH) {
1982 uint16_t val;
1983
1984 /* Save the NVM K1 bit setting */
1985 wm_nvm_read(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1986
1987 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1988 sc->sc_nvm_k1_enabled = 1;
1989 else
1990 sc->sc_nvm_k1_enabled = 0;
1991 }
1992
1993 /*
1994 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
1995 * media structures accordingly.
1996 */
1997 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1998 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1999 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2000 || sc->sc_type == WM_T_82573
2001 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2002 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2003 wm_gmii_mediainit(sc, wmp->wmp_product);
2004 } else if (sc->sc_type < WM_T_82543 ||
2005 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2006 if (wmp->wmp_flags & WMP_F_1000T)
2007 aprint_error_dev(sc->sc_dev,
2008 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2009 wm_tbi_mediainit(sc);
2010 } else {
2011 switch (sc->sc_type) {
2012 case WM_T_82575:
2013 case WM_T_82576:
2014 case WM_T_82580:
2015 case WM_T_82580ER:
2016 case WM_T_I350:
2017 case WM_T_I354:
2018 case WM_T_I210:
2019 case WM_T_I211:
2020 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2021 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
2022 case CTRL_EXT_LINK_MODE_1000KX:
2023 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2024 CSR_WRITE(sc, WMREG_CTRL_EXT,
2025 reg | CTRL_EXT_I2C_ENA);
2026 panic("not supported yet\n");
2027 break;
2028 case CTRL_EXT_LINK_MODE_SGMII:
2029 if (wm_sgmii_uses_mdio(sc)) {
2030 aprint_verbose_dev(sc->sc_dev,
2031 "SGMII(MDIO)\n");
2032 sc->sc_flags |= WM_F_SGMII;
2033 wm_gmii_mediainit(sc,
2034 wmp->wmp_product);
2035 break;
2036 }
2037 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2038 /*FALLTHROUGH*/
2039 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2040 aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2041 CSR_WRITE(sc, WMREG_CTRL_EXT,
2042 reg | CTRL_EXT_I2C_ENA);
2043 panic("not supported yet\n");
2044 break;
2045 case CTRL_EXT_LINK_MODE_GMII:
2046 default:
2047 CSR_WRITE(sc, WMREG_CTRL_EXT,
2048 reg & ~CTRL_EXT_I2C_ENA);
2049 wm_gmii_mediainit(sc, wmp->wmp_product);
2050 break;
2051 }
2052 break;
2053 default:
2054 if (wmp->wmp_flags & WMP_F_1000X)
2055 aprint_error_dev(sc->sc_dev,
2056 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2057 wm_gmii_mediainit(sc, wmp->wmp_product);
2058 }
2059 }
2060
2061 ifp = &sc->sc_ethercom.ec_if;
2062 xname = device_xname(sc->sc_dev);
2063 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2064 ifp->if_softc = sc;
2065 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2066 ifp->if_ioctl = wm_ioctl;
2067 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2068 ifp->if_start = wm_nq_start;
2069 else
2070 ifp->if_start = wm_start;
2071 ifp->if_watchdog = wm_watchdog;
2072 ifp->if_init = wm_init;
2073 ifp->if_stop = wm_stop;
2074 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2075 IFQ_SET_READY(&ifp->if_snd);
2076
2077 /* Check for jumbo frame */
2078 switch (sc->sc_type) {
2079 case WM_T_82573:
2080 /* XXX limited to 9234 if ASPM is disabled */
2081 wm_nvm_read(sc, EEPROM_INIT_3GIO_3, 1, &io3);
2082 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
2083 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2084 break;
2085 case WM_T_82571:
2086 case WM_T_82572:
2087 case WM_T_82574:
2088 case WM_T_82575:
2089 case WM_T_82576:
2090 case WM_T_82580:
2091 case WM_T_82580ER:
2092 case WM_T_I350:
2093 case WM_T_I354: /* XXX ok? */
2094 case WM_T_I210:
2095 case WM_T_I211:
2096 case WM_T_80003:
2097 case WM_T_ICH9:
2098 case WM_T_ICH10:
2099 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2100 case WM_T_PCH_LPT:
2101 /* XXX limited to 9234 */
2102 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2103 break;
2104 case WM_T_PCH:
2105 /* XXX limited to 4096 */
2106 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2107 break;
2108 case WM_T_82542_2_0:
2109 case WM_T_82542_2_1:
2110 case WM_T_82583:
2111 case WM_T_ICH8:
2112 /* No support for jumbo frame */
2113 break;
2114 default:
2115 /* ETHER_MAX_LEN_JUMBO */
2116 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2117 break;
2118 }
2119
2120 /* If we're an i82543 or greater, we can support VLANs. */
2121 if (sc->sc_type >= WM_T_82543)
2122 sc->sc_ethercom.ec_capabilities |=
2123 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2124
2125 /*
2126 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2127 * on i82543 and later.
2128 */
2129 if (sc->sc_type >= WM_T_82543) {
2130 ifp->if_capabilities |=
2131 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2132 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2133 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2134 IFCAP_CSUM_TCPv6_Tx |
2135 IFCAP_CSUM_UDPv6_Tx;
2136 }
2137
2138 /*
2139 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2140 *
2141 * 82541GI (8086:1076) ... no
2142 * 82572EI (8086:10b9) ... yes
2143 */
2144 if (sc->sc_type >= WM_T_82571) {
2145 ifp->if_capabilities |=
2146 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2147 }
2148
2149 /*
2150 * If we're an i82544 or greater (except i82547), we can do
2151 * TCP segmentation offload.
2152 */
2153 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2154 ifp->if_capabilities |= IFCAP_TSOv4;
2155 }
2156
2157 if (sc->sc_type >= WM_T_82571) {
2158 ifp->if_capabilities |= IFCAP_TSOv6;
2159 }
2160
2161 #ifdef WM_MPSAFE
2162 sc->sc_txrx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2163 #else
2164 sc->sc_txrx_lock = NULL;
2165 #endif
2166
2167 /* Attach the interface. */
2168 if_attach(ifp);
2169 ether_ifattach(ifp, enaddr);
2170 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2171 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2172
2173 #ifdef WM_EVENT_COUNTERS
2174 /* Attach event counters. */
2175 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2176 NULL, xname, "txsstall");
2177 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2178 NULL, xname, "txdstall");
2179 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2180 NULL, xname, "txfifo_stall");
2181 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2182 NULL, xname, "txdw");
2183 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2184 NULL, xname, "txqe");
2185 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2186 NULL, xname, "rxintr");
2187 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2188 NULL, xname, "linkintr");
2189
2190 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2191 NULL, xname, "rxipsum");
2192 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2193 NULL, xname, "rxtusum");
2194 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2195 NULL, xname, "txipsum");
2196 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2197 NULL, xname, "txtusum");
2198 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2199 NULL, xname, "txtusum6");
2200
2201 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2202 NULL, xname, "txtso");
2203 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2204 NULL, xname, "txtso6");
2205 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2206 NULL, xname, "txtsopain");
2207
2208 for (i = 0; i < WM_NTXSEGS; i++) {
2209 snprintf(wm_txseg_evcnt_names[i],
2210 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2211 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2212 NULL, xname, wm_txseg_evcnt_names[i]);
2213 }
2214
2215 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2216 NULL, xname, "txdrop");
2217
2218 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2219 NULL, xname, "tu");
2220
2221 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2222 NULL, xname, "tx_xoff");
2223 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2224 NULL, xname, "tx_xon");
2225 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2226 NULL, xname, "rx_xoff");
2227 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2228 NULL, xname, "rx_xon");
2229 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2230 NULL, xname, "rx_macctl");
2231 #endif /* WM_EVENT_COUNTERS */
2232
2233 if (pmf_device_register(self, wm_suspend, wm_resume))
2234 pmf_class_network_register(self, ifp);
2235 else
2236 aprint_error_dev(self, "couldn't establish power handler\n");
2237
2238 return;
2239
2240 /*
2241 * Free any resources we've allocated during the failed attach
2242 * attempt. Do this in reverse order and fall through.
2243 */
2244 fail_5:
2245 for (i = 0; i < WM_NRXDESC; i++) {
2246 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2247 bus_dmamap_destroy(sc->sc_dmat,
2248 sc->sc_rxsoft[i].rxs_dmamap);
2249 }
2250 fail_4:
2251 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2252 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2253 bus_dmamap_destroy(sc->sc_dmat,
2254 sc->sc_txsoft[i].txs_dmamap);
2255 }
2256 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2257 fail_3:
2258 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2259 fail_2:
2260 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2261 sc->sc_cd_size);
2262 fail_1:
2263 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2264 fail_0:
2265 return;
2266 }
2267
2268 /* The detach function (ca_detach) */
2269 static int
2270 wm_detach(device_t self, int flags __unused)
2271 {
2272 struct wm_softc *sc = device_private(self);
2273 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2274 int i;
2275 #ifndef WM_MPSAFE
2276 int s;
2277
2278 s = splnet();
2279 #endif
2280 /* Stop the interface. Callouts are stopped in it. */
2281 wm_stop(ifp, 1);
2282
2283 #ifndef WM_MPSAFE
2284 splx(s);
2285 #endif
2286
2287 pmf_device_deregister(self);
2288
2289 /* Tell the firmware about the release */
2290 WM_LOCK(sc);
2291 wm_release_manageability(sc);
2292 wm_release_hw_control(sc);
2293 WM_UNLOCK(sc);
2294
2295 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2296
2297 /* Delete all remaining media. */
2298 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2299
2300 ether_ifdetach(ifp);
2301 if_detach(ifp);
2302
2304 /* Unload RX dmamaps and free mbufs */
2305 WM_LOCK(sc);
2306 wm_rxdrain(sc);
2307 WM_UNLOCK(sc);
2308 /* Must unlock here */
2309
2310 /* Free the DMA maps; this mirrors the cleanup at the end of wm_attach() */
2311 for (i = 0; i < WM_NRXDESC; i++) {
2312 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2313 bus_dmamap_destroy(sc->sc_dmat,
2314 sc->sc_rxsoft[i].rxs_dmamap);
2315 }
2316 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2317 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2318 bus_dmamap_destroy(sc->sc_dmat,
2319 sc->sc_txsoft[i].txs_dmamap);
2320 }
2321 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2322 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2323 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2324 sc->sc_cd_size);
2325 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2326
2327 /* Disestablish the interrupt handler */
2328 if (sc->sc_ih != NULL) {
2329 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2330 sc->sc_ih = NULL;
2331 }
2332
2333 /* Unmap the registers */
2334 if (sc->sc_ss) {
2335 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2336 sc->sc_ss = 0;
2337 }
2338
2339 if (sc->sc_ios) {
2340 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2341 sc->sc_ios = 0;
2342 }
2343
2344 if (sc->sc_txrx_lock)
2345 mutex_obj_free(sc->sc_txrx_lock);
2346
2347 return 0;
2348 }
2349
2350 static bool
2351 wm_suspend(device_t self, const pmf_qual_t *qual)
2352 {
2353 struct wm_softc *sc = device_private(self);
2354
2355 wm_release_manageability(sc);
2356 wm_release_hw_control(sc);
2357 #ifdef WM_WOL
2358 wm_enable_wakeup(sc);
2359 #endif
2360
2361 return true;
2362 }
2363
2364 static bool
2365 wm_resume(device_t self, const pmf_qual_t *qual)
2366 {
2367 struct wm_softc *sc = device_private(self);
2368
2369 wm_init_manageability(sc);
2370
2371 return true;
2372 }
2373
2374 /*
2375 * wm_watchdog: [ifnet interface function]
2376 *
2377 * Watchdog timer handler.
2378 */
2379 static void
2380 wm_watchdog(struct ifnet *ifp)
2381 {
2382 struct wm_softc *sc = ifp->if_softc;
2383
2384 /*
2385 * Since we're using delayed interrupts, sweep up
2386 * before we report an error.
2387 */
2388 WM_LOCK(sc);
2389 wm_txintr(sc);
2390 WM_UNLOCK(sc);
2391
2392 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2393 #ifdef WM_DEBUG
2394 int i, j;
2395 struct wm_txsoft *txs;
2396 #endif
2397 log(LOG_ERR,
2398 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2399 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2400 sc->sc_txnext);
2401 ifp->if_oerrors++;
2402 #ifdef WM_DEBUG
2403 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2404 i = WM_NEXTTXS(sc, i)) {
2405 txs = &sc->sc_txsoft[i];
2406 printf("txs %d tx %d -> %d\n",
2407 i, txs->txs_firstdesc, txs->txs_lastdesc);
2408 for (j = txs->txs_firstdesc; ;
2409 j = WM_NEXTTX(sc, j)) {
2410 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2411 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2412 printf("\t %#08x%08x\n",
2413 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2414 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2415 if (j == txs->txs_lastdesc)
2416 break;
2417 }
2418 }
2419 #endif
2420 /* Reset the interface. */
2421 (void) wm_init(ifp);
2422 }
2423
2424 /* Try to get more packets going. */
2425 ifp->if_start(ifp);
2426 }
2427
2428 /*
2429 * wm_tick:
2430 *
2431 * One second timer, used to check link status, sweep up
2432 * completed transmit jobs, etc.
2433 */
2434 static void
2435 wm_tick(void *arg)
2436 {
2437 struct wm_softc *sc = arg;
2438 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2439 #ifndef WM_MPSAFE
2440 int s;
2441
2442 s = splnet();
2443 #endif
2444
2445 WM_LOCK(sc);
2446
2447 if (sc->sc_stopping)
2448 goto out;
2449
2450 if (sc->sc_type >= WM_T_82542_2_1) {
2451 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2452 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2453 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2454 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2455 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2456 }
2457
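/* Fold the hardware counters (which clear on read) into the interface stats */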
2458 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2459 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2460 + CSR_READ(sc, WMREG_CRCERRS)
2461 + CSR_READ(sc, WMREG_ALGNERRC)
2462 + CSR_READ(sc, WMREG_SYMERRC)
2463 + CSR_READ(sc, WMREG_RXERRC)
2464 + CSR_READ(sc, WMREG_SEC)
2465 + CSR_READ(sc, WMREG_CEXTERR)
2466 + CSR_READ(sc, WMREG_RLEC);
2467 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2468
2469 if (sc->sc_flags & WM_F_HAS_MII)
2470 mii_tick(&sc->sc_mii);
2471 else
2472 wm_tbi_check_link(sc);
2473
2474 out:
2475 WM_UNLOCK(sc);
2476 #ifndef WM_MPSAFE
2477 splx(s);
2478 #endif
2479
2480 if (!sc->sc_stopping)
2481 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2482 }
2483
2484 static int
2485 wm_ifflags_cb(struct ethercom *ec)
2486 {
2487 struct ifnet *ifp = &ec->ec_if;
2488 struct wm_softc *sc = ifp->if_softc;
2489 int change = ifp->if_flags ^ sc->sc_if_flags;
2490 int rc = 0;
2491
2492 WM_LOCK(sc);
2493
2494 if (change != 0)
2495 sc->sc_if_flags = ifp->if_flags;
2496
2497 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2498 rc = ENETRESET;
2499 goto out;
2500 }
2501
2502 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2503 wm_set_filter(sc);
2504
2505 wm_set_vlan(sc);
2506
2507 out:
2508 WM_UNLOCK(sc);
2509
2510 return rc;
2511 }
2512
2513 /*
2514 * wm_ioctl: [ifnet interface function]
2515 *
2516 * Handle control requests from the operator.
2517 */
2518 static int
2519 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2520 {
2521 struct wm_softc *sc = ifp->if_softc;
2522 struct ifreq *ifr = (struct ifreq *) data;
2523 struct ifaddr *ifa = (struct ifaddr *)data;
2524 struct sockaddr_dl *sdl;
2525 int s, error;
2526
2527 #ifndef WM_MPSAFE
2528 s = splnet();
2529 #endif
2530 WM_LOCK(sc);
2531
2532 switch (cmd) {
2533 case SIOCSIFMEDIA:
2534 case SIOCGIFMEDIA:
2535 /* Flow control requires full-duplex mode. */
2536 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2537 (ifr->ifr_media & IFM_FDX) == 0)
2538 ifr->ifr_media &= ~IFM_ETH_FMASK;
2539 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2540 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2541 /* We can do both TXPAUSE and RXPAUSE. */
2542 ifr->ifr_media |=
2543 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2544 }
2545 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2546 }
2547 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2548 break;
2549 case SIOCINITIFADDR:
2550 if (ifa->ifa_addr->sa_family == AF_LINK) {
2551 sdl = satosdl(ifp->if_dl->ifa_addr);
2552 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2553 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2554 /* unicast address is first multicast entry */
2555 wm_set_filter(sc);
2556 error = 0;
2557 break;
2558 }
2559 /*FALLTHROUGH*/
2560 default:
2561 WM_UNLOCK(sc);
2562 #ifdef WM_MPSAFE
2563 s = splnet();
2564 #endif
2565 /* ether_ioctl() may call wm_start(), so call it unlocked */
2566 error = ether_ioctl(ifp, cmd, data);
2567 #ifdef WM_MPSAFE
2568 splx(s);
2569 #endif
2570 WM_LOCK(sc);
2571
2572 if (error != ENETRESET)
2573 break;
2574
2575 error = 0;
2576
2577 if (cmd == SIOCSIFCAP) {
2578 WM_UNLOCK(sc);
2579 error = (*ifp->if_init)(ifp);
2580 WM_LOCK(sc);
2581 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2582 ;
2583 else if (ifp->if_flags & IFF_RUNNING) {
2584 /*
2585 * Multicast list has changed; set the hardware filter
2586 * accordingly.
2587 */
2588 wm_set_filter(sc);
2589 }
2590 break;
2591 }
2592
2593 WM_UNLOCK(sc);
2594
2595 /* Try to get more packets going. */
2596 ifp->if_start(ifp);
2597
2598 #ifndef WM_MPSAFE
2599 splx(s);
2600 #endif
2601 return error;
2602 }
2603
2604 /* MAC address related */
2605
2606 static int
2607 wm_check_alt_mac_addr(struct wm_softc *sc)
2608 {
2609 uint16_t myea[ETHER_ADDR_LEN / 2];
2610 uint16_t offset = EEPROM_OFF_MACADDR;
2611
2612 /* Try to read alternative MAC address pointer */
2613 if (wm_nvm_read(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2614 return -1;
2615
2616 /* Check pointer */
2617 if (offset == 0xffff)
2618 return -1;
2619
2620 /*
2621 * Check whether the alternative MAC address is valid.
2622 * Some cards have a non-0xffff pointer but don't actually
2623 * use an alternative MAC address.
2624 *
2625 * Check that the multicast bit of the first octet is clear.
2626 */
2627 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2628 if (((myea[0] & 0xff) & 0x01) == 0)
2629 return 0; /* found! */
2630
2631 /* not found */
2632 return -1;
2633 }
2634
2635 static int
2636 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2637 {
2638 uint16_t myea[ETHER_ADDR_LEN / 2];
2639 uint16_t offset = EEPROM_OFF_MACADDR;
2640 int do_invert = 0;
2641
2642 switch (sc->sc_type) {
2643 case WM_T_82580:
2644 case WM_T_82580ER:
2645 case WM_T_I350:
2646 case WM_T_I354:
2647 switch (sc->sc_funcid) {
2648 case 0:
2649 /* default value (== EEPROM_OFF_MACADDR) */
2650 break;
2651 case 1:
2652 offset = EEPROM_OFF_LAN1;
2653 break;
2654 case 2:
2655 offset = EEPROM_OFF_LAN2;
2656 break;
2657 case 3:
2658 offset = EEPROM_OFF_LAN3;
2659 break;
2660 default:
2661 goto bad;
2662 /* NOTREACHED */
2663 break;
2664 }
2665 break;
2666 case WM_T_82571:
2667 case WM_T_82575:
2668 case WM_T_82576:
2669 case WM_T_80003:
2670 case WM_T_I210:
2671 case WM_T_I211:
2672 if (wm_check_alt_mac_addr(sc) != 0) {
2673 /* reset the offset to LAN0 */
2674 offset = EEPROM_OFF_MACADDR;
2675 if ((sc->sc_funcid & 0x01) == 1)
2676 do_invert = 1;
2677 goto do_read;
2678 }
2679 switch (sc->sc_funcid) {
2680 case 0:
2681 /*
2682 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
2683 * itself.
2684 */
2685 break;
2686 case 1:
2687 offset += EEPROM_OFF_MACADDR_LAN1;
2688 break;
2689 case 2:
2690 offset += EEPROM_OFF_MACADDR_LAN2;
2691 break;
2692 case 3:
2693 offset += EEPROM_OFF_MACADDR_LAN3;
2694 break;
2695 default:
2696 goto bad;
2697 /* NOTREACHED */
2698 break;
2699 }
2700 break;
2701 default:
2702 if ((sc->sc_funcid & 0x01) == 1)
2703 do_invert = 1;
2704 break;
2705 }
2706
2707 do_read:
2708 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2709 myea) != 0) {
2710 goto bad;
2711 }
2712
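/* The EEPROM stores the MAC address as three little-endian 16-bit words */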
2713 enaddr[0] = myea[0] & 0xff;
2714 enaddr[1] = myea[0] >> 8;
2715 enaddr[2] = myea[1] & 0xff;
2716 enaddr[3] = myea[1] >> 8;
2717 enaddr[4] = myea[2] & 0xff;
2718 enaddr[5] = myea[2] >> 8;
2719
2720 /*
2721 * Toggle the LSB of the MAC address on the second port
2722 * of some dual port cards.
2723 */
2724 if (do_invert != 0)
2725 enaddr[5] ^= 1;
2726
2727 return 0;
2728
2729 bad:
2730 return -1;
2731 }
2732
2733 /*
2734 * wm_set_ral:
2735 *
2736 * Set an entry in the receive address list.
2737 */
2738 static void
2739 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2740 {
2741 uint32_t ral_lo, ral_hi;
2742
2743 if (enaddr != NULL) {
2744 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2745 (enaddr[3] << 24);
2746 ral_hi = enaddr[4] | (enaddr[5] << 8);
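/* RAL_AV marks the receive address entry as valid */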
2747 ral_hi |= RAL_AV;
2748 } else {
2749 ral_lo = 0;
2750 ral_hi = 0;
2751 }
2752
2753 if (sc->sc_type >= WM_T_82544) {
2754 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2755 ral_lo);
2756 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2757 ral_hi);
2758 } else {
2759 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2760 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2761 }
2762 }
2763
2764 /*
2765 * wm_mchash:
2766 *
2767 * Compute the hash of the multicast address for the 4096-bit
2768 * multicast filter (1024-bit on ICH/PCH devices).
2769 */
2770 static uint32_t
2771 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2772 {
2773 static const int lo_shift[4] = { 4, 3, 2, 0 };
2774 static const int hi_shift[4] = { 4, 5, 6, 8 };
2775 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2776 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
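/*
 * sc_mchash_type selects which bits of the last two bytes of the
 * address form the hash, via the shift tables above.
 */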
2777 uint32_t hash;
2778
2779 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2780 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2781 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2782 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2783 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2784 return (hash & 0x3ff);
2785 }
2786 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2787 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2788
2789 return (hash & 0xfff);
2790 }
2791
2792 /*
2793 * wm_set_filter:
2794 *
2795 * Set up the receive filter.
2796 */
2797 static void
2798 wm_set_filter(struct wm_softc *sc)
2799 {
2800 struct ethercom *ec = &sc->sc_ethercom;
2801 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2802 struct ether_multi *enm;
2803 struct ether_multistep step;
2804 bus_addr_t mta_reg;
2805 uint32_t hash, reg, bit;
2806 int i, size;
2807
2808 if (sc->sc_type >= WM_T_82544)
2809 mta_reg = WMREG_CORDOVA_MTA;
2810 else
2811 mta_reg = WMREG_MTA;
2812
2813 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2814
2815 if (ifp->if_flags & IFF_BROADCAST)
2816 sc->sc_rctl |= RCTL_BAM;
2817 if (ifp->if_flags & IFF_PROMISC) {
2818 sc->sc_rctl |= RCTL_UPE;
2819 goto allmulti;
2820 }
2821
2822 /*
2823 * Set the station address in the first RAL slot, and
2824 * clear the remaining slots.
2825 */
2826 if (sc->sc_type == WM_T_ICH8)
2827 size = WM_RAL_TABSIZE_ICH8 - 1;
2828 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2829 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2830 || (sc->sc_type == WM_T_PCH_LPT))
2831 size = WM_RAL_TABSIZE_ICH8;
2832 else if (sc->sc_type == WM_T_82575)
2833 size = WM_RAL_TABSIZE_82575;
2834 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2835 size = WM_RAL_TABSIZE_82576;
2836 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2837 size = WM_RAL_TABSIZE_I350;
2838 else
2839 size = WM_RAL_TABSIZE;
2840 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2841 for (i = 1; i < size; i++)
2842 wm_set_ral(sc, NULL, i);
2843
2844 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2845 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2846 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2847 size = WM_ICH8_MC_TABSIZE;
2848 else
2849 size = WM_MC_TABSIZE;
2850 /* Clear out the multicast table. */
2851 for (i = 0; i < size; i++)
2852 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2853
2854 ETHER_FIRST_MULTI(step, ec, enm);
2855 while (enm != NULL) {
2856 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2857 /*
2858 * We must listen to a range of multicast addresses.
2859 * For now, just accept all multicasts, rather than
2860 * trying to set only those filter bits needed to match
2861 * the range. (At this time, the only use of address
2862 * ranges is for IP multicast routing, for which the
2863 * range is big enough to require all bits set.)
2864 */
2865 goto allmulti;
2866 }
2867
2868 hash = wm_mchash(sc, enm->enm_addrlo);
2869
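/*
 * The upper bits of the hash select the 32-bit MTA register;
 * the low 5 bits select the bit within it.
 */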
2870 reg = (hash >> 5);
2871 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2872 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2873 || (sc->sc_type == WM_T_PCH2)
2874 || (sc->sc_type == WM_T_PCH_LPT))
2875 reg &= 0x1f;
2876 else
2877 reg &= 0x7f;
2878 bit = hash & 0x1f;
2879
2880 hash = CSR_READ(sc, mta_reg + (reg << 2));
2881 hash |= 1U << bit;
2882
2883 /* 82544 errata: a write to an odd MTA register can corrupt the previous one */
2884 if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
2885 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2886 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2887 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2888 } else
2889 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2890
2891 ETHER_NEXT_MULTI(step, enm);
2892 }
2893
2894 ifp->if_flags &= ~IFF_ALLMULTI;
2895 goto setit;
2896
2897 allmulti:
2898 ifp->if_flags |= IFF_ALLMULTI;
2899 sc->sc_rctl |= RCTL_MPE;
2900
2901 setit:
2902 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2903 }
2904
2905 /* Reset and init related */
2906
2907 static void
2908 wm_set_vlan(struct wm_softc *sc)
2909 {
2910 /* Deal with VLAN enables. */
2911 if (VLAN_ATTACHED(&sc->sc_ethercom))
2912 sc->sc_ctrl |= CTRL_VME;
2913 else
2914 sc->sc_ctrl &= ~CTRL_VME;
2915
2916 /* Write the control registers. */
2917 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2918 }
2919
2920 static void
2921 wm_set_pcie_completion_timeout(struct wm_softc *sc)
2922 {
2923 uint32_t gcr;
2924 pcireg_t ctrl2;
2925
2926 gcr = CSR_READ(sc, WMREG_GCR);
2927
2928 /* Only take action if timeout value is defaulted to 0 */
2929 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
2930 goto out;
2931
2932 if ((gcr & GCR_CAP_VER2) == 0) {
2933 gcr |= GCR_CMPL_TMOUT_10MS;
2934 goto out;
2935 }
2936
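/* With a version-2 PCIe capability, set a 16ms timeout via DCSR2 instead */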
2937 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
2938 sc->sc_pcixe_capoff + PCIE_DCSR2);
2939 ctrl2 |= WM_PCIE_DCSR2_16MS;
2940 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
2941 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
2942
2943 out:
2944 /* Disable completion timeout resend */
2945 gcr &= ~GCR_CMPL_TMOUT_RESEND;
2946
2947 CSR_WRITE(sc, WMREG_GCR, gcr);
2948 }
2949
2950 void
2951 wm_get_auto_rd_done(struct wm_softc *sc)
2952 {
2953 int i;
2954
2955 /* Wait for the EEPROM auto-read to complete */
2956 switch (sc->sc_type) {
2957 case WM_T_82571:
2958 case WM_T_82572:
2959 case WM_T_82573:
2960 case WM_T_82574:
2961 case WM_T_82583:
2962 case WM_T_82575:
2963 case WM_T_82576:
2964 case WM_T_82580:
2965 case WM_T_82580ER:
2966 case WM_T_I350:
2967 case WM_T_I354:
2968 case WM_T_I210:
2969 case WM_T_I211:
2970 case WM_T_80003:
2971 case WM_T_ICH8:
2972 case WM_T_ICH9:
2973 for (i = 0; i < 10; i++) {
2974 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
2975 break;
2976 delay(1000);
2977 }
2978 if (i == 10) {
2979 log(LOG_ERR, "%s: auto read from eeprom failed to "
2980 "complete\n", device_xname(sc->sc_dev));
2981 }
2982 break;
2983 default:
2984 break;
2985 }
2986 }
2987
2988 void
2989 wm_lan_init_done(struct wm_softc *sc)
2990 {
2991 uint32_t reg = 0;
2992 int i;
2993
2994 /* Wait for the hardware to signal LAN init done */
2995 switch (sc->sc_type) {
2996 case WM_T_ICH10:
2997 case WM_T_PCH:
2998 case WM_T_PCH2:
2999 case WM_T_PCH_LPT:
3000 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3001 reg = CSR_READ(sc, WMREG_STATUS);
3002 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3003 break;
3004 delay(100);
3005 }
3006 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3007 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3008 "complete\n", device_xname(sc->sc_dev), __func__);
3009 }
3010 break;
3011 default:
3012 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3013 __func__);
3014 break;
3015 }
3016
3017 reg &= ~STATUS_LAN_INIT_DONE;
3018 CSR_WRITE(sc, WMREG_STATUS, reg);
3019 }
3020
3021 void
3022 wm_get_cfg_done(struct wm_softc *sc)
3023 {
3024 int mask;
3025 uint32_t reg;
3026 int i;
3027
3028 /* Wait for the post-reset configuration to complete */
3029 switch (sc->sc_type) {
3030 case WM_T_82542_2_0:
3031 case WM_T_82542_2_1:
3032 /* null */
3033 break;
3034 case WM_T_82543:
3035 case WM_T_82544:
3036 case WM_T_82540:
3037 case WM_T_82545:
3038 case WM_T_82545_3:
3039 case WM_T_82546:
3040 case WM_T_82546_3:
3041 case WM_T_82541:
3042 case WM_T_82541_2:
3043 case WM_T_82547:
3044 case WM_T_82547_2:
3045 case WM_T_82573:
3046 case WM_T_82574:
3047 case WM_T_82583:
3048 /* generic */
3049 delay(10*1000);
3050 break;
3051 case WM_T_80003:
3052 case WM_T_82571:
3053 case WM_T_82572:
3054 case WM_T_82575:
3055 case WM_T_82576:
3056 case WM_T_82580:
3057 case WM_T_82580ER:
3058 case WM_T_I350:
3059 case WM_T_I354:
3060 case WM_T_I210:
3061 case WM_T_I211:
3062 if (sc->sc_type == WM_T_82571) {
3063 /* On the 82571 all functions share CFGDONE_0 */
3064 mask = EEMNGCTL_CFGDONE_0;
3065 } else
3066 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3067 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3068 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3069 break;
3070 delay(1000);
3071 }
3072 if (i >= WM_PHY_CFG_TIMEOUT) {
3073 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3074 device_xname(sc->sc_dev), __func__));
3075 }
3076 break;
3077 case WM_T_ICH8:
3078 case WM_T_ICH9:
3079 case WM_T_ICH10:
3080 case WM_T_PCH:
3081 case WM_T_PCH2:
3082 case WM_T_PCH_LPT:
3083 delay(10*1000);
3084 if (sc->sc_type >= WM_T_ICH10)
3085 wm_lan_init_done(sc);
3086 else
3087 wm_get_auto_rd_done(sc);
3088
3089 reg = CSR_READ(sc, WMREG_STATUS);
3090 if ((reg & STATUS_PHYRA) != 0)
3091 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3092 break;
3093 default:
3094 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3095 __func__);
3096 break;
3097 }
3098 }
3099
3100 /*
3101 * wm_reset:
3102 *
3103 * Reset the i82542 chip.
3104 */
3105 static void
3106 wm_reset(struct wm_softc *sc)
3107 {
3108 int phy_reset = 0;
3109 int error = 0;
3110 uint32_t reg, mask;
3111
3112 /*
3113 * Allocate on-chip memory according to the MTU size.
3114 * The Packet Buffer Allocation register must be written
3115 * before the chip is reset.
3116 */
3117 switch (sc->sc_type) {
3118 case WM_T_82547:
3119 case WM_T_82547_2:
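/*
 * The 82547's Tx FIFO shares the 40KB packet buffer with the
 * Rx allocation; record its offset and size for the Tx FIFO
 * stall work-around.
 */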
3120 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3121 PBA_22K : PBA_30K;
3122 sc->sc_txfifo_head = 0;
3123 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3124 sc->sc_txfifo_size =
3125 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3126 sc->sc_txfifo_stall = 0;
3127 break;
3128 case WM_T_82571:
3129 case WM_T_82572:
3130 case WM_T_82575: /* XXX need special handling for jumbo frames */
3131 case WM_T_I350:
3132 case WM_T_I354:
3133 case WM_T_80003:
3134 sc->sc_pba = PBA_32K;
3135 break;
3136 case WM_T_82580:
3137 case WM_T_82580ER:
3138 sc->sc_pba = PBA_35K;
3139 break;
3140 case WM_T_I210:
3141 case WM_T_I211:
3142 sc->sc_pba = PBA_34K;
3143 break;
3144 case WM_T_82576:
3145 sc->sc_pba = PBA_64K;
3146 break;
3147 case WM_T_82573:
3148 sc->sc_pba = PBA_12K;
3149 break;
3150 case WM_T_82574:
3151 case WM_T_82583:
3152 sc->sc_pba = PBA_20K;
3153 break;
3154 case WM_T_ICH8:
3155 sc->sc_pba = PBA_8K;
3156 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3157 break;
3158 case WM_T_ICH9:
3159 case WM_T_ICH10:
3160 sc->sc_pba = PBA_10K;
3161 break;
3162 case WM_T_PCH:
3163 case WM_T_PCH2:
3164 case WM_T_PCH_LPT:
3165 sc->sc_pba = PBA_26K;
3166 break;
3167 default:
3168 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3169 PBA_40K : PBA_48K;
3170 break;
3171 }
3172 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3173
3174 /* Prevent the PCI-E bus from sticking */
3175 if (sc->sc_flags & WM_F_PCIE) {
3176 int timeout = 800;
3177
3178 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3179 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3180
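/* Wait up to 800 * 100us = 80ms for pending PCIe transactions to drain */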
3181 while (timeout--) {
3182 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3183 == 0)
3184 break;
3185 delay(100);
3186 }
3187 }
3188
3189 /* Set the completion timeout for interface */
3190 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3191 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3192 wm_set_pcie_completion_timeout(sc);
3193
3194 /* Clear interrupt */
3195 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3196
3197 /* Stop the transmit and receive processes. */
3198 CSR_WRITE(sc, WMREG_RCTL, 0);
3199 sc->sc_rctl &= ~RCTL_EN;
3200 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3201 CSR_WRITE_FLUSH(sc);
3202
3203 /* XXX set_tbi_sbp_82543() */
3204
3205 delay(10*1000);
3206
3207 /* Must acquire the MDIO ownership before MAC reset */
3208 switch (sc->sc_type) {
3209 case WM_T_82573:
3210 case WM_T_82574:
3211 case WM_T_82583:
3212 error = wm_get_hw_semaphore_82573(sc);
3213 break;
3214 default:
3215 break;
3216 }
3217
3218 /*
3219 * 82541 Errata 29? & 82547 Errata 28?
3220 * See also the description about PHY_RST bit in CTRL register
3221 * in 8254x_GBe_SDM.pdf.
3222 */
3223 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3224 CSR_WRITE(sc, WMREG_CTRL,
3225 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3226 CSR_WRITE_FLUSH(sc);
3227 delay(5000);
3228 }
3229
3230 switch (sc->sc_type) {
3231 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3232 case WM_T_82541:
3233 case WM_T_82541_2:
3234 case WM_T_82547:
3235 case WM_T_82547_2:
3236 /*
3237 * On some chipsets, a reset through a memory-mapped write
3238 * cycle can cause the chip to reset before completing the
3239 * write cycle. This causes a major headache that can be
3240 * avoided by issuing the reset via indirect register writes
3241 * through I/O space.
3242 *
3243 * So, if we successfully mapped the I/O BAR at attach time,
3244 * use that. Otherwise, try our luck with a memory-mapped
3245 * reset.
3246 */
3247 if (sc->sc_flags & WM_F_IOH_VALID)
3248 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3249 else
3250 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3251 break;
3252 case WM_T_82545_3:
3253 case WM_T_82546_3:
3254 /* Use the shadow control register on these chips. */
3255 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3256 break;
3257 case WM_T_80003:
3258 mask = swfwphysem[sc->sc_funcid];
3259 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3260 wm_get_swfw_semaphore(sc, mask);
3261 CSR_WRITE(sc, WMREG_CTRL, reg);
3262 wm_put_swfw_semaphore(sc, mask);
3263 break;
3264 case WM_T_ICH8:
3265 case WM_T_ICH9:
3266 case WM_T_ICH10:
3267 case WM_T_PCH:
3268 case WM_T_PCH2:
3269 case WM_T_PCH_LPT:
3270 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3271 if (wm_check_reset_block(sc) == 0) {
3272 /*
3273 * Gate automatic PHY configuration by hardware on
3274 * non-managed 82579
3275 */
3276 if ((sc->sc_type == WM_T_PCH2)
3277 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3278 != 0))
3279 wm_gate_hw_phy_config_ich8lan(sc, 1);
3280
3282 reg |= CTRL_PHY_RESET;
3283 phy_reset = 1;
3284 }
3285 wm_get_swfwhw_semaphore(sc);
3286 CSR_WRITE(sc, WMREG_CTRL, reg);
3287 /* Don't insert a completion barrier when resetting; just delay */
3288 delay(20*1000);
3289 wm_put_swfwhw_semaphore(sc);
3290 break;
3291 case WM_T_82542_2_0:
3292 case WM_T_82542_2_1:
3293 case WM_T_82543:
3294 case WM_T_82540:
3295 case WM_T_82545:
3296 case WM_T_82546:
3297 case WM_T_82571:
3298 case WM_T_82572:
3299 case WM_T_82573:
3300 case WM_T_82574:
3301 case WM_T_82575:
3302 case WM_T_82576:
3303 case WM_T_82580:
3304 case WM_T_82580ER:
3305 case WM_T_82583:
3306 case WM_T_I350:
3307 case WM_T_I354:
3308 case WM_T_I210:
3309 case WM_T_I211:
3310 default:
3311 /* Everything else can safely use the documented method. */
3312 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3313 break;
3314 }
3315
3316 /* Must release the MDIO ownership after MAC reset */
3317 switch (sc->sc_type) {
3318 case WM_T_82573:
3319 case WM_T_82574:
3320 case WM_T_82583:
3321 if (error == 0)
3322 wm_put_hw_semaphore_82573(sc);
3323 break;
3324 default:
3325 break;
3326 }
3327
3328 if (phy_reset != 0)
3329 wm_get_cfg_done(sc);
3330
3331 /* reload EEPROM */
3332 switch (sc->sc_type) {
3333 case WM_T_82542_2_0:
3334 case WM_T_82542_2_1:
3335 case WM_T_82543:
3336 case WM_T_82544:
3337 delay(10);
3338 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3339 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3340 CSR_WRITE_FLUSH(sc);
3341 delay(2000);
3342 break;
3343 case WM_T_82540:
3344 case WM_T_82545:
3345 case WM_T_82545_3:
3346 case WM_T_82546:
3347 case WM_T_82546_3:
3348 delay(5*1000);
3349 /* XXX Disable HW ARPs on ASF enabled adapters */
3350 break;
3351 case WM_T_82541:
3352 case WM_T_82541_2:
3353 case WM_T_82547:
3354 case WM_T_82547_2:
3355 delay(20000);
3356 /* XXX Disable HW ARPs on ASF enabled adapters */
3357 break;
3358 case WM_T_82571:
3359 case WM_T_82572:
3360 case WM_T_82573:
3361 case WM_T_82574:
3362 case WM_T_82583:
3363 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3364 delay(10);
3365 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3366 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3367 CSR_WRITE_FLUSH(sc);
3368 }
3369 /* check EECD_EE_AUTORD */
3370 wm_get_auto_rd_done(sc);
3371 /*
3372 * PHY configuration from the NVM starts only after
3373 * EECD_AUTO_RD is set.
3374 */
3375 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3376 || (sc->sc_type == WM_T_82583))
3377 delay(25*1000);
3378 break;
3379 case WM_T_82575:
3380 case WM_T_82576:
3381 case WM_T_82580:
3382 case WM_T_82580ER:
3383 case WM_T_I350:
3384 case WM_T_I354:
3385 case WM_T_I210:
3386 case WM_T_I211:
3387 case WM_T_80003:
3388 /* check EECD_EE_AUTORD */
3389 wm_get_auto_rd_done(sc);
3390 break;
3391 case WM_T_ICH8:
3392 case WM_T_ICH9:
3393 case WM_T_ICH10:
3394 case WM_T_PCH:
3395 case WM_T_PCH2:
3396 case WM_T_PCH_LPT:
3397 break;
3398 default:
3399 panic("%s: unknown type\n", __func__);
3400 }
3401
3402 /* Check whether EEPROM is present or not */
3403 switch (sc->sc_type) {
3404 case WM_T_82575:
3405 case WM_T_82576:
3406 #if 0 /* XXX */
3407 case WM_T_82580:
3408 case WM_T_82580ER:
3409 #endif
3410 case WM_T_I350:
3411 case WM_T_I354:
3412 case WM_T_ICH8:
3413 case WM_T_ICH9:
3414 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3415 /* Not found */
3416 sc->sc_flags |= WM_F_EEPROM_INVALID;
3417 if ((sc->sc_type == WM_T_82575)
3418 || (sc->sc_type == WM_T_82576)
3419 || (sc->sc_type == WM_T_82580)
3420 || (sc->sc_type == WM_T_82580ER)
3421 || (sc->sc_type == WM_T_I350)
3422 || (sc->sc_type == WM_T_I354))
3423 wm_reset_init_script_82575(sc);
3424 }
3425 break;
3426 default:
3427 break;
3428 }
3429
3430 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
3431 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3432 /* clear global device reset status bit */
3433 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3434 }
3435
3436 /* Clear any pending interrupt events. */
3437 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3438 reg = CSR_READ(sc, WMREG_ICR);
3439
3440 /* reload sc_ctrl */
3441 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3442
3443 if (sc->sc_type == WM_T_I350)
3444 wm_set_eee_i350(sc);
3445
3446 /* dummy read from WUC */
3447 if (sc->sc_type == WM_T_PCH)
3448 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3449 /*
3450 * For PCH, this write will make sure that any noise will be detected
3451 * as a CRC error and be dropped rather than show up as a bad packet
3452 * to the DMA engine.
3453 */
3454 if (sc->sc_type == WM_T_PCH)
3455 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3456
3457 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3458 CSR_WRITE(sc, WMREG_WUC, 0);
3459
3460 /* XXX need special handling for 82580 */
3461 }
3462
3463 /*
3464 * wm_add_rxbuf:
3465 *
3466 * Add a receive buffer to the indicated descriptor.
3467 */
3468 static int
3469 wm_add_rxbuf(struct wm_softc *sc, int idx)
3470 {
3471 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3472 struct mbuf *m;
3473 int error;
3474
3475 KASSERT(WM_LOCKED(sc));
3476
3477 MGETHDR(m, M_DONTWAIT, MT_DATA);
3478 if (m == NULL)
3479 return ENOBUFS;
3480
3481 MCLGET(m, M_DONTWAIT);
3482 if ((m->m_flags & M_EXT) == 0) {
3483 m_freem(m);
3484 return ENOBUFS;
3485 }
3486
3487 if (rxs->rxs_mbuf != NULL)
3488 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3489
3490 rxs->rxs_mbuf = m;
3491
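/* Load the whole cluster; the actual packet length is set on receive */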
3492 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3493 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3494 BUS_DMA_READ|BUS_DMA_NOWAIT);
3495 if (error) {
3496 /* XXX XXX XXX */
3497 aprint_error_dev(sc->sc_dev,
3498 "unable to load rx DMA map %d, error = %d\n",
3499 idx, error);
3500 panic("wm_add_rxbuf");
3501 }
3502
3503 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3504 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3505
3506 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3507 if ((sc->sc_rctl & RCTL_EN) != 0)
3508 WM_INIT_RXDESC(sc, idx);
3509 } else
3510 WM_INIT_RXDESC(sc, idx);
3511
3512 return 0;
3513 }
3514
3515 /*
3516 * wm_rxdrain:
3517 *
3518 * Drain the receive queue.
3519 */
3520 static void
3521 wm_rxdrain(struct wm_softc *sc)
3522 {
3523 struct wm_rxsoft *rxs;
3524 int i;
3525
3526 KASSERT(WM_LOCKED(sc));
3527
3528 for (i = 0; i < WM_NRXDESC; i++) {
3529 rxs = &sc->sc_rxsoft[i];
3530 if (rxs->rxs_mbuf != NULL) {
3531 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3532 m_freem(rxs->rxs_mbuf);
3533 rxs->rxs_mbuf = NULL;
3534 }
3535 }
3536 }
3537
3538 /*
3539 * wm_init: [ifnet interface function]
3540 *
3541 * Initialize the interface.
3542 */
3543 static int
3544 wm_init(struct ifnet *ifp)
3545 {
3546 struct wm_softc *sc = ifp->if_softc;
3547 int ret;
3548
3549 WM_LOCK(sc);
3550 ret = wm_init_locked(ifp);
3551 WM_UNLOCK(sc);
3552
3553 return ret;
3554 }
3555
3556 static int
3557 wm_init_locked(struct ifnet *ifp)
3558 {
3559 struct wm_softc *sc = ifp->if_softc;
3560 struct wm_rxsoft *rxs;
3561 int i, j, trynum, error = 0;
3562 uint32_t reg;
3563
3564 KASSERT(WM_LOCKED(sc));
3565 /*
3566 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3567 * There is a small but measurable benefit to avoiding the adjustment
3568 * of the descriptor so that the headers are aligned, for normal MTU,
3569 * on such platforms. One possibility is that the DMA itself is
3570 * slightly more efficient if the front of the entire packet (instead
3571 * of the front of the headers) is aligned.
3572 *
3573 * Note we must always set align_tweak to 0 if we are using
3574 * jumbo frames.
3575 */
3576 #ifdef __NO_STRICT_ALIGNMENT
3577 sc->sc_align_tweak = 0;
3578 #else
3579 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3580 sc->sc_align_tweak = 0;
3581 else
3582 sc->sc_align_tweak = 2;
3583 #endif /* __NO_STRICT_ALIGNMENT */
3584
3585 /* Cancel any pending I/O. */
3586 wm_stop_locked(ifp, 0);
3587
3588 /* update statistics before reset */
3589 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3590 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3591
3592 /* Reset the chip to a known state. */
3593 wm_reset(sc);
3594
3595 switch (sc->sc_type) {
3596 case WM_T_82571:
3597 case WM_T_82572:
3598 case WM_T_82573:
3599 case WM_T_82574:
3600 case WM_T_82583:
3601 case WM_T_80003:
3602 case WM_T_ICH8:
3603 case WM_T_ICH9:
3604 case WM_T_ICH10:
3605 case WM_T_PCH:
3606 case WM_T_PCH2:
3607 case WM_T_PCH_LPT:
3608 if (wm_check_mng_mode(sc) != 0)
3609 wm_get_hw_control(sc);
3610 break;
3611 default:
3612 break;
3613 }
3614
3615 /* Reset the PHY. */
3616 if (sc->sc_flags & WM_F_HAS_MII)
3617 wm_gmii_reset(sc);
3618
3619 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3620 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3621 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3622 || (sc->sc_type == WM_T_PCH_LPT))
3623 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3624
3625 /* Initialize the transmit descriptor ring. */
3626 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3627 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3628 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3629 sc->sc_txfree = WM_NTXDESC(sc);
3630 sc->sc_txnext = 0;
3631
3632 if (sc->sc_type < WM_T_82543) {
3633 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3634 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3635 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3636 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3637 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3638 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3639 } else {
3640 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3641 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3642 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3643 CSR_WRITE(sc, WMREG_TDH, 0);
3644 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3645 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3646
3647 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3648 /*
3649 * Don't write TDT before TCTL.EN is set.
3650 * See the documentation.
3651 */
3652 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3653 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3654 | TXDCTL_WTHRESH(0));
3655 else {
3656 CSR_WRITE(sc, WMREG_TDT, 0);
3657 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3658 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3659 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3660 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3661 }
3662 }
3663 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3664 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3665
3666 /* Initialize the transmit job descriptors. */
3667 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3668 sc->sc_txsoft[i].txs_mbuf = NULL;
3669 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3670 sc->sc_txsnext = 0;
3671 sc->sc_txsdirty = 0;
3672
3673 /*
3674 * Initialize the receive descriptor and receive job
3675 * descriptor rings.
3676 */
3677 if (sc->sc_type < WM_T_82543) {
3678 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3679 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3680 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3681 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3682 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3683 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3684
3685 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3686 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3687 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3688 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3689 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3690 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3691 } else {
3692 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3693 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3694 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3695 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3696 CSR_WRITE(sc, WMREG_EITR(0), 450);
3697 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3698 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3699 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3700 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3701 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3702 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3703 | RXDCTL_WTHRESH(1));
3704 } else {
3705 CSR_WRITE(sc, WMREG_RDH, 0);
3706 CSR_WRITE(sc, WMREG_RDT, 0);
3707 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3708 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3709 }
3710 }
3711 for (i = 0; i < WM_NRXDESC; i++) {
3712 rxs = &sc->sc_rxsoft[i];
3713 if (rxs->rxs_mbuf == NULL) {
3714 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3715 log(LOG_ERR, "%s: unable to allocate or map "
3716 "rx buffer %d, error = %d\n",
3717 device_xname(sc->sc_dev), i, error);
3718 /*
3719 * XXX Should attempt to run with fewer receive
3720 * XXX buffers instead of just failing.
3721 */
3722 wm_rxdrain(sc);
3723 goto out;
3724 }
3725 } else {
3726 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3727 WM_INIT_RXDESC(sc, i);
3728 /*
3729 * For 82575 and newer devices, the RX descriptors
3730 * must be initialized after RCTL.EN is set in
3731 * wm_set_filter().
3732 */
3733 }
3734 }
3735 sc->sc_rxptr = 0;
3736 sc->sc_rxdiscard = 0;
3737 WM_RXCHAIN_RESET(sc);
3738
3739 /*
3740 * Clear out the VLAN table -- we don't use it (yet).
3741 */
3742 CSR_WRITE(sc, WMREG_VET, 0);
3743 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3744 trynum = 10; /* Due to hw errata */
3745 else
3746 trynum = 1;
3747 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3748 for (j = 0; j < trynum; j++)
3749 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3750
3751 /*
3752 * Set up flow-control parameters.
3753 *
3754 * XXX Values could probably stand some tuning.
3755 */
3756 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3757 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3758 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3759 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3760 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3761 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3762 }
3763
3764 sc->sc_fcrtl = FCRTL_DFLT;
3765 if (sc->sc_type < WM_T_82543) {
3766 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3767 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3768 } else {
3769 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3770 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3771 }
3772
3773 if (sc->sc_type == WM_T_80003)
3774 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3775 else
3776 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3777
3778 /* Writes the control register. */
3779 wm_set_vlan(sc);
3780
3781 if (sc->sc_flags & WM_F_HAS_MII) {
3782 int val;
3783
3784 switch (sc->sc_type) {
3785 case WM_T_80003:
3786 case WM_T_ICH8:
3787 case WM_T_ICH9:
3788 case WM_T_ICH10:
3789 case WM_T_PCH:
3790 case WM_T_PCH2:
3791 case WM_T_PCH_LPT:
3792 /*
3793 * Set the mac to wait the maximum time between each
3794 * iteration and increase the max iterations when
3795 * polling the phy; this fixes erroneous timeouts at
3796 * 10Mbps.
3797 */
3798 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3799 0xFFFF);
3800 val = wm_kmrn_readreg(sc,
3801 KUMCTRLSTA_OFFSET_INB_PARAM);
3802 val |= 0x3F;
3803 wm_kmrn_writereg(sc,
3804 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3805 break;
3806 default:
3807 break;
3808 }
3809
3810 if (sc->sc_type == WM_T_80003) {
3811 val = CSR_READ(sc, WMREG_CTRL_EXT);
3812 val &= ~CTRL_EXT_LINK_MODE_MASK;
3813 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3814
3815 /* Bypass RX and TX FIFOs */
3816 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3817 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3818 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3819 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3820 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3821 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3822 }
3823 }
3824 #if 0
3825 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3826 #endif
3827
3828 /* Set up checksum offload parameters. */
3829 reg = CSR_READ(sc, WMREG_RXCSUM);
3830 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3831 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3832 reg |= RXCSUM_IPOFL;
3833 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3834 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3835 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3836 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3837 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3838
3839 /* Reset TBI's RXCFG count */
3840 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3841
3842 /* Set up the interrupt registers. */
3843 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3844 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3845 ICR_RXO | ICR_RXT0;
3846 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3847 sc->sc_icr |= ICR_RXCFG;
3848 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3849
3850 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3851 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3852 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3853 reg = CSR_READ(sc, WMREG_KABGTXD);
3854 reg |= KABGTXD_BGSQLBIAS;
3855 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3856 }
3857
3858 /* Set up the inter-packet gap. */
3859 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3860
3861 if (sc->sc_type >= WM_T_82543) {
3862 /*
3863 * Set up the interrupt throttling register (units of 256ns)
3864 * Note that a footnote in Intel's documentation says this
3865 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3866 * or 10Mbit mode. Empirically, the same appears to be
3867 * true for the 1024ns units of the other interrupt-related
3868 * timer registers -- so, really, we ought to divide this
3869 * value by 4 when the link speed is low.
3870 *
3871 * XXX implement this division at link speed change!
3872 */
3873
3874 /*
3875 * For N interrupts/sec, set this value to:
3876 * 1000000000 / (N * 256). Note that we set the
3877 * absolute and packet timer values to this value
3878 * divided by 4 to get "simple timer" behavior.
3879 */
3880
3881 sc->sc_itr = 1500; /* 2604 ints/sec */
3882 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3883 }
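/*
 * Worked example of the formula above (a sketch, not driver code):
 * with sc_itr = 1500, N = 1000000000 / (1500 * 256) ~= 2604
 * interrupts/sec, and 1500 / 4 = 375 matches the TIDV/TADV (and
 * RDTR/RADV) "simple timer" values programmed earlier.
 */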
3884
3885 /* Set the VLAN ethernetype. */
3886 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3887
3888 /*
3889 * Set up the transmit control register; we start out with
3890 * a collision distance suitable for FDX, but update it when
3891 * we resolve the media type.
3892 */
3893 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3894 | TCTL_CT(TX_COLLISION_THRESHOLD)
3895 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3896 if (sc->sc_type >= WM_T_82571)
3897 sc->sc_tctl |= TCTL_MULR;
3898 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3899
3900 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3901 /* Write TDT after TCTL.EN is set. See the documentation. */
3902 CSR_WRITE(sc, WMREG_TDT, 0);
3903 }
3904
3905 if (sc->sc_type == WM_T_80003) {
3906 reg = CSR_READ(sc, WMREG_TCTL_EXT);
3907 reg &= ~TCTL_EXT_GCEX_MASK;
3908 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3909 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3910 }
3911
3912 /* Set the media. */
3913 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3914 goto out;
3915
3916 /* Configure for OS presence */
3917 wm_init_manageability(sc);
3918
3919 /*
3920 * Set up the receive control register; we actually program
3921 * the register when we set the receive filter. Use multicast
3922 * address offset type 0.
3923 *
3924 * Only the i82544 has the ability to strip the incoming
3925 * CRC, so we don't enable that feature.
3926 */
3927 sc->sc_mchash_type = 0;
3928 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3929 | RCTL_MO(sc->sc_mchash_type);
3930
3931 /*
3932 * The I350 has a bug where it always strips the CRC whether
3933 * asked to or not. So ask for stripped CRC here and cope in rxeof.
3934 */
3935 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3936 || (sc->sc_type == WM_T_I210))
3937 sc->sc_rctl |= RCTL_SECRC;
3938
3939 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
3940 && (ifp->if_mtu > ETHERMTU)) {
3941 sc->sc_rctl |= RCTL_LPE;
3942 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3943 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
3944 }
3945
3946 if (MCLBYTES == 2048) {
3947 sc->sc_rctl |= RCTL_2k;
3948 } else {
3949 if (sc->sc_type >= WM_T_82543) {
3950 switch (MCLBYTES) {
3951 case 4096:
3952 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3953 break;
3954 case 8192:
3955 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3956 break;
3957 case 16384:
3958 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3959 break;
3960 default:
3961 panic("wm_init: MCLBYTES %d unsupported",
3962 MCLBYTES);
3963 break;
3964 }
3965 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3966 }
3967
3968 /* Set the receive filter. */
3969 wm_set_filter(sc);
3970
3971 /* Enable ECC */
3972 switch (sc->sc_type) {
3973 case WM_T_82571:
3974 reg = CSR_READ(sc, WMREG_PBA_ECC);
3975 reg |= PBA_ECC_CORR_EN;
3976 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
3977 break;
3978 case WM_T_PCH_LPT:
3979 reg = CSR_READ(sc, WMREG_PBECCSTS);
3980 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
3981 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
3982
3983 reg = CSR_READ(sc, WMREG_CTRL);
3984 reg |= CTRL_MEHE;
3985 CSR_WRITE(sc, WMREG_CTRL, reg);
3986 break;
3987 default:
3988 break;
3989 }
3990
3991 /* On 82575 and later, set RDT only if RX is enabled */
3992 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3993 for (i = 0; i < WM_NRXDESC; i++)
3994 WM_INIT_RXDESC(sc, i);
3995
3996 sc->sc_stopping = false;
3997
3998 /* Start the one second link check clock. */
3999 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4000
4001 /* ...all done! */
4002 ifp->if_flags |= IFF_RUNNING;
4003 ifp->if_flags &= ~IFF_OACTIVE;
4004
4005 out:
4006 sc->sc_if_flags = ifp->if_flags;
4007 if (error)
4008 log(LOG_ERR, "%s: interface not running\n",
4009 device_xname(sc->sc_dev));
4010 return error;
4011 }
4012
4013 /*
4014 * wm_stop: [ifnet interface function]
4015 *
4016 * Stop transmission on the interface.
4017 */
4018 static void
4019 wm_stop(struct ifnet *ifp, int disable)
4020 {
4021 struct wm_softc *sc = ifp->if_softc;
4022
4023 WM_LOCK(sc);
4024 wm_stop_locked(ifp, disable);
4025 WM_UNLOCK(sc);
4026 }
4027
4028 static void
4029 wm_stop_locked(struct ifnet *ifp, int disable)
4030 {
4031 struct wm_softc *sc = ifp->if_softc;
4032 struct wm_txsoft *txs;
4033 int i;
4034
4035 KASSERT(WM_LOCKED(sc));
4036
4037 sc->sc_stopping = true;
4038
4039 /* Stop the one second clock. */
4040 callout_stop(&sc->sc_tick_ch);
4041
4042 /* Stop the 82547 Tx FIFO stall check timer. */
4043 if (sc->sc_type == WM_T_82547)
4044 callout_stop(&sc->sc_txfifo_ch);
4045
4046 if (sc->sc_flags & WM_F_HAS_MII) {
4047 /* Down the MII. */
4048 mii_down(&sc->sc_mii);
4049 } else {
4050 #if 0
4051 /* Should we clear PHY's status properly? */
4052 wm_reset(sc);
4053 #endif
4054 }
4055
4056 /* Stop the transmit and receive processes. */
4057 CSR_WRITE(sc, WMREG_TCTL, 0);
4058 CSR_WRITE(sc, WMREG_RCTL, 0);
4059 sc->sc_rctl &= ~RCTL_EN;
4060
4061 /*
4062 * Clear the interrupt mask to ensure the device cannot assert its
4063 * interrupt line.
4064 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4065 * any currently pending or shared interrupt.
4066 */
4067 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4068 sc->sc_icr = 0;
4069
4070 /* Release any queued transmit buffers. */
4071 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4072 txs = &sc->sc_txsoft[i];
4073 if (txs->txs_mbuf != NULL) {
4074 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4075 m_freem(txs->txs_mbuf);
4076 txs->txs_mbuf = NULL;
4077 }
4078 }
4079
4080 /* Mark the interface as down and cancel the watchdog timer. */
4081 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4082 ifp->if_timer = 0;
4083
4084 if (disable)
4085 wm_rxdrain(sc);
4086
4087 #if 0 /* notyet */
4088 if (sc->sc_type >= WM_T_82544)
4089 CSR_WRITE(sc, WMREG_WUC, 0);
4090 #endif
4091 }
4092
4093 /*
4094 * wm_tx_offload:
4095 *
4096 * Set up TCP/IP checksumming parameters for the
4097 * specified packet.
4098 */
4099 static int
4100 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4101 uint8_t *fieldsp)
4102 {
4103 struct mbuf *m0 = txs->txs_mbuf;
4104 struct livengood_tcpip_ctxdesc *t;
4105 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4106 uint32_t ipcse;
4107 struct ether_header *eh;
4108 int offset, iphl;
4109 uint8_t fields;
4110
4111 /*
4112 * XXX It would be nice if the mbuf pkthdr had offset
4113 * fields for the protocol headers.
4114 */
4115
4116 eh = mtod(m0, struct ether_header *);
4117 switch (htons(eh->ether_type)) {
4118 case ETHERTYPE_IP:
4119 case ETHERTYPE_IPV6:
4120 offset = ETHER_HDR_LEN;
4121 break;
4122
4123 case ETHERTYPE_VLAN:
4124 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4125 break;
4126
4127 default:
4128 /*
4129 * Don't support this protocol or encapsulation.
4130 */
4131 *fieldsp = 0;
4132 *cmdp = 0;
4133 return 0;
4134 }
4135
4136 if ((m0->m_pkthdr.csum_flags &
4137 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4138 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4139 } else {
4140 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4141 }
4142 ipcse = offset + iphl - 1;
4143
4144 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4145 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4146 seg = 0;
4147 fields = 0;
4148
4149 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4150 int hlen = offset + iphl;
4151 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4152
4153 if (__predict_false(m0->m_len <
4154 (hlen + sizeof(struct tcphdr)))) {
4155 /*
4156 * TCP/IP headers are not in the first mbuf; we need
4157 * to do this the slow and painful way. Let's just
4158 * hope this doesn't happen very often.
4159 */
4160 struct tcphdr th;
4161
4162 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4163
4164 m_copydata(m0, hlen, sizeof(th), &th);
4165 if (v4) {
4166 struct ip ip;
4167
4168 m_copydata(m0, offset, sizeof(ip), &ip);
4169 ip.ip_len = 0;
4170 m_copyback(m0,
4171 offset + offsetof(struct ip, ip_len),
4172 sizeof(ip.ip_len), &ip.ip_len);
4173 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4174 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4175 } else {
4176 struct ip6_hdr ip6;
4177
4178 m_copydata(m0, offset, sizeof(ip6), &ip6);
4179 ip6.ip6_plen = 0;
4180 m_copyback(m0,
4181 offset + offsetof(struct ip6_hdr, ip6_plen),
4182 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4183 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4184 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4185 }
4186 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4187 sizeof(th.th_sum), &th.th_sum);
4188
4189 hlen += th.th_off << 2;
4190 } else {
4191 /*
4192 * TCP/IP headers are in the first mbuf; we can do
4193 * this the easy way.
4194 */
4195 struct tcphdr *th;
4196
4197 if (v4) {
4198 struct ip *ip =
4199 (void *)(mtod(m0, char *) + offset);
4200 th = (void *)(mtod(m0, char *) + hlen);
4201
4202 ip->ip_len = 0;
4203 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4204 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4205 } else {
4206 struct ip6_hdr *ip6 =
4207 (void *)(mtod(m0, char *) + offset);
4208 th = (void *)(mtod(m0, char *) + hlen);
4209
4210 ip6->ip6_plen = 0;
4211 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4212 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4213 }
4214 hlen += th->th_off << 2;
4215 }
4216
4217 if (v4) {
4218 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4219 cmdlen |= WTX_TCPIP_CMD_IP;
4220 } else {
4221 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4222 ipcse = 0;
4223 }
4224 cmd |= WTX_TCPIP_CMD_TSE;
4225 cmdlen |= WTX_TCPIP_CMD_TSE |
4226 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4227 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4228 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4229 }
4230
4231 /*
4232 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4233 * offload feature, if we load the context descriptor, we
4234 * MUST provide valid values for IPCSS and TUCSS fields.
4235 */
4236
4237 ipcs = WTX_TCPIP_IPCSS(offset) |
4238 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4239 WTX_TCPIP_IPCSE(ipcse);
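/*
 * Worked example (illustrative): for a plain IPv4 frame with
 * offset = ETHER_HDR_LEN = 14 and iphl = 20, IPCSS = 14,
 * IPCSO = 14 + 10 = 24 (ip_sum sits at offset 10 in struct ip)
 * and IPCSE = 14 + 20 - 1 = 33, the last byte of the IP header.
 */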
4240 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4241 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4242 fields |= WTX_IXSM;
4243 }
4244
4245 offset += iphl;
4246
4247 if (m0->m_pkthdr.csum_flags &
4248 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4249 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4250 fields |= WTX_TXSM;
4251 tucs = WTX_TCPIP_TUCSS(offset) |
4252 WTX_TCPIP_TUCSO(offset +
4253 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4254 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4255 } else if ((m0->m_pkthdr.csum_flags &
4256 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4257 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4258 fields |= WTX_TXSM;
4259 tucs = WTX_TCPIP_TUCSS(offset) |
4260 WTX_TCPIP_TUCSO(offset +
4261 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4262 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4263 } else {
4264 /* Just initialize it to a valid TCP context. */
4265 tucs = WTX_TCPIP_TUCSS(offset) |
4266 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4267 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4268 }
4269
4270 /* Fill in the context descriptor. */
4271 t = (struct livengood_tcpip_ctxdesc *)
4272 &sc->sc_txdescs[sc->sc_txnext];
4273 t->tcpip_ipcs = htole32(ipcs);
4274 t->tcpip_tucs = htole32(tucs);
4275 t->tcpip_cmdlen = htole32(cmdlen);
4276 t->tcpip_seg = htole32(seg);
4277 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4278
4279 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4280 txs->txs_ndesc++;
4281
4282 *cmdp = cmd;
4283 *fieldsp = fields;
4284
4285 return 0;
4286 }
4287
4288 static void
4289 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4290 {
4291 struct mbuf *m;
4292 int i;
4293
4294 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4295 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4296 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4297 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4298 m->m_data, m->m_len, m->m_flags);
4299 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4300 i, i == 1 ? "" : "s");
4301 }
4302
4303 /*
4304 * wm_82547_txfifo_stall:
4305 *
4306 * Callout used to wait for the 82547 Tx FIFO to drain,
4307 * reset the FIFO pointers, and restart packet transmission.
4308 */
4309 static void
4310 wm_82547_txfifo_stall(void *arg)
4311 {
4312 struct wm_softc *sc = arg;
4313 #ifndef WM_MPSAFE
4314 int s;
4315
4316 s = splnet();
4317 #endif
4318 WM_LOCK(sc);
4319
4320 if (sc->sc_stopping)
4321 goto out;
4322
4323 if (sc->sc_txfifo_stall) {
4324 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4325 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4326 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4327 /*
4328 * Packets have drained. Stop transmitter, reset
4329 * FIFO pointers, restart transmitter, and kick
4330 * the packet queue.
4331 */
4332 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4333 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4334 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4335 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4336 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4337 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4338 CSR_WRITE(sc, WMREG_TCTL, tctl);
4339 CSR_WRITE_FLUSH(sc);
4340
4341 sc->sc_txfifo_head = 0;
4342 sc->sc_txfifo_stall = 0;
4343 wm_start_locked(&sc->sc_ethercom.ec_if);
4344 } else {
4345 /*
4346 * Still waiting for packets to drain; try again in
4347 * another tick.
4348 */
4349 callout_schedule(&sc->sc_txfifo_ch, 1);
4350 }
4351 }
4352
4353 out:
4354 WM_UNLOCK(sc);
4355 #ifndef WM_MPSAFE
4356 splx(s);
4357 #endif
4358 }
4359
4360 /*
4361 * wm_82547_txfifo_bugchk:
4362 *
4363 * Check for bug condition in the 82547 Tx FIFO. We need to
4364 * prevent enqueueing a packet that would wrap around the end
4365 * of the Tx FIFO ring buffer, otherwise the chip will croak.
4366 *
4367 * We do this by checking the amount of space before the end
4368 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4369 * the Tx FIFO, wait for all remaining packets to drain, reset
4370 * the internal FIFO pointers to the beginning, and restart
4371 * transmission on the interface.
4372 */
4373 #define WM_FIFO_HDR 0x10
4374 #define WM_82547_PAD_LEN 0x3e0
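/*
 * Worked example of the accounting below (illustrative): a
 * 1514-byte frame is charged roundup(1514 + WM_FIFO_HDR,
 * WM_FIFO_HDR) = roundup(1530, 16) = 1536 bytes of Tx FIFO space.
 */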
4375 static int
4376 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4377 {
4378 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4379 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4380
4381 /* Just return if already stalled. */
4382 if (sc->sc_txfifo_stall)
4383 return 1;
4384
4385 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4386 /* Stall only occurs in half-duplex mode. */
4387 goto send_packet;
4388 }
4389
4390 if (len >= WM_82547_PAD_LEN + space) {
4391 sc->sc_txfifo_stall = 1;
4392 callout_schedule(&sc->sc_txfifo_ch, 1);
4393 return 1;
4394 }
4395
4396 send_packet:
4397 sc->sc_txfifo_head += len;
4398 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4399 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4400
4401 return 0;
4402 }
4403
4404 /*
4405 * wm_start: [ifnet interface function]
4406 *
4407 * Start packet transmission on the interface.
4408 */
4409 static void
4410 wm_start(struct ifnet *ifp)
4411 {
4412 struct wm_softc *sc = ifp->if_softc;
4413
4414 WM_LOCK(sc);
4415 if (!sc->sc_stopping)
4416 wm_start_locked(ifp);
4417 WM_UNLOCK(sc);
4418 }
4419
4420 static void
4421 wm_start_locked(struct ifnet *ifp)
4422 {
4423 struct wm_softc *sc = ifp->if_softc;
4424 struct mbuf *m0;
4425 struct m_tag *mtag;
4426 struct wm_txsoft *txs;
4427 bus_dmamap_t dmamap;
4428 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4429 bus_addr_t curaddr;
4430 bus_size_t seglen, curlen;
4431 uint32_t cksumcmd;
4432 uint8_t cksumfields;
4433
4434 KASSERT(WM_LOCKED(sc));
4435
4436 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4437 return;
4438
4439 /* Remember the previous number of free descriptors. */
4440 ofree = sc->sc_txfree;
4441
4442 /*
4443 * Loop through the send queue, setting up transmit descriptors
4444 * until we drain the queue, or use up all available transmit
4445 * descriptors.
4446 */
4447 for (;;) {
4448 m0 = NULL;
4449
4450 /* Get a work queue entry. */
4451 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4452 wm_txintr(sc);
4453 if (sc->sc_txsfree == 0) {
4454 DPRINTF(WM_DEBUG_TX,
4455 ("%s: TX: no free job descriptors\n",
4456 device_xname(sc->sc_dev)));
4457 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4458 break;
4459 }
4460 }
4461
4462 /* Grab a packet off the queue. */
4463 IFQ_DEQUEUE(&ifp->if_snd, m0);
4464 if (m0 == NULL)
4465 break;
4466
4467 DPRINTF(WM_DEBUG_TX,
4468 ("%s: TX: have packet to transmit: %p\n",
4469 device_xname(sc->sc_dev), m0));
4470
4471 txs = &sc->sc_txsoft[sc->sc_txsnext];
4472 dmamap = txs->txs_dmamap;
4473
4474 use_tso = (m0->m_pkthdr.csum_flags &
4475 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4476
4477 /*
4478 * So says the Linux driver:
4479 * The controller does a simple calculation to make sure
4480 * there is enough room in the FIFO before initiating the
4481 * DMA for each buffer. The calc is:
4482 * 4 = ceil(buffer len / MSS)
4483 * To make sure we don't overrun the FIFO, adjust the max
4484 * buffer len if the MSS drops.
4485 */
4486 dmamap->dm_maxsegsz =
4487 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4488 ? m0->m_pkthdr.segsz << 2
4489 : WTX_MAX_LEN;
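/*
 * Worked example (illustrative): for a typical MSS of 1448,
 * dm_maxsegsz is capped at 1448 << 2 = 5792 bytes, assuming
 * that is below WTX_MAX_LEN.
 */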
4490
4491 /*
4492 * Load the DMA map. If this fails, the packet either
4493 * didn't fit in the allotted number of segments, or we
4494 * were short on resources. For the too-many-segments
4495 * case, we simply report an error and drop the packet,
4496 * since we can't sanely copy a jumbo packet to a single
4497 * buffer.
4498 */
4499 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4500 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4501 if (error) {
4502 if (error == EFBIG) {
4503 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4504 log(LOG_ERR, "%s: Tx packet consumes too many "
4505 "DMA segments, dropping...\n",
4506 device_xname(sc->sc_dev));
4507 wm_dump_mbuf_chain(sc, m0);
4508 m_freem(m0);
4509 continue;
4510 }
4511 /* Short on resources, just stop for now. */
4512 DPRINTF(WM_DEBUG_TX,
4513 ("%s: TX: dmamap load failed: %d\n",
4514 device_xname(sc->sc_dev), error));
4515 break;
4516 }
4517
4518 segs_needed = dmamap->dm_nsegs;
4519 if (use_tso) {
4520 /* For sentinel descriptor; see below. */
4521 segs_needed++;
4522 }
4523
4524 /*
4525 * Ensure we have enough descriptors free to describe
4526 * the packet. Note, we always reserve one descriptor
4527 * at the end of the ring due to the semantics of the
4528 * TDT register, plus one more in the event we need
4529 * to load offload context.
4530 */
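/*
 * Example (illustrative): a packet with segs_needed = 5 proceeds
 * only when sc_txfree >= 7: five data descriptors, one ring slot
 * kept free for the TDT semantics, and one for a possible offload
 * context descriptor.
 */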
4531 if (segs_needed > sc->sc_txfree - 2) {
4532 /*
4533 * Not enough free descriptors to transmit this
4534 * packet. We haven't committed anything yet,
4535 * so just unload the DMA map, put the packet
4536 * back on the queue, and punt. Notify the upper
4537 * layer that there are no more slots left.
4538 */
4539 DPRINTF(WM_DEBUG_TX,
4540 ("%s: TX: need %d (%d) descriptors, have %d\n",
4541 device_xname(sc->sc_dev), dmamap->dm_nsegs,
4542 segs_needed, sc->sc_txfree - 1));
4543 ifp->if_flags |= IFF_OACTIVE;
4544 bus_dmamap_unload(sc->sc_dmat, dmamap);
4545 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4546 break;
4547 }
4548
4549 /*
4550 * Check for 82547 Tx FIFO bug. We need to do this
4551 * once we know we can transmit the packet, since we
4552 * do some internal FIFO space accounting here.
4553 */
4554 if (sc->sc_type == WM_T_82547 &&
4555 wm_82547_txfifo_bugchk(sc, m0)) {
4556 DPRINTF(WM_DEBUG_TX,
4557 ("%s: TX: 82547 Tx FIFO bug detected\n",
4558 device_xname(sc->sc_dev)));
4559 ifp->if_flags |= IFF_OACTIVE;
4560 bus_dmamap_unload(sc->sc_dmat, dmamap);
4561 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4562 break;
4563 }
4564
4565 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4566
4567 DPRINTF(WM_DEBUG_TX,
4568 ("%s: TX: packet has %d (%d) DMA segments\n",
4569 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4570
4571 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4572
4573 /*
4574 * Store a pointer to the packet so that we can free it
4575 * later.
4576 *
4577 * Initially, we consider the number of descriptors the
4578 * packet uses the number of DMA segments. This may be
4579 * incremented by 1 if we do checksum offload (a descriptor
4580 * is used to set the checksum context).
4581 */
4582 txs->txs_mbuf = m0;
4583 txs->txs_firstdesc = sc->sc_txnext;
4584 txs->txs_ndesc = segs_needed;
4585
4586 /* Set up offload parameters for this packet. */
4587 if (m0->m_pkthdr.csum_flags &
4588 (M_CSUM_TSOv4|M_CSUM_TSOv6|
4589 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4590 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4591 if (wm_tx_offload(sc, txs, &cksumcmd,
4592 &cksumfields) != 0) {
4593 /* Error message already displayed. */
4594 bus_dmamap_unload(sc->sc_dmat, dmamap);
4595 continue;
4596 }
4597 } else {
4598 cksumcmd = 0;
4599 cksumfields = 0;
4600 }
4601
4602 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4603
4604 /* Sync the DMA map. */
4605 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4606 BUS_DMASYNC_PREWRITE);
4607
4608 /* Initialize the transmit descriptor. */
4609 for (nexttx = sc->sc_txnext, seg = 0;
4610 seg < dmamap->dm_nsegs; seg++) {
4611 for (seglen = dmamap->dm_segs[seg].ds_len,
4612 curaddr = dmamap->dm_segs[seg].ds_addr;
4613 seglen != 0;
4614 curaddr += curlen, seglen -= curlen,
4615 nexttx = WM_NEXTTX(sc, nexttx)) {
4616 curlen = seglen;
4617
4618 /*
4619 * So says the Linux driver:
4620 * Work around for premature descriptor
4621 * write-backs in TSO mode. Append a
4622 * 4-byte sentinel descriptor.
4623 */
4624 if (use_tso &&
4625 seg == dmamap->dm_nsegs - 1 &&
4626 curlen > 8)
4627 curlen -= 4;
4628
4629 wm_set_dma_addr(
4630 &sc->sc_txdescs[nexttx].wtx_addr,
4631 curaddr);
4632 sc->sc_txdescs[nexttx].wtx_cmdlen =
4633 htole32(cksumcmd | curlen);
4634 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4635 0;
4636 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4637 cksumfields;
4638 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4639 lasttx = nexttx;
4640
4641 DPRINTF(WM_DEBUG_TX,
4642 ("%s: TX: desc %d: low %#" PRIx64 ", "
4643 "len %#04zx\n",
4644 device_xname(sc->sc_dev), nexttx,
4645 (uint64_t)curaddr, curlen));
4646 }
4647 }
4648
4649 KASSERT(lasttx != -1);
4650
4651 /*
4652 * Set up the command byte on the last descriptor of
4653 * the packet. If we're in the interrupt delay window,
4654 * delay the interrupt.
4655 */
4656 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4657 htole32(WTX_CMD_EOP | WTX_CMD_RS);
4658
4659 /*
4660 * If VLANs are enabled and the packet has a VLAN tag, set
4661 * up the descriptor to encapsulate the packet for us.
4662 *
4663 * This is only valid on the last descriptor of the packet.
4664 */
4665 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4666 sc->sc_txdescs[lasttx].wtx_cmdlen |=
4667 htole32(WTX_CMD_VLE);
4668 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4669 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4670 }
4671
4672 txs->txs_lastdesc = lasttx;
4673
4674 DPRINTF(WM_DEBUG_TX,
4675 ("%s: TX: desc %d: cmdlen 0x%08x\n",
4676 device_xname(sc->sc_dev),
4677 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4678
4679 /* Sync the descriptors we're using. */
4680 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4681 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4682
4683 /* Give the packet to the chip. */
4684 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4685
4686 DPRINTF(WM_DEBUG_TX,
4687 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4688
4689 DPRINTF(WM_DEBUG_TX,
4690 ("%s: TX: finished transmitting packet, job %d\n",
4691 device_xname(sc->sc_dev), sc->sc_txsnext));
4692
4693 /* Advance the tx pointer. */
4694 sc->sc_txfree -= txs->txs_ndesc;
4695 sc->sc_txnext = nexttx;
4696
4697 sc->sc_txsfree--;
4698 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4699
4700 /* Pass the packet to any BPF listeners. */
4701 bpf_mtap(ifp, m0);
4702 }
4703
4704 if (m0 != NULL) {
4705 ifp->if_flags |= IFF_OACTIVE;
4706 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4707 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4708 m_freem(m0);
4709 }
4710
4711 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4712 /* No more slots; notify upper layer. */
4713 ifp->if_flags |= IFF_OACTIVE;
4714 }
4715
4716 if (sc->sc_txfree != ofree) {
4717 /* Set a watchdog timer in case the chip flakes out. */
4718 ifp->if_timer = 5;
4719 }
4720 }
4721
4722 /*
4723 * wm_nq_tx_offload:
4724 *
4725 * Set up TCP/IP checksumming parameters for the
4726 * specified packet, for NEWQUEUE devices.
4727 */
4728 static int
4729 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4730 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4731 {
4732 struct mbuf *m0 = txs->txs_mbuf;
4733 struct m_tag *mtag;
4734 uint32_t vl_len, mssidx, cmdc;
4735 struct ether_header *eh;
4736 int offset, iphl;
4737
4738 /*
4739 * XXX It would be nice if the mbuf pkthdr had offset
4740 * fields for the protocol headers.
4741 */
4742 *cmdlenp = 0;
4743 *fieldsp = 0;
4744
4745 eh = mtod(m0, struct ether_header *);
4746 switch (htons(eh->ether_type)) {
4747 case ETHERTYPE_IP:
4748 case ETHERTYPE_IPV6:
4749 offset = ETHER_HDR_LEN;
4750 break;
4751
4752 case ETHERTYPE_VLAN:
4753 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4754 break;
4755
4756 default:
4757 /* Don't support this protocol or encapsulation. */
4758 *do_csum = false;
4759 return 0;
4760 }
4761 *do_csum = true;
4762 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4763 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4764
4765 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4766 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4767
4768 if ((m0->m_pkthdr.csum_flags &
4769 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4770 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4771 } else {
4772 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4773 }
4774 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4775 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
4776
4777 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4778 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4779 << NQTXC_VLLEN_VLAN_SHIFT);
4780 *cmdlenp |= NQTX_CMD_VLE;
4781 }
4782
4783 mssidx = 0;
4784
4785 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4786 int hlen = offset + iphl;
4787 int tcp_hlen;
4788 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4789
4790 if (__predict_false(m0->m_len <
4791 (hlen + sizeof(struct tcphdr)))) {
4792 /*
4793 * TCP/IP headers are not in the first mbuf; we need
4794 * to do this the slow and painful way. Let's just
4795 * hope this doesn't happen very often.
4796 */
4797 struct tcphdr th;
4798
4799 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4800
4801 m_copydata(m0, hlen, sizeof(th), &th);
4802 if (v4) {
4803 struct ip ip;
4804
4805 m_copydata(m0, offset, sizeof(ip), &ip);
4806 ip.ip_len = 0;
4807 m_copyback(m0,
4808 offset + offsetof(struct ip, ip_len),
4809 sizeof(ip.ip_len), &ip.ip_len);
4810 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4811 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4812 } else {
4813 struct ip6_hdr ip6;
4814
4815 m_copydata(m0, offset, sizeof(ip6), &ip6);
4816 ip6.ip6_plen = 0;
4817 m_copyback(m0,
4818 offset + offsetof(struct ip6_hdr, ip6_plen),
4819 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4820 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4821 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4822 }
4823 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4824 sizeof(th.th_sum), &th.th_sum);
4825
4826 tcp_hlen = th.th_off << 2;
4827 } else {
4828 /*
4829 * TCP/IP headers are in the first mbuf; we can do
4830 * this the easy way.
4831 */
4832 struct tcphdr *th;
4833
4834 if (v4) {
4835 struct ip *ip =
4836 (void *)(mtod(m0, char *) + offset);
4837 th = (void *)(mtod(m0, char *) + hlen);
4838
4839 ip->ip_len = 0;
4840 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4841 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4842 } else {
4843 struct ip6_hdr *ip6 =
4844 (void *)(mtod(m0, char *) + offset);
4845 th = (void *)(mtod(m0, char *) + hlen);
4846
4847 ip6->ip6_plen = 0;
4848 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4849 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4850 }
4851 tcp_hlen = th->th_off << 2;
4852 }
4853 hlen += tcp_hlen;
4854 *cmdlenp |= NQTX_CMD_TSE;
4855
4856 if (v4) {
4857 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4858 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
4859 } else {
4860 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4861 *fieldsp |= NQTXD_FIELDS_TUXSM;
4862 }
4863 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
4864 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4865 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
4866 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
4867 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
4868 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
4869 } else {
4870 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
4871 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4872 }
4873
4874 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
4875 *fieldsp |= NQTXD_FIELDS_IXSM;
4876 cmdc |= NQTXC_CMD_IP4;
4877 }
4878
4879 if (m0->m_pkthdr.csum_flags &
4880 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
4881 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4882 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
4883 cmdc |= NQTXC_CMD_TCP;
4884 } else {
4885 cmdc |= NQTXC_CMD_UDP;
4886 }
4887 cmdc |= NQTXC_CMD_IP4;
4888 *fieldsp |= NQTXD_FIELDS_TUXSM;
4889 }
4890 if (m0->m_pkthdr.csum_flags &
4891 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
4892 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4893 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
4894 cmdc |= NQTXC_CMD_TCP;
4895 } else {
4896 cmdc |= NQTXC_CMD_UDP;
4897 }
4898 cmdc |= NQTXC_CMD_IP6;
4899 *fieldsp |= NQTXD_FIELDS_TUXSM;
4900 }
4901
4902 /* Fill in the context descriptor. */
4903 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
4904 htole32(vl_len);
4905 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
4906 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
4907 htole32(cmdc);
4908 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
4909 htole32(mssidx);
4910 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4911 DPRINTF(WM_DEBUG_TX,
4912 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
4913 sc->sc_txnext, 0, vl_len));
4914 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
4915 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4916 txs->txs_ndesc++;
4917 return 0;
4918 }
4919
4920 /*
4921 * wm_nq_start: [ifnet interface function]
4922 *
4923 * Start packet transmission on the interface for NEWQUEUE devices.
4924 */
4925 static void
4926 wm_nq_start(struct ifnet *ifp)
4927 {
4928 struct wm_softc *sc = ifp->if_softc;
4929
4930 WM_LOCK(sc);
4931 if (!sc->sc_stopping)
4932 wm_nq_start_locked(ifp);
4933 WM_UNLOCK(sc);
4934 }
4935
4936 static void
4937 wm_nq_start_locked(struct ifnet *ifp)
4938 {
4939 struct wm_softc *sc = ifp->if_softc;
4940 struct mbuf *m0;
4941 struct m_tag *mtag;
4942 struct wm_txsoft *txs;
4943 bus_dmamap_t dmamap;
4944 int error, nexttx, lasttx = -1, seg, segs_needed;
4945 bool do_csum, sent;
4946
4947 KASSERT(WM_LOCKED(sc));
4948
4949 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4950 return;
4951
4952 sent = false;
4953
4954 /*
4955 * Loop through the send queue, setting up transmit descriptors
4956 * until we drain the queue, or use up all available transmit
4957 * descriptors.
4958 */
4959 for (;;) {
4960 m0 = NULL;
4961
4962 /* Get a work queue entry. */
4963 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4964 wm_txintr(sc);
4965 if (sc->sc_txsfree == 0) {
4966 DPRINTF(WM_DEBUG_TX,
4967 ("%s: TX: no free job descriptors\n",
4968 device_xname(sc->sc_dev)));
4969 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4970 break;
4971 }
4972 }
4973
4974 /* Grab a packet off the queue. */
4975 IFQ_DEQUEUE(&ifp->if_snd, m0);
4976 if (m0 == NULL)
4977 break;
4978
4979 DPRINTF(WM_DEBUG_TX,
4980 ("%s: TX: have packet to transmit: %p\n",
4981 device_xname(sc->sc_dev), m0));
4982
4983 txs = &sc->sc_txsoft[sc->sc_txsnext];
4984 dmamap = txs->txs_dmamap;
4985
4986 /*
4987 * Load the DMA map. If this fails, the packet either
4988 * didn't fit in the allotted number of segments, or we
4989 * were short on resources. For the too-many-segments
4990 * case, we simply report an error and drop the packet,
4991 * since we can't sanely copy a jumbo packet to a single
4992 * buffer.
4993 */
4994 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4995 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4996 if (error) {
4997 if (error == EFBIG) {
4998 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4999 log(LOG_ERR, "%s: Tx packet consumes too many "
5000 "DMA segments, dropping...\n",
5001 device_xname(sc->sc_dev));
5002 wm_dump_mbuf_chain(sc, m0);
5003 m_freem(m0);
5004 continue;
5005 }
5006 /* Short on resources, just stop for now. */
5007 DPRINTF(WM_DEBUG_TX,
5008 ("%s: TX: dmamap load failed: %d\n",
5009 device_xname(sc->sc_dev), error));
5010 break;
5011 }
5012
5013 segs_needed = dmamap->dm_nsegs;
5014
5015 /*
5016 * Ensure we have enough descriptors free to describe
5017 * the packet. Note, we always reserve one descriptor
5018 * at the end of the ring due to the semantics of the
5019 * TDT register, plus one more in the event we need
5020 * to load offload context.
5021 */
5022 if (segs_needed > sc->sc_txfree - 2) {
5023 /*
5024 * Not enough free descriptors to transmit this
5025 * packet. We haven't committed anything yet,
5026 * so just unload the DMA map, put the packet
5027 * back on the queue, and punt. Notify the upper
5028 * layer that there are no more slots left.
5029 */
5030 DPRINTF(WM_DEBUG_TX,
5031 ("%s: TX: need %d (%d) descriptors, have %d\n",
5032 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5033 segs_needed, sc->sc_txfree - 1));
5034 ifp->if_flags |= IFF_OACTIVE;
5035 bus_dmamap_unload(sc->sc_dmat, dmamap);
5036 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5037 break;
5038 }
5039
5040 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5041
5042 DPRINTF(WM_DEBUG_TX,
5043 ("%s: TX: packet has %d (%d) DMA segments\n",
5044 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5045
5046 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5047
5048 /*
5049 * Store a pointer to the packet so that we can free it
5050 * later.
5051 *
5052 * Initially, we consider the number of descriptors the
5053 * packet uses the number of DMA segments. This may be
5054 * incremented by 1 if we do checksum offload (a descriptor
5055 * is used to set the checksum context).
5056 */
5057 txs->txs_mbuf = m0;
5058 txs->txs_firstdesc = sc->sc_txnext;
5059 txs->txs_ndesc = segs_needed;
5060
5061 /* Set up offload parameters for this packet. */
5062 uint32_t cmdlen, fields, dcmdlen;
5063 if (m0->m_pkthdr.csum_flags &
5064 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5065 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5066 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5067 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5068 &do_csum) != 0) {
5069 /* Error message already displayed. */
5070 bus_dmamap_unload(sc->sc_dmat, dmamap);
5071 continue;
5072 }
5073 } else {
5074 do_csum = false;
5075 cmdlen = 0;
5076 fields = 0;
5077 }
5078
5079 /* Sync the DMA map. */
5080 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5081 BUS_DMASYNC_PREWRITE);
5082
5083 /* Initialize the first transmit descriptor. */
5084 nexttx = sc->sc_txnext;
5085 if (!do_csum) {
5086 /* Set up a legacy descriptor. */
5087 wm_set_dma_addr(
5088 &sc->sc_txdescs[nexttx].wtx_addr,
5089 dmamap->dm_segs[0].ds_addr);
5090 sc->sc_txdescs[nexttx].wtx_cmdlen =
5091 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5092 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5093 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5094 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5095 NULL) {
5096 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5097 htole32(WTX_CMD_VLE);
5098 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5099 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5100 } else {
5101 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5102 }
5103 dcmdlen = 0;
5104 } else {
5105 /* Set up an advanced data descriptor. */
5106 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5107 htole64(dmamap->dm_segs[0].ds_addr);
5108 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5109 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5110 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5111 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5112 htole32(fields);
5113 DPRINTF(WM_DEBUG_TX,
5114 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5115 device_xname(sc->sc_dev), nexttx,
5116 (uint64_t)dmamap->dm_segs[0].ds_addr));
5117 DPRINTF(WM_DEBUG_TX,
5118 ("\t 0x%08x%08x\n", fields,
5119 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5120 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5121 }
5122
5123 lasttx = nexttx;
5124 nexttx = WM_NEXTTX(sc, nexttx);
5125 /*
5126 * Fill in the next descriptors. The legacy and advanced
5127 * formats are the same from here on.
5128 */
5129 for (seg = 1; seg < dmamap->dm_nsegs;
5130 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5131 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5132 htole64(dmamap->dm_segs[seg].ds_addr);
5133 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5134 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5135 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5136 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5137 lasttx = nexttx;
5138
5139 DPRINTF(WM_DEBUG_TX,
5140 ("%s: TX: desc %d: %#" PRIx64 ", "
5141 "len %#04zx\n",
5142 device_xname(sc->sc_dev), nexttx,
5143 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5144 dmamap->dm_segs[seg].ds_len));
5145 }
5146
5147 KASSERT(lasttx != -1);
5148
5149 /*
5150 * Set up the command byte on the last descriptor of
5151 * the packet. If we're in the interrupt delay window,
5152 * delay the interrupt.
5153 */
5154 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5155 (NQTX_CMD_EOP | NQTX_CMD_RS));
5156 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5157 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5158
5159 txs->txs_lastdesc = lasttx;
5160
5161 DPRINTF(WM_DEBUG_TX,
5162 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5163 device_xname(sc->sc_dev),
5164 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5165
5166 /* Sync the descriptors we're using. */
5167 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5168 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5169
5170 /* Give the packet to the chip. */
5171 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5172 sent = true;
5173
5174 DPRINTF(WM_DEBUG_TX,
5175 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5176
5177 DPRINTF(WM_DEBUG_TX,
5178 ("%s: TX: finished transmitting packet, job %d\n",
5179 device_xname(sc->sc_dev), sc->sc_txsnext));
5180
5181 /* Advance the tx pointer. */
5182 sc->sc_txfree -= txs->txs_ndesc;
5183 sc->sc_txnext = nexttx;
5184
5185 sc->sc_txsfree--;
5186 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5187
5188 /* Pass the packet to any BPF listeners. */
5189 bpf_mtap(ifp, m0);
5190 }
5191
5192 if (m0 != NULL) {
5193 ifp->if_flags |= IFF_OACTIVE;
5194 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5195 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5196 m_freem(m0);
5197 }
5198
5199 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5200 /* No more slots; notify upper layer. */
5201 ifp->if_flags |= IFF_OACTIVE;
5202 }
5203
5204 if (sent) {
5205 /* Set a watchdog timer in case the chip flakes out. */
5206 ifp->if_timer = 5;
5207 }
5208 }
5209
5210 /* Interrupt */
5211
5212 /*
5213 * wm_txintr:
5214 *
5215 * Helper; handle transmit interrupts.
5216 */
5217 static void
5218 wm_txintr(struct wm_softc *sc)
5219 {
5220 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5221 struct wm_txsoft *txs;
5222 uint8_t status;
5223 int i;
5224
5225 if (sc->sc_stopping)
5226 return;
5227
5228 ifp->if_flags &= ~IFF_OACTIVE;
5229
5230 /*
5231 * Go through the Tx list and free mbufs for those
5232 * frames which have been transmitted.
5233 */
5234 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5235 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5236 txs = &sc->sc_txsoft[i];
5237
5238 DPRINTF(WM_DEBUG_TX,
5239 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5240
5241 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5242 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5243
5244 status =
5245 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5246 if ((status & WTX_ST_DD) == 0) {
5247 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5248 BUS_DMASYNC_PREREAD);
5249 break;
5250 }
5251
5252 DPRINTF(WM_DEBUG_TX,
5253 ("%s: TX: job %d done: descs %d..%d\n",
5254 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5255 txs->txs_lastdesc));
5256
5257 /*
5258 * XXX We should probably be using the statistics
5259 * XXX registers, but I don't know if they exist
5260 * XXX on chips before the i82544.
5261 */
5262
5263 #ifdef WM_EVENT_COUNTERS
5264 if (status & WTX_ST_TU)
5265 WM_EVCNT_INCR(&sc->sc_ev_tu);
5266 #endif /* WM_EVENT_COUNTERS */
5267
5268 if (status & (WTX_ST_EC|WTX_ST_LC)) {
5269 ifp->if_oerrors++;
5270 if (status & WTX_ST_LC)
5271 log(LOG_WARNING, "%s: late collision\n",
5272 device_xname(sc->sc_dev));
5273 else if (status & WTX_ST_EC) {
5274 ifp->if_collisions += 16;
5275 log(LOG_WARNING, "%s: excessive collisions\n",
5276 device_xname(sc->sc_dev));
5277 }
5278 } else
5279 ifp->if_opackets++;
5280
5281 sc->sc_txfree += txs->txs_ndesc;
5282 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5283 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5284 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5285 m_freem(txs->txs_mbuf);
5286 txs->txs_mbuf = NULL;
5287 }
5288
5289 /* Update the dirty transmit buffer pointer. */
5290 sc->sc_txsdirty = i;
5291 DPRINTF(WM_DEBUG_TX,
5292 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5293
5294 /*
5295 * If there are no more pending transmissions, cancel the watchdog
5296 * timer.
5297 */
5298 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5299 ifp->if_timer = 0;
5300 }
5301
5302 /*
5303 * wm_rxintr:
5304 *
5305 * Helper; handle receive interrupts.
5306 */
5307 static void
5308 wm_rxintr(struct wm_softc *sc)
5309 {
5310 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5311 struct wm_rxsoft *rxs;
5312 struct mbuf *m;
5313 int i, len;
5314 uint8_t status, errors;
5315 uint16_t vlantag;
5316
5317 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5318 rxs = &sc->sc_rxsoft[i];
5319
5320 DPRINTF(WM_DEBUG_RX,
5321 ("%s: RX: checking descriptor %d\n",
5322 device_xname(sc->sc_dev), i));
5323
5324 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5325
5326 status = sc->sc_rxdescs[i].wrx_status;
5327 errors = sc->sc_rxdescs[i].wrx_errors;
5328 len = le16toh(sc->sc_rxdescs[i].wrx_len);
5329 vlantag = sc->sc_rxdescs[i].wrx_special;
5330
5331 if ((status & WRX_ST_DD) == 0) {
5332 /* We have processed all of the receive descriptors. */
5333 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5334 break;
5335 }
5336
5337 if (__predict_false(sc->sc_rxdiscard)) {
5338 DPRINTF(WM_DEBUG_RX,
5339 ("%s: RX: discarding contents of descriptor %d\n",
5340 device_xname(sc->sc_dev), i));
5341 WM_INIT_RXDESC(sc, i);
5342 if (status & WRX_ST_EOP) {
5343 /* Reset our state. */
5344 DPRINTF(WM_DEBUG_RX,
5345 ("%s: RX: resetting rxdiscard -> 0\n",
5346 device_xname(sc->sc_dev)));
5347 sc->sc_rxdiscard = 0;
5348 }
5349 continue;
5350 }
5351
5352 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5353 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5354
5355 m = rxs->rxs_mbuf;
5356
5357 /*
5358 * Add a new receive buffer to the ring, unless of
5359 * course the length is zero. Treat the latter as a
5360 * failed mapping.
5361 */
5362 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5363 /*
5364 * Failed, throw away what we've done so
5365 * far, and discard the rest of the packet.
5366 */
5367 ifp->if_ierrors++;
5368 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5369 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5370 WM_INIT_RXDESC(sc, i);
5371 if ((status & WRX_ST_EOP) == 0)
5372 sc->sc_rxdiscard = 1;
5373 if (sc->sc_rxhead != NULL)
5374 m_freem(sc->sc_rxhead);
5375 WM_RXCHAIN_RESET(sc);
5376 DPRINTF(WM_DEBUG_RX,
5377 ("%s: RX: Rx buffer allocation failed, "
5378 "dropping packet%s\n", device_xname(sc->sc_dev),
5379 sc->sc_rxdiscard ? " (discard)" : ""));
5380 continue;
5381 }
5382
5383 m->m_len = len;
5384 sc->sc_rxlen += len;
5385 DPRINTF(WM_DEBUG_RX,
5386 ("%s: RX: buffer at %p len %d\n",
5387 device_xname(sc->sc_dev), m->m_data, len));
5388
5389 /* If this is not the end of the packet, keep looking. */
5390 if ((status & WRX_ST_EOP) == 0) {
5391 WM_RXCHAIN_LINK(sc, m);
5392 DPRINTF(WM_DEBUG_RX,
5393 ("%s: RX: not yet EOP, rxlen -> %d\n",
5394 device_xname(sc->sc_dev), sc->sc_rxlen));
5395 continue;
5396 }
5397
5398 /*
5399 * Okay, we have the entire packet now. The chip is
5400 * configured to include the FCS except on the I350, I354
5401 * and I21[01] (not all chips can be configured to strip it),
5402 * so we need to trim it.
5403 * We may need to adjust the length of the previous mbuf in
5404 * the chain if the current mbuf is too short.
5405 * Due to a hardware erratum, the RCTL_SECRC bit in the RCTL
5406 * register is always set on the I350, so we don't trim there.
5407 */
5408 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5409 && (sc->sc_type != WM_T_I210)
5410 && (sc->sc_type != WM_T_I211)) {
5411 if (m->m_len < ETHER_CRC_LEN) {
5412 sc->sc_rxtail->m_len
5413 -= (ETHER_CRC_LEN - m->m_len);
5414 m->m_len = 0;
5415 } else
5416 m->m_len -= ETHER_CRC_LEN;
5417 len = sc->sc_rxlen - ETHER_CRC_LEN;
5418 } else
5419 len = sc->sc_rxlen;
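/*
 * Example of the trimming above (illustrative): if the final mbuf
 * holds just 1 byte of the frame, the previous mbuf gives up
 * ETHER_CRC_LEN - 1 = 3 bytes and the final mbuf is emptied,
 * removing the 4-byte FCS in total.
 */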
5420
5421 WM_RXCHAIN_LINK(sc, m);
5422
5423 *sc->sc_rxtailp = NULL;
5424 m = sc->sc_rxhead;
5425
5426 WM_RXCHAIN_RESET(sc);
5427
5428 DPRINTF(WM_DEBUG_RX,
5429 ("%s: RX: have entire packet, len -> %d\n",
5430 device_xname(sc->sc_dev), len));
5431
5432 /* If an error occurred, update stats and drop the packet. */
5433 if (errors &
5434 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5435 if (errors & WRX_ER_SE)
5436 log(LOG_WARNING, "%s: symbol error\n",
5437 device_xname(sc->sc_dev));
5438 else if (errors & WRX_ER_SEQ)
5439 log(LOG_WARNING, "%s: receive sequence error\n",
5440 device_xname(sc->sc_dev));
5441 else if (errors & WRX_ER_CE)
5442 log(LOG_WARNING, "%s: CRC error\n",
5443 device_xname(sc->sc_dev));
5444 m_freem(m);
5445 continue;
5446 }
5447
5448 /* No errors. Receive the packet. */
5449 m->m_pkthdr.rcvif = ifp;
5450 m->m_pkthdr.len = len;
5451
5452 /*
5453 * If VLANs are enabled, VLAN packets have been unwrapped
5454 * for us. Associate the tag with the packet.
5455 */
5456 /* XXX should check for I350 and I354 */
5457 if ((status & WRX_ST_VP) != 0) {
5458 VLAN_INPUT_TAG(ifp, m,
5459 le16toh(vlantag),
5460 continue);
5461 }
5462
5463 /* Set up checksum info for this packet. */
5464 if ((status & WRX_ST_IXSM) == 0) {
5465 if (status & WRX_ST_IPCS) {
5466 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5467 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5468 if (errors & WRX_ER_IPE)
5469 m->m_pkthdr.csum_flags |=
5470 M_CSUM_IPv4_BAD;
5471 }
5472 if (status & WRX_ST_TCPCS) {
5473 /*
5474 * Note: we don't know if this was TCP or UDP,
5475 * so we just set both bits, and expect the
5476 * upper layers to deal.
5477 */
5478 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5479 m->m_pkthdr.csum_flags |=
5480 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5481 M_CSUM_TCPv6 | M_CSUM_UDPv6;
5482 if (errors & WRX_ER_TCPE)
5483 m->m_pkthdr.csum_flags |=
5484 M_CSUM_TCP_UDP_BAD;
5485 }
5486 }
5487
5488 ifp->if_ipackets++;
5489
5490 WM_UNLOCK(sc);
5491
5492 /* Pass this up to any BPF listeners. */
5493 bpf_mtap(ifp, m);
5494
5495 /* Pass it on. */
5496 (*ifp->if_input)(ifp, m);
5497
5498 WM_LOCK(sc);
5499
5500 if (sc->sc_stopping)
5501 break;
5502 }
5503
5504 /* Update the receive pointer. */
5505 sc->sc_rxptr = i;
5506
5507 DPRINTF(WM_DEBUG_RX,
5508 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5509 }
5510
5511 /*
5512 * wm_linkintr_gmii:
5513 *
5514 * Helper; handle link interrupts for GMII.
5515 */
5516 static void
5517 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5518 {
5519
5520 KASSERT(WM_LOCKED(sc));
5521
5522 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5523 __func__));
5524
5525 if (icr & ICR_LSC) {
5526 DPRINTF(WM_DEBUG_LINK,
5527 ("%s: LINK: LSC -> mii_pollstat\n",
5528 device_xname(sc->sc_dev)));
5529 mii_pollstat(&sc->sc_mii);
5530 if (sc->sc_type == WM_T_82543) {
5531 int miistatus, active;
5532
5533 				/*
5534 				 * With 82543, we need to force the MAC's
5535 				 * speed and duplex settings to match the
5536 				 * PHY's speed and duplex configuration.
5537 				 */
5538 miistatus = sc->sc_mii.mii_media_status;
5539
5540 if (miistatus & IFM_ACTIVE) {
5541 active = sc->sc_mii.mii_media_active;
5542 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5543 switch (IFM_SUBTYPE(active)) {
5544 case IFM_10_T:
5545 sc->sc_ctrl |= CTRL_SPEED_10;
5546 break;
5547 case IFM_100_TX:
5548 sc->sc_ctrl |= CTRL_SPEED_100;
5549 break;
5550 case IFM_1000_T:
5551 sc->sc_ctrl |= CTRL_SPEED_1000;
5552 break;
5553 default:
5554 /*
5555 * fiber?
5556 					 * Should not enter here.
5557 */
5558 printf("unknown media (%x)\n",
5559 active);
5560 break;
5561 }
5562 if (active & IFM_FDX)
5563 sc->sc_ctrl |= CTRL_FD;
5564 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5565 }
5566 } else if ((sc->sc_type == WM_T_ICH8)
5567 && (sc->sc_phytype == WMPHY_IGP_3)) {
5568 wm_kmrn_lock_loss_workaround_ich8lan(sc);
5569 } else if (sc->sc_type == WM_T_PCH) {
5570 wm_k1_gig_workaround_hv(sc,
5571 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5572 }
5573
5574 if ((sc->sc_phytype == WMPHY_82578)
5575 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5576 == IFM_1000_T)) {
5577
5578 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5579 delay(200*1000); /* XXX too big */
5580
5581 /* Link stall fix for link up */
5582 wm_gmii_hv_writereg(sc->sc_dev, 1,
5583 HV_MUX_DATA_CTRL,
5584 HV_MUX_DATA_CTRL_GEN_TO_MAC
5585 | HV_MUX_DATA_CTRL_FORCE_SPEED);
5586 wm_gmii_hv_writereg(sc->sc_dev, 1,
5587 HV_MUX_DATA_CTRL,
5588 HV_MUX_DATA_CTRL_GEN_TO_MAC);
5589 }
5590 }
5591 } else if (icr & ICR_RXSEQ) {
5592 DPRINTF(WM_DEBUG_LINK,
5593 ("%s: LINK Receive sequence error\n",
5594 device_xname(sc->sc_dev)));
5595 }
5596 }
5597
5598 /*
5599 * wm_linkintr_tbi:
5600 *
5601 * Helper; handle link interrupts for TBI mode.
5602 */
5603 static void
5604 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5605 {
5606 uint32_t status;
5607
5608 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5609 __func__));
5610
5611 status = CSR_READ(sc, WMREG_STATUS);
5612 if (icr & ICR_LSC) {
5613 if (status & STATUS_LU) {
5614 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5615 device_xname(sc->sc_dev),
5616 (status & STATUS_FD) ? "FDX" : "HDX"));
5617 /*
5618 * NOTE: CTRL will update TFCE and RFCE automatically,
5619 * so we should update sc->sc_ctrl
5620 */
5621
5622 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5623 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5624 sc->sc_fcrtl &= ~FCRTL_XONE;
5625 if (status & STATUS_FD)
5626 sc->sc_tctl |=
5627 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5628 else
5629 sc->sc_tctl |=
5630 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5631 if (sc->sc_ctrl & CTRL_TFCE)
5632 sc->sc_fcrtl |= FCRTL_XONE;
5633 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5634 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5635 WMREG_OLD_FCRTL : WMREG_FCRTL,
5636 sc->sc_fcrtl);
5637 sc->sc_tbi_linkup = 1;
5638 } else {
5639 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5640 device_xname(sc->sc_dev)));
5641 sc->sc_tbi_linkup = 0;
5642 }
5643 wm_tbi_set_linkled(sc);
5644 } else if (icr & ICR_RXCFG) {
5645 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
5646 device_xname(sc->sc_dev)));
5647 sc->sc_tbi_nrxcfg++;
5648 wm_check_for_link(sc);
5649 } else if (icr & ICR_RXSEQ) {
5650 DPRINTF(WM_DEBUG_LINK,
5651 ("%s: LINK: Receive sequence error\n",
5652 device_xname(sc->sc_dev)));
5653 }
5654 }
5655
5656 /*
5657 * wm_linkintr:
5658 *
5659 * Helper; handle link interrupts.
5660 */
5661 static void
5662 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5663 {
5664
5665 if (sc->sc_flags & WM_F_HAS_MII)
5666 wm_linkintr_gmii(sc, icr);
5667 else
5668 wm_linkintr_tbi(sc, icr);
5669 }
5670
5671 /*
5672 * wm_intr:
5673 *
5674 * Interrupt service routine.
5675 */
5676 static int
5677 wm_intr(void *arg)
5678 {
5679 struct wm_softc *sc = arg;
5680 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5681 uint32_t icr;
5682 int handled = 0;
5683
5684 while (1 /* CONSTCOND */) {
5685 icr = CSR_READ(sc, WMREG_ICR);
5686 if ((icr & sc->sc_icr) == 0)
5687 break;
5688 rnd_add_uint32(&sc->rnd_source, icr);
5689
5690 WM_LOCK(sc);
5691
5692 if (sc->sc_stopping) {
5693 WM_UNLOCK(sc);
5694 break;
5695 }
5696
5697 handled = 1;
5698
5699 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5700 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5701 DPRINTF(WM_DEBUG_RX,
5702 ("%s: RX: got Rx intr 0x%08x\n",
5703 device_xname(sc->sc_dev),
5704 icr & (ICR_RXDMT0|ICR_RXT0)));
5705 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5706 }
5707 #endif
5708 wm_rxintr(sc);
5709
5710 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5711 if (icr & ICR_TXDW) {
5712 DPRINTF(WM_DEBUG_TX,
5713 ("%s: TX: got TXDW interrupt\n",
5714 device_xname(sc->sc_dev)));
5715 WM_EVCNT_INCR(&sc->sc_ev_txdw);
5716 }
5717 #endif
5718 wm_txintr(sc);
5719
5720 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
5721 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5722 wm_linkintr(sc, icr);
5723 }
5724
5725 WM_UNLOCK(sc);
5726
5727 if (icr & ICR_RXO) {
5728 #if defined(WM_DEBUG)
5729 log(LOG_WARNING, "%s: Receive overrun\n",
5730 device_xname(sc->sc_dev));
5731 #endif /* defined(WM_DEBUG) */
5732 }
5733 }
5734
5735 if (handled) {
5736 /* Try to get more packets going. */
5737 ifp->if_start(ifp);
5738 }
5739
5740 return handled;
5741 }
5742
5743 /*
5744 * Media related.
5745 * GMII, SGMII, TBI (and SERDES)
5746 */
5747
5748 /* GMII related */
5749
5750 /*
5751 * wm_gmii_reset:
5752 *
5753 * Reset the PHY.
5754 */
5755 static void
5756 wm_gmii_reset(struct wm_softc *sc)
5757 {
5758 uint32_t reg;
5759 int rv;
5760
5761 /* get phy semaphore */
5762 switch (sc->sc_type) {
5763 case WM_T_82571:
5764 case WM_T_82572:
5765 case WM_T_82573:
5766 case WM_T_82574:
5767 case WM_T_82583:
5768 /* XXX should get sw semaphore, too */
5769 rv = wm_get_swsm_semaphore(sc);
5770 break;
5771 case WM_T_82575:
5772 case WM_T_82576:
5773 case WM_T_82580:
5774 case WM_T_82580ER:
5775 case WM_T_I350:
5776 case WM_T_I354:
5777 case WM_T_I210:
5778 case WM_T_I211:
5779 case WM_T_80003:
5780 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5781 break;
5782 case WM_T_ICH8:
5783 case WM_T_ICH9:
5784 case WM_T_ICH10:
5785 case WM_T_PCH:
5786 case WM_T_PCH2:
5787 case WM_T_PCH_LPT:
5788 rv = wm_get_swfwhw_semaphore(sc);
5789 break;
5790 default:
5791 /* nothing to do*/
5792 rv = 0;
5793 break;
5794 }
5795 if (rv != 0) {
5796 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5797 __func__);
5798 return;
5799 }
5800
5801 switch (sc->sc_type) {
5802 case WM_T_82542_2_0:
5803 case WM_T_82542_2_1:
5804 /* null */
5805 break;
5806 case WM_T_82543:
5807 		/*
5808 		 * With 82543, we need to force the MAC's speed and duplex
5809 		 * settings to match the PHY's speed and duplex configuration.
5810 		 * In addition, we need to perform a hardware reset on the
5811 		 * PHY to take it out of reset.
5812 		 */
5813 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5814 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5815
5816 /* The PHY reset pin is active-low. */
5817 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5818 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5819 CTRL_EXT_SWDPIN(4));
5820 reg |= CTRL_EXT_SWDPIO(4);
5821
5822 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5823 CSR_WRITE_FLUSH(sc);
5824 delay(10*1000);
5825
5826 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5827 CSR_WRITE_FLUSH(sc);
5828 delay(150);
5829 #if 0
5830 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5831 #endif
5832 delay(20*1000); /* XXX extra delay to get PHY ID? */
5833 break;
5834 case WM_T_82544: /* reset 10000us */
5835 case WM_T_82540:
5836 case WM_T_82545:
5837 case WM_T_82545_3:
5838 case WM_T_82546:
5839 case WM_T_82546_3:
5840 case WM_T_82541:
5841 case WM_T_82541_2:
5842 case WM_T_82547:
5843 case WM_T_82547_2:
5844 case WM_T_82571: /* reset 100us */
5845 case WM_T_82572:
5846 case WM_T_82573:
5847 case WM_T_82574:
5848 case WM_T_82575:
5849 case WM_T_82576:
5850 case WM_T_82580:
5851 case WM_T_82580ER:
5852 case WM_T_I350:
5853 case WM_T_I354:
5854 case WM_T_I210:
5855 case WM_T_I211:
5856 case WM_T_82583:
5857 case WM_T_80003:
5858 /* generic reset */
5859 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5860 CSR_WRITE_FLUSH(sc);
5861 delay(20000);
5862 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5863 CSR_WRITE_FLUSH(sc);
5864 delay(20000);
5865
5866 if ((sc->sc_type == WM_T_82541)
5867 || (sc->sc_type == WM_T_82541_2)
5868 || (sc->sc_type == WM_T_82547)
5869 || (sc->sc_type == WM_T_82547_2)) {
5870 /* workaround for igp are done in igp_reset() */
5871 /* XXX add code to set LED after phy reset */
5872 }
5873 break;
5874 case WM_T_ICH8:
5875 case WM_T_ICH9:
5876 case WM_T_ICH10:
5877 case WM_T_PCH:
5878 case WM_T_PCH2:
5879 case WM_T_PCH_LPT:
5880 /* generic reset */
5881 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5882 CSR_WRITE_FLUSH(sc);
5883 delay(100);
5884 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5885 CSR_WRITE_FLUSH(sc);
5886 delay(150);
5887 break;
5888 default:
5889 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5890 __func__);
5891 break;
5892 }
5893
5894 /* release PHY semaphore */
5895 switch (sc->sc_type) {
5896 case WM_T_82571:
5897 case WM_T_82572:
5898 case WM_T_82573:
5899 case WM_T_82574:
5900 case WM_T_82583:
5901 /* XXX should put sw semaphore, too */
5902 wm_put_swsm_semaphore(sc);
5903 break;
5904 case WM_T_82575:
5905 case WM_T_82576:
5906 case WM_T_82580:
5907 case WM_T_82580ER:
5908 case WM_T_I350:
5909 case WM_T_I354:
5910 case WM_T_I210:
5911 case WM_T_I211:
5912 case WM_T_80003:
5913 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5914 break;
5915 case WM_T_ICH8:
5916 case WM_T_ICH9:
5917 case WM_T_ICH10:
5918 case WM_T_PCH:
5919 case WM_T_PCH2:
5920 case WM_T_PCH_LPT:
5921 wm_put_swfwhw_semaphore(sc);
5922 break;
5923 default:
5924 /* nothing to do*/
5925 rv = 0;
5926 break;
5927 }
5928
5929 /* get_cfg_done */
5930 wm_get_cfg_done(sc);
5931
5932 /* extra setup */
5933 switch (sc->sc_type) {
5934 case WM_T_82542_2_0:
5935 case WM_T_82542_2_1:
5936 case WM_T_82543:
5937 case WM_T_82544:
5938 case WM_T_82540:
5939 case WM_T_82545:
5940 case WM_T_82545_3:
5941 case WM_T_82546:
5942 case WM_T_82546_3:
5943 case WM_T_82541_2:
5944 case WM_T_82547_2:
5945 case WM_T_82571:
5946 case WM_T_82572:
5947 case WM_T_82573:
5948 case WM_T_82574:
5949 case WM_T_82575:
5950 case WM_T_82576:
5951 case WM_T_82580:
5952 case WM_T_82580ER:
5953 case WM_T_I350:
5954 case WM_T_I354:
5955 case WM_T_I210:
5956 case WM_T_I211:
5957 case WM_T_82583:
5958 case WM_T_80003:
5959 /* null */
5960 break;
5961 case WM_T_82541:
5962 case WM_T_82547:
5963 /* XXX Configure actively LED after PHY reset */
5964 break;
5965 case WM_T_ICH8:
5966 case WM_T_ICH9:
5967 case WM_T_ICH10:
5968 case WM_T_PCH:
5969 case WM_T_PCH2:
5970 case WM_T_PCH_LPT:
5971 		/* Allow time for h/w to get to a quiescent state after reset */
5972 delay(10*1000);
5973
5974 if (sc->sc_type == WM_T_PCH)
5975 wm_hv_phy_workaround_ich8lan(sc);
5976
5977 if (sc->sc_type == WM_T_PCH2)
5978 wm_lv_phy_workaround_ich8lan(sc);
5979
5980 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5981 /*
5982 * dummy read to clear the phy wakeup bit after lcd
5983 * reset
5984 */
5985 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5986 }
5987
5988 /*
5989 	 * XXX Configure the LCD with the extended configuration region
5990 * in NVM
5991 */
5992
5993 /* Configure the LCD with the OEM bits in NVM */
5994 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
5995 || (sc->sc_type == WM_T_PCH_LPT)) {
5996 /*
5997 * Disable LPLU.
5998 * XXX It seems that 82567 has LPLU, too.
5999 */
6000 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6001 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6002 reg |= HV_OEM_BITS_ANEGNOW;
6003 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6004 }
6005 break;
6006 default:
6007 panic("%s: unknown type\n", __func__);
6008 break;
6009 }
6010 }
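
/*
 * In outline, wm_gmii_reset() above does the following: take the
 * chip-specific PHY semaphore, pulse the PHY reset (via CTRL_PHY_RESET,
 * or the SWDPIN(4) GPIO on 82543), release the semaphore, wait for the
 * configuration cycle to finish (wm_get_cfg_done()), then apply the
 * per-family extra setup and workarounds.
 */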
6011
6012 /*
6013 * wm_get_phy_id_82575:
6014 *
6015  * Return the PHY ID.  Return -1 if it failed.
6016 */
6017 static int
6018 wm_get_phy_id_82575(struct wm_softc *sc)
6019 {
6020 uint32_t reg;
6021 int phyid = -1;
6022
6023 /* XXX */
6024 if ((sc->sc_flags & WM_F_SGMII) == 0)
6025 return -1;
6026
6027 if (wm_sgmii_uses_mdio(sc)) {
6028 switch (sc->sc_type) {
6029 case WM_T_82575:
6030 case WM_T_82576:
6031 reg = CSR_READ(sc, WMREG_MDIC);
6032 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6033 break;
6034 case WM_T_82580:
6035 case WM_T_I350:
6036 case WM_T_I354:
6037 case WM_T_I210:
6038 case WM_T_I211:
6039 reg = CSR_READ(sc, WMREG_MDICNFG);
6040 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6041 break;
6042 default:
6043 return -1;
6044 }
6045 }
6046
6047 return phyid;
6048 }
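
/*
 * Note: wm_get_phy_id_82575() simply reads back the PHY address field
 * the hardware left in MDIC (82575/82576) or MDICNFG (82580 and later);
 * on the later parts that field is believed to be loaded from the NVM
 * at reset, which is why no bus probe is needed here.
 */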
6049
6050
6051 /*
6052 * wm_gmii_mediainit:
6053 *
6054 * Initialize media for use on 1000BASE-T devices.
6055 */
6056 static void
6057 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6058 {
6059 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6060 struct mii_data *mii = &sc->sc_mii;
6061
6062 /* We have MII. */
6063 sc->sc_flags |= WM_F_HAS_MII;
6064
6065 if (sc->sc_type == WM_T_80003)
6066 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6067 else
6068 sc->sc_tipg = TIPG_1000T_DFLT;
6069
6070 /*
6071 * Let the chip set speed/duplex on its own based on
6072 * signals from the PHY.
6073 * XXXbouyer - I'm not sure this is right for the 80003,
6074 * the em driver only sets CTRL_SLU here - but it seems to work.
6075 */
6076 sc->sc_ctrl |= CTRL_SLU;
6077 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6078
6079 /* Initialize our media structures and probe the GMII. */
6080 mii->mii_ifp = ifp;
6081
6082 /*
6083 * Determine the PHY access method.
6084 *
6085 * For SGMII, use SGMII specific method.
6086 *
6087 * For some devices, we can determine the PHY access method
6088 * from sc_type.
6089 *
6090 	 * For ICH8 variants, it's difficult to determine the PHY access
6091 	 * method by sc_type, so use the PCI product ID for some devices.
6092 	 * For other ICH8 variants, try igp's method first.  If the PHY
6093 	 * can't be detected that way, fall back to bm's method.
6094 */
6095 switch (prodid) {
6096 case PCI_PRODUCT_INTEL_PCH_M_LM:
6097 case PCI_PRODUCT_INTEL_PCH_M_LC:
6098 /* 82577 */
6099 sc->sc_phytype = WMPHY_82577;
6100 mii->mii_readreg = wm_gmii_hv_readreg;
6101 mii->mii_writereg = wm_gmii_hv_writereg;
6102 break;
6103 case PCI_PRODUCT_INTEL_PCH_D_DM:
6104 case PCI_PRODUCT_INTEL_PCH_D_DC:
6105 /* 82578 */
6106 sc->sc_phytype = WMPHY_82578;
6107 mii->mii_readreg = wm_gmii_hv_readreg;
6108 mii->mii_writereg = wm_gmii_hv_writereg;
6109 break;
6110 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6111 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6112 /* 82579 */
6113 sc->sc_phytype = WMPHY_82579;
6114 mii->mii_readreg = wm_gmii_hv_readreg;
6115 mii->mii_writereg = wm_gmii_hv_writereg;
6116 break;
6117 case PCI_PRODUCT_INTEL_I217_LM:
6118 case PCI_PRODUCT_INTEL_I217_V:
6119 case PCI_PRODUCT_INTEL_I218_LM:
6120 case PCI_PRODUCT_INTEL_I218_V:
6121 /* I21[78] */
6122 mii->mii_readreg = wm_gmii_hv_readreg;
6123 mii->mii_writereg = wm_gmii_hv_writereg;
6124 break;
6125 case PCI_PRODUCT_INTEL_82801I_BM:
6126 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6127 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6128 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6129 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6130 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6131 /* 82567 */
6132 sc->sc_phytype = WMPHY_BM;
6133 mii->mii_readreg = wm_gmii_bm_readreg;
6134 mii->mii_writereg = wm_gmii_bm_writereg;
6135 break;
6136 default:
6137 if (((sc->sc_flags & WM_F_SGMII) != 0)
6138 		    && !wm_sgmii_uses_mdio(sc)) {
6139 mii->mii_readreg = wm_sgmii_readreg;
6140 mii->mii_writereg = wm_sgmii_writereg;
6141 } else if (sc->sc_type >= WM_T_80003) {
6142 mii->mii_readreg = wm_gmii_i80003_readreg;
6143 mii->mii_writereg = wm_gmii_i80003_writereg;
6144 } else if (sc->sc_type >= WM_T_I210) {
6145 mii->mii_readreg = wm_gmii_i82544_readreg;
6146 mii->mii_writereg = wm_gmii_i82544_writereg;
6147 } else if (sc->sc_type >= WM_T_82580) {
6148 sc->sc_phytype = WMPHY_82580;
6149 mii->mii_readreg = wm_gmii_82580_readreg;
6150 mii->mii_writereg = wm_gmii_82580_writereg;
6151 } else if (sc->sc_type >= WM_T_82544) {
6152 mii->mii_readreg = wm_gmii_i82544_readreg;
6153 mii->mii_writereg = wm_gmii_i82544_writereg;
6154 } else {
6155 mii->mii_readreg = wm_gmii_i82543_readreg;
6156 mii->mii_writereg = wm_gmii_i82543_writereg;
6157 }
6158 break;
6159 }
6160 mii->mii_statchg = wm_gmii_statchg;
6161
6162 wm_gmii_reset(sc);
6163
6164 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6165 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6166 wm_gmii_mediastatus);
6167
6168 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6169 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6170 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6171 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6172 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6173 /* Attach only one port */
6174 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6175 MII_OFFSET_ANY, MIIF_DOPAUSE);
6176 } else {
6177 int i, id;
6178 uint32_t ctrl_ext;
6179
6180 id = wm_get_phy_id_82575(sc);
6181 if (id != -1) {
6182 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6183 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6184 }
6185 if ((id == -1)
6186 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6187 /* Power on sgmii phy if it is disabled */
6188 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6189 CSR_WRITE(sc, WMREG_CTRL_EXT,
6190 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6191 CSR_WRITE_FLUSH(sc);
6192 delay(300*1000); /* XXX too long */
6193
6194 				/* Try PHY addresses 1 through 7 */
6195 for (i = 1; i < 8; i++)
6196 mii_attach(sc->sc_dev, &sc->sc_mii,
6197 0xffffffff, i, MII_OFFSET_ANY,
6198 MIIF_DOPAUSE);
6199
6200 				/* Restore previous SFP cage power state */
6201 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6202 }
6203 }
6204 } else {
6205 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6206 MII_OFFSET_ANY, MIIF_DOPAUSE);
6207 }
6208
6209 /*
6210 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6211 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6212 */
6213 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6214 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6215 wm_set_mdio_slow_mode_hv(sc);
6216 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6217 MII_OFFSET_ANY, MIIF_DOPAUSE);
6218 }
6219
6220 /*
6221 * (For ICH8 variants)
6222 * If PHY detection failed, use BM's r/w function and retry.
6223 */
6224 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6225 /* if failed, retry with *_bm_* */
6226 mii->mii_readreg = wm_gmii_bm_readreg;
6227 mii->mii_writereg = wm_gmii_bm_writereg;
6228
6229 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6230 MII_OFFSET_ANY, MIIF_DOPAUSE);
6231 }
6232
6233 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6234 		/* No PHY was found */
6235 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6236 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6237 sc->sc_phytype = WMPHY_NONE;
6238 } else {
6239 /*
6240 * PHY Found!
6241 * Check PHY type.
6242 */
6243 uint32_t model;
6244 struct mii_softc *child;
6245
6246 child = LIST_FIRST(&mii->mii_phys);
6247 if (device_is_a(child->mii_dev, "igphy")) {
6248 struct igphy_softc *isc = (struct igphy_softc *)child;
6249
6250 model = isc->sc_mii.mii_mpd_model;
6251 if (model == MII_MODEL_yyINTEL_I82566)
6252 sc->sc_phytype = WMPHY_IGP_3;
6253 }
6254
6255 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6256 }
6257 }
6258
6259 /*
6260 * wm_gmii_mediastatus: [ifmedia interface function]
6261 *
6262 * Get the current interface media status on a 1000BASE-T device.
6263 */
6264 static void
6265 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6266 {
6267 struct wm_softc *sc = ifp->if_softc;
6268
6269 ether_mediastatus(ifp, ifmr);
6270 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6271 | sc->sc_flowflags;
6272 }
6273
6274 /*
6275 * wm_gmii_mediachange: [ifmedia interface function]
6276 *
6277 * Set hardware to newly-selected media on a 1000BASE-T device.
6278 */
6279 static int
6280 wm_gmii_mediachange(struct ifnet *ifp)
6281 {
6282 struct wm_softc *sc = ifp->if_softc;
6283 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6284 int rc;
6285
6286 if ((ifp->if_flags & IFF_UP) == 0)
6287 return 0;
6288
6289 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6290 sc->sc_ctrl |= CTRL_SLU;
6291 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6292 || (sc->sc_type > WM_T_82543)) {
6293 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6294 } else {
6295 sc->sc_ctrl &= ~CTRL_ASDE;
6296 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6297 if (ife->ifm_media & IFM_FDX)
6298 sc->sc_ctrl |= CTRL_FD;
6299 switch (IFM_SUBTYPE(ife->ifm_media)) {
6300 case IFM_10_T:
6301 sc->sc_ctrl |= CTRL_SPEED_10;
6302 break;
6303 case IFM_100_TX:
6304 sc->sc_ctrl |= CTRL_SPEED_100;
6305 break;
6306 case IFM_1000_T:
6307 sc->sc_ctrl |= CTRL_SPEED_1000;
6308 break;
6309 default:
6310 panic("wm_gmii_mediachange: bad media 0x%x",
6311 ife->ifm_media);
6312 }
6313 }
6314 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6315 if (sc->sc_type <= WM_T_82543)
6316 wm_gmii_reset(sc);
6317
6318 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6319 return 0;
6320 return rc;
6321 }
6322
6323 #define MDI_IO CTRL_SWDPIN(2)
6324 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6325 #define MDI_CLK CTRL_SWDPIN(3)
6326
6327 static void
6328 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6329 {
6330 uint32_t i, v;
6331
6332 v = CSR_READ(sc, WMREG_CTRL);
6333 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6334 v |= MDI_DIR | CTRL_SWDPIO(3);
6335
6336 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6337 if (data & i)
6338 v |= MDI_IO;
6339 else
6340 v &= ~MDI_IO;
6341 CSR_WRITE(sc, WMREG_CTRL, v);
6342 CSR_WRITE_FLUSH(sc);
6343 delay(10);
6344 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6345 CSR_WRITE_FLUSH(sc);
6346 delay(10);
6347 CSR_WRITE(sc, WMREG_CTRL, v);
6348 CSR_WRITE_FLUSH(sc);
6349 delay(10);
6350 }
6351 }
6352
6353 static uint32_t
6354 wm_i82543_mii_recvbits(struct wm_softc *sc)
6355 {
6356 uint32_t v, i, data = 0;
6357
6358 v = CSR_READ(sc, WMREG_CTRL);
6359 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6360 v |= CTRL_SWDPIO(3);
6361
6362 CSR_WRITE(sc, WMREG_CTRL, v);
6363 CSR_WRITE_FLUSH(sc);
6364 delay(10);
6365 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6366 CSR_WRITE_FLUSH(sc);
6367 delay(10);
6368 CSR_WRITE(sc, WMREG_CTRL, v);
6369 CSR_WRITE_FLUSH(sc);
6370 delay(10);
6371
6372 for (i = 0; i < 16; i++) {
6373 data <<= 1;
6374 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6375 CSR_WRITE_FLUSH(sc);
6376 delay(10);
6377 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6378 data |= 1;
6379 CSR_WRITE(sc, WMREG_CTRL, v);
6380 CSR_WRITE_FLUSH(sc);
6381 delay(10);
6382 }
6383
6384 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6385 CSR_WRITE_FLUSH(sc);
6386 delay(10);
6387 CSR_WRITE(sc, WMREG_CTRL, v);
6388 CSR_WRITE_FLUSH(sc);
6389 delay(10);
6390
6391 return data;
6392 }
6393
6394 #undef MDI_IO
6395 #undef MDI_DIR
6396 #undef MDI_CLK
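
/*
 * The two bit-bang helpers above implement an IEEE 802.3 clause 22 MDIO
 * frame on the software-definable pins: a 32-bit preamble of all ones,
 * a start delimiter (01), an opcode (10 = read, 01 = write), a 5-bit
 * PHY address, a 5-bit register address, a turnaround, then 16 data
 * bits.  Every bit costs three CTRL writes with 10us delays, so a full
 * transaction takes on the order of one to two milliseconds.
 */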
6397
6398 /*
6399 * wm_gmii_i82543_readreg: [mii interface function]
6400 *
6401 * Read a PHY register on the GMII (i82543 version).
6402 */
6403 static int
6404 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6405 {
6406 struct wm_softc *sc = device_private(self);
6407 int rv;
6408
6409 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6410 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6411 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6412 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6413
6414 DPRINTF(WM_DEBUG_GMII,
6415 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6416 device_xname(sc->sc_dev), phy, reg, rv));
6417
6418 return rv;
6419 }
6420
6421 /*
6422 * wm_gmii_i82543_writereg: [mii interface function]
6423 *
6424 * Write a PHY register on the GMII (i82543 version).
6425 */
6426 static void
6427 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6428 {
6429 struct wm_softc *sc = device_private(self);
6430
6431 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6432 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6433 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6434 (MII_COMMAND_START << 30), 32);
6435 }
6436
6437 /*
6438 * wm_gmii_i82544_readreg: [mii interface function]
6439 *
6440 * Read a PHY register on the GMII.
6441 */
6442 static int
6443 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6444 {
6445 struct wm_softc *sc = device_private(self);
6446 uint32_t mdic = 0;
6447 int i, rv;
6448
6449 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6450 MDIC_REGADD(reg));
6451
6452 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6453 mdic = CSR_READ(sc, WMREG_MDIC);
6454 if (mdic & MDIC_READY)
6455 break;
6456 delay(50);
6457 }
6458
6459 if ((mdic & MDIC_READY) == 0) {
6460 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6461 device_xname(sc->sc_dev), phy, reg);
6462 rv = 0;
6463 } else if (mdic & MDIC_E) {
6464 #if 0 /* This is normal if no PHY is present. */
6465 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6466 device_xname(sc->sc_dev), phy, reg);
6467 #endif
6468 rv = 0;
6469 } else {
6470 rv = MDIC_DATA(mdic);
6471 if (rv == 0xffff)
6472 rv = 0;
6473 }
6474
6475 return rv;
6476 }
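
/*
 * The poll above runs at most WM_GEN_POLL_TIMEOUT * 3 iterations of
 * 50us each before declaring a timeout.  A result of all ones (0xffff),
 * which is what a floating MDIO bus reads as, is mapped to 0 so an
 * absent PHY does not masquerade as a valid register value.
 */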
6477
6478 /*
6479 * wm_gmii_i82544_writereg: [mii interface function]
6480 *
6481 * Write a PHY register on the GMII.
6482 */
6483 static void
6484 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6485 {
6486 struct wm_softc *sc = device_private(self);
6487 uint32_t mdic = 0;
6488 int i;
6489
6490 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6491 MDIC_REGADD(reg) | MDIC_DATA(val));
6492
6493 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6494 mdic = CSR_READ(sc, WMREG_MDIC);
6495 if (mdic & MDIC_READY)
6496 break;
6497 delay(50);
6498 }
6499
6500 if ((mdic & MDIC_READY) == 0)
6501 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6502 device_xname(sc->sc_dev), phy, reg);
6503 else if (mdic & MDIC_E)
6504 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6505 device_xname(sc->sc_dev), phy, reg);
6506 }
6507
6508 /*
6509 * wm_gmii_i80003_readreg: [mii interface function]
6510 *
6511  * Read a PHY register on the Kumeran bus.
6512  * This could be handled by the PHY layer if we didn't have to lock the
6513  * resource ...
6514 */
6515 static int
6516 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6517 {
6518 struct wm_softc *sc = device_private(self);
6519 int sem;
6520 int rv;
6521
6522 if (phy != 1) /* only one PHY on kumeran bus */
6523 return 0;
6524
6525 sem = swfwphysem[sc->sc_funcid];
6526 if (wm_get_swfw_semaphore(sc, sem)) {
6527 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6528 __func__);
6529 return 0;
6530 }
6531
6532 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6533 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6534 reg >> GG82563_PAGE_SHIFT);
6535 } else {
6536 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6537 reg >> GG82563_PAGE_SHIFT);
6538 }
6539 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6540 delay(200);
6541 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6542 delay(200);
6543
6544 wm_put_swfw_semaphore(sc, sem);
6545 return rv;
6546 }
6547
6548 /*
6549 * wm_gmii_i80003_writereg: [mii interface function]
6550 *
6551  * Write a PHY register on the Kumeran bus.
6552  * This could be handled by the PHY layer if we didn't have to lock the
6553  * resource ...
6554 */
6555 static void
6556 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6557 {
6558 struct wm_softc *sc = device_private(self);
6559 int sem;
6560
6561 if (phy != 1) /* only one PHY on kumeran bus */
6562 return;
6563
6564 sem = swfwphysem[sc->sc_funcid];
6565 if (wm_get_swfw_semaphore(sc, sem)) {
6566 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6567 __func__);
6568 return;
6569 }
6570
6571 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6572 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6573 reg >> GG82563_PAGE_SHIFT);
6574 } else {
6575 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6576 reg >> GG82563_PAGE_SHIFT);
6577 }
6578 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6579 delay(200);
6580 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6581 delay(200);
6582
6583 wm_put_swfw_semaphore(sc, sem);
6584 }
6585
6586 /*
6587 * wm_gmii_bm_readreg: [mii interface function]
6588 *
6589  * Read a PHY register on the BM PHY (82567 family).
6590  * This could be handled by the PHY layer if we didn't have to lock the
6591  * resource ...
6592 */
6593 static int
6594 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6595 {
6596 struct wm_softc *sc = device_private(self);
6597 int sem;
6598 int rv;
6599
6600 sem = swfwphysem[sc->sc_funcid];
6601 if (wm_get_swfw_semaphore(sc, sem)) {
6602 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6603 __func__);
6604 return 0;
6605 }
6606
6607 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6608 if (phy == 1)
6609 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6610 reg);
6611 else
6612 wm_gmii_i82544_writereg(self, phy,
6613 GG82563_PHY_PAGE_SELECT,
6614 reg >> GG82563_PAGE_SHIFT);
6615 }
6616
6617 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6618 wm_put_swfw_semaphore(sc, sem);
6619 return rv;
6620 }
6621
6622 /*
6623 * wm_gmii_bm_writereg: [mii interface function]
6624 *
6625  * Write a PHY register on the BM PHY (82567 family).
6626  * This could be handled by the PHY layer if we didn't have to lock the
6627  * resource ...
6628 */
6629 static void
6630 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6631 {
6632 struct wm_softc *sc = device_private(self);
6633 int sem;
6634
6635 sem = swfwphysem[sc->sc_funcid];
6636 if (wm_get_swfw_semaphore(sc, sem)) {
6637 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6638 __func__);
6639 return;
6640 }
6641
6642 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6643 if (phy == 1)
6644 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6645 reg);
6646 else
6647 wm_gmii_i82544_writereg(self, phy,
6648 GG82563_PHY_PAGE_SELECT,
6649 reg >> GG82563_PAGE_SHIFT);
6650 }
6651
6652 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6653 wm_put_swfw_semaphore(sc, sem);
6654 }
6655
6656 static void
6657 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6658 {
6659 struct wm_softc *sc = device_private(self);
6660 uint16_t regnum = BM_PHY_REG_NUM(offset);
6661 uint16_t wuce;
6662
6663 /* XXX Gig must be disabled for MDIO accesses to page 800 */
6664 if (sc->sc_type == WM_T_PCH) {
6665 		/* XXX The e1000 driver does nothing here... why? */
6666 }
6667
6668 /* Set page 769 */
6669 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6670 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6671
6672 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6673
6674 wuce &= ~BM_WUC_HOST_WU_BIT;
6675 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6676 wuce | BM_WUC_ENABLE_BIT);
6677
6678 /* Select page 800 */
6679 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6680 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6681
6682 /* Write page 800 */
6683 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6684
6685 if (rd)
6686 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6687 else
6688 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6689
6690 /* Set page 769 */
6691 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6692 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6693
6694 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6695 }
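
/*
 * The wakeup-register access above, in outline:
 *
 *	1. Select page 769 (BM_WUC_ENABLE_PAGE) and set the WUC enable
 *	   bit while clearing the host wakeup bit.
 *	2. Select page 800 (BM_WUC_PAGE) and latch the target register
 *	   number through BM_WUC_ADDRESS_OPCODE.
 *	3. Read or write the data through BM_WUC_DATA_OPCODE.
 *	4. Return to page 769 and restore the saved WUCE value.
 */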
6696
6697 /*
6698 * wm_gmii_hv_readreg: [mii interface function]
6699 *
6700  * Read a PHY register on the HV PHY (PCH family).
6701  * This could be handled by the PHY layer if we didn't have to lock the
6702  * resource ...
6703 */
6704 static int
6705 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6706 {
6707 struct wm_softc *sc = device_private(self);
6708 uint16_t page = BM_PHY_REG_PAGE(reg);
6709 uint16_t regnum = BM_PHY_REG_NUM(reg);
6710 uint16_t val;
6711 int rv;
6712
6713 if (wm_get_swfwhw_semaphore(sc)) {
6714 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6715 __func__);
6716 return 0;
6717 }
6718
6719 /* XXX Workaround failure in MDIO access while cable is disconnected */
6720 if (sc->sc_phytype == WMPHY_82577) {
6721 /* XXX must write */
6722 }
6723
6724 /* Page 800 works differently than the rest so it has its own func */
6725 if (page == BM_WUC_PAGE) {
6726 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6727 return val;
6728 }
6729
6730 	/*
6731 	 * Pages below 768 work differently than the rest and are not
6732 	 * handled here yet.
6733 	 */
6734 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6735 printf("gmii_hv_readreg!!!\n");
6736 return 0;
6737 }
6738
6739 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6740 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6741 page << BME1000_PAGE_SHIFT);
6742 }
6743
6744 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6745 wm_put_swfwhw_semaphore(sc);
6746 return rv;
6747 }
6748
6749 /*
6750 * wm_gmii_hv_writereg: [mii interface function]
6751 *
6752  * Write a PHY register on the HV PHY (PCH family).
6753  * This could be handled by the PHY layer if we didn't have to lock the
6754  * resource ...
6755 */
6756 static void
6757 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6758 {
6759 struct wm_softc *sc = device_private(self);
6760 uint16_t page = BM_PHY_REG_PAGE(reg);
6761 uint16_t regnum = BM_PHY_REG_NUM(reg);
6762
6763 if (wm_get_swfwhw_semaphore(sc)) {
6764 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6765 __func__);
6766 return;
6767 }
6768
6769 /* XXX Workaround failure in MDIO access while cable is disconnected */
6770
6771 /* Page 800 works differently than the rest so it has its own func */
6772 if (page == BM_WUC_PAGE) {
6773 uint16_t tmp;
6774
6775 tmp = val;
6776 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6777 return;
6778 }
6779
6780 	/*
6781 	 * Pages below 768 work differently than the rest and are not
6782 	 * handled here yet.
6783 	 */
6784 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6785 printf("gmii_hv_writereg!!!\n");
6786 return;
6787 }
6788
6789 /*
6790 * XXX Workaround MDIO accesses being disabled after entering IEEE
6791 * Power Down (whenever bit 11 of the PHY control register is set)
6792 */
6793
6794 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6795 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6796 page << BME1000_PAGE_SHIFT);
6797 }
6798
6799 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6800 wm_put_swfwhw_semaphore(sc);
6801 }
6802
6803 /*
6804 * wm_gmii_82580_readreg: [mii interface function]
6805 *
6806 * Read a PHY register on the 82580 and I350.
6807 * This could be handled by the PHY layer if we didn't have to lock the
6808  * resource ...
6809 */
6810 static int
6811 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6812 {
6813 struct wm_softc *sc = device_private(self);
6814 int sem;
6815 int rv;
6816
6817 sem = swfwphysem[sc->sc_funcid];
6818 if (wm_get_swfw_semaphore(sc, sem)) {
6819 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6820 __func__);
6821 return 0;
6822 }
6823
6824 rv = wm_gmii_i82544_readreg(self, phy, reg);
6825
6826 wm_put_swfw_semaphore(sc, sem);
6827 return rv;
6828 }
6829
6830 /*
6831 * wm_gmii_82580_writereg: [mii interface function]
6832 *
6833 * Write a PHY register on the 82580 and I350.
6834 * This could be handled by the PHY layer if we didn't have to lock the
6835  * resource ...
6836 */
6837 static void
6838 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6839 {
6840 struct wm_softc *sc = device_private(self);
6841 int sem;
6842
6843 sem = swfwphysem[sc->sc_funcid];
6844 if (wm_get_swfw_semaphore(sc, sem)) {
6845 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6846 __func__);
6847 return;
6848 }
6849
6850 wm_gmii_i82544_writereg(self, phy, reg, val);
6851
6852 wm_put_swfw_semaphore(sc, sem);
6853 }
6854
6855 /*
6856 * wm_gmii_statchg: [mii interface function]
6857 *
6858 * Callback from MII layer when media changes.
6859 */
6860 static void
6861 wm_gmii_statchg(struct ifnet *ifp)
6862 {
6863 struct wm_softc *sc = ifp->if_softc;
6864 struct mii_data *mii = &sc->sc_mii;
6865
6866 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6867 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6868 sc->sc_fcrtl &= ~FCRTL_XONE;
6869
6870 /*
6871 * Get flow control negotiation result.
6872 */
6873 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6874 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6875 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6876 mii->mii_media_active &= ~IFM_ETH_FMASK;
6877 }
6878
6879 if (sc->sc_flowflags & IFM_FLOW) {
6880 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6881 sc->sc_ctrl |= CTRL_TFCE;
6882 sc->sc_fcrtl |= FCRTL_XONE;
6883 }
6884 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6885 sc->sc_ctrl |= CTRL_RFCE;
6886 }
6887
6888 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6889 DPRINTF(WM_DEBUG_LINK,
6890 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
6891 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6892 } else {
6893 DPRINTF(WM_DEBUG_LINK,
6894 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
6895 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6896 }
6897
6898 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6899 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6900 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6901 : WMREG_FCRTL, sc->sc_fcrtl);
6902 if (sc->sc_type == WM_T_80003) {
6903 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6904 case IFM_1000_T:
6905 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6906 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6907 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6908 break;
6909 default:
6910 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6911 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6912 sc->sc_tipg = TIPG_10_100_80003_DFLT;
6913 break;
6914 }
6915 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6916 }
6917 }
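
/*
 * Flow-control mapping in wm_gmii_statchg() above: IFM_ETH_TXPAUSE
 * enables CTRL_TFCE (send pause frames) together with XON generation
 * via FCRTL_XONE, and IFM_ETH_RXPAUSE enables CTRL_RFCE (honour
 * received pause frames).  The collision distance in TCTL is chosen
 * per duplex, and on 80003 the Kumeran half-duplex control and TIPG
 * are reprogrammed to match the new speed.
 */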
6918
6919 /*
6920 * wm_kmrn_readreg:
6921 *
6922 * Read a kumeran register
6923 */
6924 static int
6925 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6926 {
6927 int rv;
6928
6929 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
6930 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6931 aprint_error_dev(sc->sc_dev,
6932 "%s: failed to get semaphore\n", __func__);
6933 return 0;
6934 }
6935 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
6936 if (wm_get_swfwhw_semaphore(sc)) {
6937 aprint_error_dev(sc->sc_dev,
6938 "%s: failed to get semaphore\n", __func__);
6939 return 0;
6940 }
6941 }
6942
6943 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6944 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6945 KUMCTRLSTA_REN);
6946 CSR_WRITE_FLUSH(sc);
6947 delay(2);
6948
6949 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6950
6951 	if (sc->sc_flags & WM_F_LOCK_SWFW)
6952 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6953 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
6954 wm_put_swfwhw_semaphore(sc);
6955
6956 return rv;
6957 }
6958
6959 /*
6960 * wm_kmrn_writereg:
6961 *
6962 * Write a kumeran register
6963 */
6964 static void
6965 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6966 {
6967
6968 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
6969 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6970 aprint_error_dev(sc->sc_dev,
6971 "%s: failed to get semaphore\n", __func__);
6972 return;
6973 }
6974 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
6975 if (wm_get_swfwhw_semaphore(sc)) {
6976 aprint_error_dev(sc->sc_dev,
6977 "%s: failed to get semaphore\n", __func__);
6978 return;
6979 }
6980 }
6981
6982 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6983 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6984 (val & KUMCTRLSTA_MASK));
6985
6986 	if (sc->sc_flags & WM_F_LOCK_SWFW)
6987 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6988 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
6989 wm_put_swfwhw_semaphore(sc);
6990 }
6991
6992 /* SGMII related */
6993
6994 /*
6995 * wm_sgmii_uses_mdio
6996 *
6997 * Check whether the transaction is to the internal PHY or the external
6998 * MDIO interface. Return true if it's MDIO.
6999 */
7000 static bool
7001 wm_sgmii_uses_mdio(struct wm_softc *sc)
7002 {
7003 uint32_t reg;
7004 bool ismdio = false;
7005
7006 switch (sc->sc_type) {
7007 case WM_T_82575:
7008 case WM_T_82576:
7009 reg = CSR_READ(sc, WMREG_MDIC);
7010 ismdio = ((reg & MDIC_DEST) != 0);
7011 break;
7012 case WM_T_82580:
7013 case WM_T_82580ER:
7014 case WM_T_I350:
7015 case WM_T_I354:
7016 case WM_T_I210:
7017 case WM_T_I211:
7018 reg = CSR_READ(sc, WMREG_MDICNFG);
7019 ismdio = ((reg & MDICNFG_DEST) != 0);
7020 break;
7021 default:
7022 break;
7023 }
7024
7025 return ismdio;
7026 }
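
/*
 * Callers typically use this predicate to pick register accessors,
 * mirroring wm_gmii_mediainit() above:
 *
 *	if ((sc->sc_flags & WM_F_SGMII) != 0 &&
 *	    !wm_sgmii_uses_mdio(sc)) {
 *		mii->mii_readreg = wm_sgmii_readreg;
 *		mii->mii_writereg = wm_sgmii_writereg;
 *	}
 */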
7027
7028 /*
7029 * wm_sgmii_readreg: [mii interface function]
7030 *
7031  * Read a PHY register on the SGMII.
7032  * This could be handled by the PHY layer if we didn't have to lock the
7033  * resource ...
7034 */
7035 static int
7036 wm_sgmii_readreg(device_t self, int phy, int reg)
7037 {
7038 struct wm_softc *sc = device_private(self);
7039 uint32_t i2ccmd;
7040 int i, rv;
7041
7042 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7043 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7044 __func__);
7045 return 0;
7046 }
7047
7048 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7049 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7050 | I2CCMD_OPCODE_READ;
7051 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7052
7053 /* Poll the ready bit */
7054 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7055 delay(50);
7056 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7057 if (i2ccmd & I2CCMD_READY)
7058 break;
7059 }
7060 if ((i2ccmd & I2CCMD_READY) == 0)
7061 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7062 if ((i2ccmd & I2CCMD_ERROR) != 0)
7063 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7064
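	/* The 16-bit data field is returned byte-swapped; fix it up. */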
7065 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7066
7067 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7068 return rv;
7069 }
7070
7071 /*
7072 * wm_sgmii_writereg: [mii interface function]
7073 *
7074 * Write a PHY register on the SGMII.
7075 * This could be handled by the PHY layer if we didn't have to lock the
7076  * resource ...
7077 */
7078 static void
7079 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7080 {
7081 struct wm_softc *sc = device_private(self);
7082 uint32_t i2ccmd;
7083 int i;
7084
7085 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7086 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7087 __func__);
7088 return;
7089 }
7090
7091 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7092 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7093 | I2CCMD_OPCODE_WRITE;
7094 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7095
7096 /* Poll the ready bit */
7097 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7098 delay(50);
7099 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7100 if (i2ccmd & I2CCMD_READY)
7101 break;
7102 }
7103 if ((i2ccmd & I2CCMD_READY) == 0)
7104 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7105 if ((i2ccmd & I2CCMD_ERROR) != 0)
7106 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7107
7108 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7109 }
7110
7111 /* TBI related */
7112
7113 /* XXX Currently TBI only */
7114 static int
7115 wm_check_for_link(struct wm_softc *sc)
7116 {
7117 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7118 uint32_t rxcw;
7119 uint32_t ctrl;
7120 uint32_t status;
7121 uint32_t sig;
7122
7123 if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
7124 sc->sc_tbi_linkup = 1;
7125 return 0;
7126 }
7127
7128 rxcw = CSR_READ(sc, WMREG_RXCW);
7129 ctrl = CSR_READ(sc, WMREG_CTRL);
7130 status = CSR_READ(sc, WMREG_STATUS);
7131
7132 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7133
7134 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7135 device_xname(sc->sc_dev), __func__,
7136 ((ctrl & CTRL_SWDPIN(1)) == sig),
7137 ((status & STATUS_LU) != 0),
7138 ((rxcw & RXCW_C) != 0)
7139 ));
7140
7141 /*
7142 * SWDPIN LU RXCW
7143 * 0 0 0
7144 * 0 0 1 (should not happen)
7145 * 0 1 0 (should not happen)
7146 * 0 1 1 (should not happen)
7147 * 1 0 0 Disable autonego and force linkup
7148 * 1 0 1 got /C/ but not linkup yet
7149 * 1 1 0 (linkup)
7150 * 1 1 1 If IFM_AUTO, back to autonego
7151 *
7152 */
7153 if (((ctrl & CTRL_SWDPIN(1)) == sig)
7154 && ((status & STATUS_LU) == 0)
7155 && ((rxcw & RXCW_C) == 0)) {
7156 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7157 __func__));
7158 sc->sc_tbi_linkup = 0;
7159 /* Disable auto-negotiation in the TXCW register */
7160 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7161
7162 /*
7163 * Force link-up and also force full-duplex.
7164 *
7165 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
7166 		 * so we should update sc->sc_ctrl from the register.
7167 */
7168 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7169 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7170 } else if (((status & STATUS_LU) != 0)
7171 && ((rxcw & RXCW_C) != 0)
7172 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7173 sc->sc_tbi_linkup = 1;
7174 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7175 __func__));
7176 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7177 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7178 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7179 && ((rxcw & RXCW_C) != 0)) {
7180 DPRINTF(WM_DEBUG_LINK, ("/C/"));
7181 } else {
7182 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7183 status));
7184 }
7185
7186 return 0;
7187 }
7188
7189 /*
7190 * wm_tbi_mediainit:
7191 *
7192 * Initialize media for use on 1000BASE-X devices.
7193 */
7194 static void
7195 wm_tbi_mediainit(struct wm_softc *sc)
7196 {
7197 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7198 const char *sep = "";
7199
7200 if (sc->sc_type < WM_T_82543)
7201 sc->sc_tipg = TIPG_WM_DFLT;
7202 else
7203 sc->sc_tipg = TIPG_LG_DFLT;
7204
7205 sc->sc_tbi_anegticks = 5;
7206
7207 /* Initialize our media structures */
7208 sc->sc_mii.mii_ifp = ifp;
7209
7210 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7211 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7212 wm_tbi_mediastatus);
7213
7214 /*
7215 * SWD Pins:
7216 *
7217 * 0 = Link LED (output)
7218 * 1 = Loss Of Signal (input)
7219 */
7220 sc->sc_ctrl |= CTRL_SWDPIO(0);
7221 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7222 if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
7223 sc->sc_ctrl &= ~CTRL_LRST;
7224
7225 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7226
7227 #define ADD(ss, mm, dd) \
7228 do { \
7229 aprint_normal("%s%s", sep, ss); \
7230 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
7231 sep = ", "; \
7232 } while (/*CONSTCOND*/0)
7233
7234 aprint_normal_dev(sc->sc_dev, "");
7235 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7236 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7237 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7238 aprint_normal("\n");
7239
7240 #undef ADD
7241
7242 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7243 }
7244
7245 /*
7246 * wm_tbi_mediastatus: [ifmedia interface function]
7247 *
7248 * Get the current interface media status on a 1000BASE-X device.
7249 */
7250 static void
7251 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7252 {
7253 struct wm_softc *sc = ifp->if_softc;
7254 uint32_t ctrl, status;
7255
7256 ifmr->ifm_status = IFM_AVALID;
7257 ifmr->ifm_active = IFM_ETHER;
7258
7259 status = CSR_READ(sc, WMREG_STATUS);
7260 if ((status & STATUS_LU) == 0) {
7261 ifmr->ifm_active |= IFM_NONE;
7262 return;
7263 }
7264
7265 ifmr->ifm_status |= IFM_ACTIVE;
7266 ifmr->ifm_active |= IFM_1000_SX;
7267 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7268 ifmr->ifm_active |= IFM_FDX;
7269 else
7270 ifmr->ifm_active |= IFM_HDX;
7271 ctrl = CSR_READ(sc, WMREG_CTRL);
7272 if (ctrl & CTRL_RFCE)
7273 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7274 if (ctrl & CTRL_TFCE)
7275 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7276 }
7277
7278 /*
7279 * wm_tbi_mediachange: [ifmedia interface function]
7280 *
7281 * Set hardware to newly-selected media on a 1000BASE-X device.
7282 */
7283 static int
7284 wm_tbi_mediachange(struct ifnet *ifp)
7285 {
7286 struct wm_softc *sc = ifp->if_softc;
7287 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7288 uint32_t status;
7289 int i;
7290
7291 if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
7292 return 0;
7293
7294 sc->sc_txcw = 0;
7295 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
7296 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7297 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7298 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7299 sc->sc_txcw |= TXCW_ANE;
7300 } else {
7301 /*
7302 * If autonegotiation is turned off, force link up and turn on
7303 * full duplex
7304 */
7305 sc->sc_txcw &= ~TXCW_ANE;
7306 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
7307 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7308 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7309 CSR_WRITE_FLUSH(sc);
7310 delay(1000);
7311 }
7312
7313 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7314 device_xname(sc->sc_dev),sc->sc_txcw));
7315 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7316 CSR_WRITE_FLUSH(sc);
7317 delay(10000);
7318
7319 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7320 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7321
7322 /*
7323 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
7324 * optics detect a signal, 0 if they don't.
7325 */
7326 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7327 /* Have signal; wait for the link to come up. */
7328
7329 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7330 /*
7331 * Reset the link, and let autonegotiation do its thing
7332 */
7333 sc->sc_ctrl |= CTRL_LRST;
7334 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7335 CSR_WRITE_FLUSH(sc);
7336 delay(1000);
7337 sc->sc_ctrl &= ~CTRL_LRST;
7338 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7339 CSR_WRITE_FLUSH(sc);
7340 delay(1000);
7341 }
7342
7343 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7344 delay(10000);
7345 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7346 break;
7347 }
7348
7349 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7350 device_xname(sc->sc_dev),i));
7351
7352 status = CSR_READ(sc, WMREG_STATUS);
7353 DPRINTF(WM_DEBUG_LINK,
7354 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7355 device_xname(sc->sc_dev),status, STATUS_LU));
7356 if (status & STATUS_LU) {
7357 /* Link is up. */
7358 DPRINTF(WM_DEBUG_LINK,
7359 ("%s: LINK: set media -> link up %s\n",
7360 device_xname(sc->sc_dev),
7361 (status & STATUS_FD) ? "FDX" : "HDX"));
7362
7363 /*
7364 * NOTE: CTRL will update TFCE and RFCE automatically,
7365 * so we should update sc->sc_ctrl
7366 */
7367 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7368 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7369 sc->sc_fcrtl &= ~FCRTL_XONE;
7370 if (status & STATUS_FD)
7371 sc->sc_tctl |=
7372 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7373 else
7374 sc->sc_tctl |=
7375 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7376 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7377 sc->sc_fcrtl |= FCRTL_XONE;
7378 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7379 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7380 WMREG_OLD_FCRTL : WMREG_FCRTL,
7381 sc->sc_fcrtl);
7382 sc->sc_tbi_linkup = 1;
7383 } else {
7384 if (i == WM_LINKUP_TIMEOUT)
7385 wm_check_for_link(sc);
7386 /* Link is down. */
7387 DPRINTF(WM_DEBUG_LINK,
7388 ("%s: LINK: set media -> link down\n",
7389 device_xname(sc->sc_dev)));
7390 sc->sc_tbi_linkup = 0;
7391 }
7392 } else {
7393 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7394 device_xname(sc->sc_dev)));
7395 sc->sc_tbi_linkup = 0;
7396 }
7397
7398 wm_tbi_set_linkled(sc);
7399
7400 return 0;
7401 }
7402
7403 /*
7404 * wm_tbi_set_linkled:
7405 *
7406 * Update the link LED on 1000BASE-X devices.
7407 */
7408 static void
7409 wm_tbi_set_linkled(struct wm_softc *sc)
7410 {
7411
7412 if (sc->sc_tbi_linkup)
7413 sc->sc_ctrl |= CTRL_SWDPIN(0);
7414 else
7415 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7416
7417 /* 82540 or newer devices are active low */
7418 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7419
7420 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7421 }
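
/*
 * Note the XOR above: on 82540 and newer parts the link LED pin is
 * active low, so the bit computed for "link up" is inverted before
 * being written to CTRL.
 */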
7422
7423 /*
7424 * wm_tbi_check_link:
7425 *
7426 * Check the link on 1000BASE-X devices.
7427 */
7428 static void
7429 wm_tbi_check_link(struct wm_softc *sc)
7430 {
7431 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7432 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7433 uint32_t status;
7434
7435 KASSERT(WM_LOCKED(sc));
7436
7437 if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
7438 sc->sc_tbi_linkup = 1;
7439 return;
7440 }
7441
7442 status = CSR_READ(sc, WMREG_STATUS);
7443
7444 /* XXX is this needed? */
7445 (void)CSR_READ(sc, WMREG_RXCW);
7446 (void)CSR_READ(sc, WMREG_CTRL);
7447
7448 /* set link status */
7449 if ((status & STATUS_LU) == 0) {
7450 DPRINTF(WM_DEBUG_LINK,
7451 ("%s: LINK: checklink -> down\n",
7452 device_xname(sc->sc_dev)));
7453 sc->sc_tbi_linkup = 0;
7454 } else if (sc->sc_tbi_linkup == 0) {
7455 DPRINTF(WM_DEBUG_LINK,
7456 ("%s: LINK: checklink -> up %s\n",
7457 device_xname(sc->sc_dev),
7458 (status & STATUS_FD) ? "FDX" : "HDX"));
7459 sc->sc_tbi_linkup = 1;
7460 }
7461
7462 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7463 && ((status & STATUS_LU) == 0)) {
7464 sc->sc_tbi_linkup = 0;
7465 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
7466 /* RXCFG storm! */
7467 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
7468 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
7469 wm_init_locked(ifp);
7470 WM_UNLOCK(sc);
7471 ifp->if_start(ifp);
7472 WM_LOCK(sc);
7473 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7474 /* If the timer expired, retry autonegotiation */
7475 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7476 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7477 sc->sc_tbi_ticks = 0;
7478 /*
7479 * Reset the link, and let autonegotiation do
7480 * its thing
7481 */
7482 sc->sc_ctrl |= CTRL_LRST;
7483 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7484 CSR_WRITE_FLUSH(sc);
7485 delay(1000);
7486 sc->sc_ctrl &= ~CTRL_LRST;
7487 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7488 CSR_WRITE_FLUSH(sc);
7489 delay(1000);
7490 CSR_WRITE(sc, WMREG_TXCW,
7491 sc->sc_txcw & ~TXCW_ANE);
7492 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7493 }
7494 }
7495 }
7496
7497 wm_tbi_set_linkled(sc);
7498 }
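
/*
 * The "RXCFG storm" heuristic above: if more than 100 /C/ ordered sets
 * arrived since the last check while the link stayed down, the link
 * partner is presumed to be stuck in configuration, so the interface
 * is reinitialized.  Otherwise, on autonegotiated media, the link is
 * reset once sc_tbi_anegticks ticks have elapsed and autonegotiation
 * is restarted by rewriting TXCW.
 */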
7499
7500 /*
7501 * NVM related.
7502 * Microwire, SPI (w/wo EERD) and Flash.
7503 */
7504
7505 /* Both spi and uwire */
7506
7507 /*
7508 * wm_eeprom_sendbits:
7509 *
7510 * Send a series of bits to the EEPROM.
7511 */
7512 static void
7513 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7514 {
7515 uint32_t reg;
7516 int x;
7517
7518 reg = CSR_READ(sc, WMREG_EECD);
7519
7520 for (x = nbits; x > 0; x--) {
7521 if (bits & (1U << (x - 1)))
7522 reg |= EECD_DI;
7523 else
7524 reg &= ~EECD_DI;
7525 CSR_WRITE(sc, WMREG_EECD, reg);
7526 CSR_WRITE_FLUSH(sc);
7527 delay(2);
7528 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7529 CSR_WRITE_FLUSH(sc);
7530 delay(2);
7531 CSR_WRITE(sc, WMREG_EECD, reg);
7532 CSR_WRITE_FLUSH(sc);
7533 delay(2);
7534 }
7535 }
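
/*
 * Each bit above is presented on EECD_DI and clocked with a full
 * EECD_SK low-high-low cycle at 2us per phase, i.e. roughly 6us per
 * bit on the wire.
 */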
7536
7537 /*
7538 * wm_eeprom_recvbits:
7539 *
7540 * Receive a series of bits from the EEPROM.
7541 */
7542 static void
7543 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7544 {
7545 uint32_t reg, val;
7546 int x;
7547
7548 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7549
7550 val = 0;
7551 for (x = nbits; x > 0; x--) {
7552 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7553 CSR_WRITE_FLUSH(sc);
7554 delay(2);
7555 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7556 val |= (1U << (x - 1));
7557 CSR_WRITE(sc, WMREG_EECD, reg);
7558 CSR_WRITE_FLUSH(sc);
7559 delay(2);
7560 }
7561 *valp = val;
7562 }
7563
7564 /* Microwire */
7565
7566 /*
7567 * wm_nvm_read_uwire:
7568 *
7569 * Read a word from the EEPROM using the MicroWire protocol.
7570 */
7571 static int
7572 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7573 {
7574 uint32_t reg, val;
7575 int i;
7576
7577 for (i = 0; i < wordcnt; i++) {
7578 /* Clear SK and DI. */
7579 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7580 CSR_WRITE(sc, WMREG_EECD, reg);
7581
7582 /*
7583 * XXX: workaround for a bug in qemu-0.12.x and prior
7584 * and Xen.
7585 *
7586 		 * We use this workaround only for 82540 because qemu's
7587 		 * e1000 acts as an 82540.
7588 */
7589 if (sc->sc_type == WM_T_82540) {
7590 reg |= EECD_SK;
7591 CSR_WRITE(sc, WMREG_EECD, reg);
7592 reg &= ~EECD_SK;
7593 CSR_WRITE(sc, WMREG_EECD, reg);
7594 CSR_WRITE_FLUSH(sc);
7595 delay(2);
7596 }
7597 /* XXX: end of workaround */
7598
7599 /* Set CHIP SELECT. */
7600 reg |= EECD_CS;
7601 CSR_WRITE(sc, WMREG_EECD, reg);
7602 CSR_WRITE_FLUSH(sc);
7603 delay(2);
7604
7605 /* Shift in the READ command. */
7606 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7607
7608 /* Shift in address. */
7609 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
7610
7611 /* Shift out the data. */
7612 wm_eeprom_recvbits(sc, &val, 16);
7613 data[i] = val & 0xffff;
7614
7615 /* Clear CHIP SELECT. */
7616 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7617 CSR_WRITE(sc, WMREG_EECD, reg);
7618 CSR_WRITE_FLUSH(sc);
7619 delay(2);
7620 }
7621
7622 return 0;
7623 }
7624
7625 /* SPI */
7626
7627 /* Set SPI related information */
7628 static void
7629 wm_set_spiaddrbits(struct wm_softc *sc)
7630 {
7631 uint32_t reg;
7632
7633 sc->sc_flags |= WM_F_EEPROM_SPI;
7634 reg = CSR_READ(sc, WMREG_EECD);
7635 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7636 }
7637
7638 /*
7639 * wm_nvm_ready_spi:
7640 *
7641 * Wait for a SPI EEPROM to be ready for commands.
7642 */
7643 static int
7644 wm_nvm_ready_spi(struct wm_softc *sc)
7645 {
7646 uint32_t val;
7647 int usec;
7648
7649 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7650 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7651 wm_eeprom_recvbits(sc, &val, 8);
7652 if ((val & SPI_SR_RDY) == 0)
7653 break;
7654 }
7655 if (usec >= SPI_MAX_RETRIES) {
7656 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7657 return 1;
7658 }
7659 return 0;
7660 }
7661
7662 /*
7663 * wm_nvm_read_spi:
7664 *
7665  * Read a word from the EEPROM using the SPI protocol.
7666 */
7667 static int
7668 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7669 {
7670 uint32_t reg, val;
7671 int i;
7672 uint8_t opc;
7673
7674 /* Clear SK and CS. */
7675 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7676 CSR_WRITE(sc, WMREG_EECD, reg);
7677 CSR_WRITE_FLUSH(sc);
7678 delay(2);
7679
7680 if (wm_nvm_ready_spi(sc))
7681 return 1;
7682
7683 /* Toggle CS to flush commands. */
7684 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7685 CSR_WRITE_FLUSH(sc);
7686 delay(2);
7687 CSR_WRITE(sc, WMREG_EECD, reg);
7688 CSR_WRITE_FLUSH(sc);
7689 delay(2);
7690
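	/*
	 * Small SPI parts with 8-bit addressing carry the ninth address
	 * bit (A8) in the opcode byte, so it must be set when reading
	 * words at byte addresses 0x100 and above (word 128 up).
	 */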
7691 opc = SPI_OPC_READ;
7692 if (sc->sc_ee_addrbits == 8 && word >= 128)
7693 opc |= SPI_OPC_A8;
7694
7695 wm_eeprom_sendbits(sc, opc, 8);
7696 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
7697
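	/*
	 * Each 16-bit word arrives most significant bit first, but SPI
	 * NVM words are stored least significant byte first, so swap the
	 * two bytes: e.g. wire bytes 0x12 then 0x34 yield val == 0x1234
	 * and a stored word of 0x3412.
	 */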
7698 for (i = 0; i < wordcnt; i++) {
7699 wm_eeprom_recvbits(sc, &val, 16);
7700 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7701 }
7702
7703 /* Raise CS and clear SK. */
7704 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7705 CSR_WRITE(sc, WMREG_EECD, reg);
7706 CSR_WRITE_FLUSH(sc);
7707 delay(2);
7708
7709 return 0;
7710 }
7711
7712 /* Reading via the EERD register */
7713
7714 static int
7715 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7716 {
7717 uint32_t attempts = 100000;
7718 uint32_t i, reg = 0;
7719 int32_t done = -1;
7720
7721 for (i = 0; i < attempts; i++) {
7722 reg = CSR_READ(sc, rw);
7723
7724 if (reg & EERD_DONE) {
7725 done = 0;
7726 break;
7727 }
7728 delay(5);
7729 }
7730
7731 return done;
7732 }
7733
7734 static int
7735 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
7736 uint16_t *data)
7737 {
7738 int i, eerd = 0;
7739 int error = 0;
7740
7741 for (i = 0; i < wordcnt; i++) {
7742 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
7743
7744 CSR_WRITE(sc, WMREG_EERD, eerd);
7745 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
7746 if (error != 0)
7747 break;
7748
7749 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
7750 }
7751
7752 return error;
7753 }
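
/*
 * Illustrative sketch only (untested and not compiled): a word write
 * via the companion EEWR register would mirror wm_nvm_read_eerd(),
 * which is why wm_poll_eerd_eewr_done() takes the register offset as
 * an argument.  The WMREG_EEWR name and the assumption that EEWR
 * shares the EERD_* field layout are ours.
 */
#if 0
static int
wm_nvm_write_eewr(struct wm_softc *sc, int offset, uint16_t data)
{

	CSR_WRITE(sc, WMREG_EEWR, ((uint32_t)data << EERD_DATA_SHIFT)
	    | (offset << EERD_ADDR_SHIFT) | EERD_START);
	return wm_poll_eerd_eewr_done(sc, WMREG_EEWR);
}
#endif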
7754
7755 /* Flash */
7756
7757 static int
7758 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7759 {
7760 uint32_t eecd;
7761 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7762 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7763 uint8_t sig_byte = 0;
7764
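	/*
	 * The valid-bank signature lives in the high byte of word
	 * ICH_NVM_SIG_WORD of each bank, hence the "* 2 + 1" byte offset
	 * above; bank 1 starts sc_ich8_flash_bank_size words (twice that
	 * in bytes) after bank 0.
	 */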
7765 switch (sc->sc_type) {
7766 case WM_T_ICH8:
7767 case WM_T_ICH9:
7768 eecd = CSR_READ(sc, WMREG_EECD);
7769 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7770 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7771 return 0;
7772 }
7773 /* FALLTHROUGH */
7774 default:
7775 /* Default to 0 */
7776 *bank = 0;
7777
7778 /* Check bank 0 */
7779 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7780 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7781 *bank = 0;
7782 return 0;
7783 }
7784
7785 /* Check bank 1 */
7786 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7787 &sig_byte);
7788 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7789 *bank = 1;
7790 return 0;
7791 }
7792 }
7793
7794 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
7795 device_xname(sc->sc_dev)));
7796 return -1;
7797 }
7798
7799 /******************************************************************************
7800 * This function does initial flash setup so that a new read/write/erase cycle
7801 * can be started.
7802 *
7803 * sc - The pointer to the hw structure
7804 ****************************************************************************/
7805 static int32_t
7806 wm_ich8_cycle_init(struct wm_softc *sc)
7807 {
7808 uint16_t hsfsts;
7809 int32_t error = 1;
7810 int32_t i = 0;
7811
7812 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7813
7814 	/* Check that the Flash Descriptor Valid bit is set in Hw status */
7815 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7816 return error;
7817 }
7818
7819 	/* Clear FCERR and DAEL in Hw status by writing 1s */
7821 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7822
7823 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7824
7825 	/*
7826 	 * Either we should have a hardware SPI cycle-in-progress bit to
7827 	 * check against before starting a new cycle, or the FDONE bit
7828 	 * should be changed in the hardware so that it reads 1 after a
7829 	 * hardware reset, which could then be used to tell whether a
7830 	 * cycle is in progress or has completed.  We should also have a
7831 	 * software semaphore mechanism guarding FDONE or the
7832 	 * cycle-in-progress bit, so that two threads' accesses to those
7833 	 * bits are serialized and they cannot start a cycle at once.
7834 	 */
7835
7836 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7837 /*
7838 * There is no cycle running at present, so we can start a
7839 * cycle
7840 */
7841
7842 /* Begin by setting Flash Cycle Done. */
7843 hsfsts |= HSFSTS_DONE;
7844 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7845 error = 0;
7846 } else {
7847 		/*
7848 		 * Otherwise poll for some time so the current cycle has
7849 		 * a chance to end before giving up.
7850 		 */
7851 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7852 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7853 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7854 error = 0;
7855 break;
7856 }
7857 delay(1);
7858 }
7859 if (error == 0) {
7860 /*
7861 			 * The previous cycle completed in time, so now
7862 			 * set the Flash Cycle Done.
7863 */
7864 hsfsts |= HSFSTS_DONE;
7865 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7866 }
7867 }
7868 return error;
7869 }
7870
7871 /******************************************************************************
7872 * This function starts a flash cycle and waits for its completion
7873 *
7874 * sc - The pointer to the hw structure
7875 ****************************************************************************/
7876 static int32_t
7877 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7878 {
7879 uint16_t hsflctl;
7880 uint16_t hsfsts;
7881 int32_t error = 1;
7882 uint32_t i = 0;
7883
7884 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7885 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7886 hsflctl |= HSFCTL_GO;
7887 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7888
7889 /* Wait till FDONE bit is set to 1 */
7890 do {
7891 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7892 if (hsfsts & HSFSTS_DONE)
7893 break;
7894 delay(1);
7895 i++;
7896 } while (i < timeout);
7897 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7898 error = 0;
7899
7900 return error;
7901 }
7902
7903 /******************************************************************************
7904 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7905 *
7906 * sc - The pointer to the hw structure
7907 * index - The index of the byte or word to read.
7908 * size - Size of data to read, 1=byte 2=word
7909 * data - Pointer to the word to store the value read.
7910 *****************************************************************************/
7911 static int32_t
7912 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7913 uint32_t size, uint16_t *data)
7914 {
7915 uint16_t hsfsts;
7916 uint16_t hsflctl;
7917 uint32_t flash_linear_address;
7918 uint32_t flash_data = 0;
7919 int32_t error = 1;
7920 int32_t count = 0;
7921
7922 	if (size < 1 || size > 2 || data == NULL ||
7923 index > ICH_FLASH_LINEAR_ADDR_MASK)
7924 return error;
7925
7926 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7927 sc->sc_ich8_flash_base;
7928
7929 do {
7930 delay(1);
7931 /* Steps */
7932 error = wm_ich8_cycle_init(sc);
7933 if (error)
7934 break;
7935
7936 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7937 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7938 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7939 & HSFCTL_BCOUNT_MASK;
7940 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7941 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7942
7943 		/*
7944 		 * Write the last 24 bits of the index into the Flash
7945 		 * Linear Address field of the Flash Address register.
7946 		 */
7947 		/* TODO: check the index against the size of the flash */
7948
7949 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7950
7951 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7952
7953 		/*
7954 		 * If FCERR is set, clear it and retry the whole sequence
7955 		 * (up to ICH_FLASH_CYCLE_REPEAT_COUNT times); otherwise
7956 		 * read the result out of the Flash Data0 register, least
7957 		 * significant byte first.
7958 		 */
7959 if (error == 0) {
7960 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7961 if (size == 1)
7962 *data = (uint8_t)(flash_data & 0x000000FF);
7963 else if (size == 2)
7964 *data = (uint16_t)(flash_data & 0x0000FFFF);
7965 break;
7966 } else {
7967 /*
7968 * If we've gotten here, then things are probably
7969 * completely hosed, but if the error condition is
7970 * detected, it won't hurt to give it another try...
7971 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7972 */
7973 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7974 if (hsfsts & HSFSTS_ERR) {
7975 /* Repeat for some time before giving up. */
7976 continue;
7977 } else if ((hsfsts & HSFSTS_DONE) == 0)
7978 break;
7979 }
7980 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7981
7982 return error;
7983 }
7984
7985 /******************************************************************************
7986 * Reads a single byte from the NVM using the ICH8 flash access registers.
7987 *
7988 * sc - pointer to wm_hw structure
7989 * index - The index of the byte to read.
7990 * data - Pointer to a byte to store the value read.
7991 *****************************************************************************/
7992 static int32_t
7993 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7994 {
7995 int32_t status;
7996 uint16_t word = 0;
7997
7998 status = wm_read_ich8_data(sc, index, 1, &word);
7999 if (status == 0)
8000 *data = (uint8_t)word;
8001 else
8002 *data = 0;
8003
8004 return status;
8005 }
8006
8007 /******************************************************************************
8008 * Reads a word from the NVM using the ICH8 flash access registers.
8009 *
8010 * sc - pointer to wm_hw structure
8011 * index - The starting byte index of the word to read.
8012 * data - Pointer to a word to store the value read.
8013 *****************************************************************************/
8014 static int32_t
8015 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8016 {
8017 int32_t status;
8018
8019 status = wm_read_ich8_data(sc, index, 2, data);
8020 return status;
8021 }
8022
8023 /******************************************************************************
8024 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8025 * register.
8026 *
8027 * sc - Struct containing variables accessed by shared code
8028 * offset - offset of word in the EEPROM to read
8029 * data - word read from the EEPROM
8030 * words - number of words to read
8031 *****************************************************************************/
8032 static int
8033 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8034 {
8035 int32_t error = 0;
8036 uint32_t flash_bank = 0;
8037 uint32_t act_offset = 0;
8038 uint32_t bank_offset = 0;
8039 uint16_t word = 0;
8040 uint16_t i = 0;
8041
8042 	/*
8043 	 * We need to know which flash bank is currently valid.  Since
8044 	 * we don't maintain an eeprom_shadow_ram, flash_bank isn't
8045 	 * tracked anywhere, so it cannot be cached and must be
8046 	 * re-detected on every read.
8047 	 */
8048 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8049 if (error) {
8050 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
8051 __func__);
8052 flash_bank = 0;
8053 }
8054
8055 	/*
8056 	 * Compute the byte offset of the selected bank; the bank size
8057 	 * is in words, hence the * 2.
8058 	 */
8059 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8060
8061 error = wm_get_swfwhw_semaphore(sc);
8062 if (error) {
8063 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8064 __func__);
8065 return error;
8066 }
8067
8068 for (i = 0; i < words; i++) {
8069 /* The NVM part needs a byte offset, hence * 2 */
8070 act_offset = bank_offset + ((offset + i) * 2);
8071 error = wm_read_ich8_word(sc, act_offset, &word);
8072 if (error) {
8073 aprint_error_dev(sc->sc_dev,
8074 "%s: failed to read NVM\n", __func__);
8075 break;
8076 }
8077 data[i] = word;
8078 }
8079
8080 wm_put_swfwhw_semaphore(sc);
8081 return error;
8082 }
8083
8084 /* Locking, NVM type detection, checksum validation and reading */
8085
8086 /*
8087 * wm_nvm_acquire:
8088 *
8089 * Perform the EEPROM handshake required on some chips.
8090 */
8091 static int
8092 wm_nvm_acquire(struct wm_softc *sc)
8093 {
8094 uint32_t reg;
8095 int x;
8096 int ret = 0;
8097
8098 	/* Flash does its own locking in wm_nvm_read_ich8(); always succeeds */
8099 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8100 return 0;
8101
8102 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8103 ret = wm_get_swfwhw_semaphore(sc);
8104 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8105 /* This will also do wm_get_swsm_semaphore() if needed */
8106 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8107 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8108 ret = wm_get_swsm_semaphore(sc);
8109 }
8110
8111 if (ret) {
8112 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8113 __func__);
8114 return 1;
8115 }
8116
8117 if (sc->sc_flags & WM_F_LOCK_EECD) {
8118 reg = CSR_READ(sc, WMREG_EECD);
8119
8120 /* Request EEPROM access. */
8121 reg |= EECD_EE_REQ;
8122 CSR_WRITE(sc, WMREG_EECD, reg);
8123
8124 /* ..and wait for it to be granted. */
8125 for (x = 0; x < 1000; x++) {
8126 reg = CSR_READ(sc, WMREG_EECD);
8127 if (reg & EECD_EE_GNT)
8128 break;
8129 delay(5);
8130 }
8131 if ((reg & EECD_EE_GNT) == 0) {
8132 aprint_error_dev(sc->sc_dev,
8133 "could not acquire EEPROM GNT\n");
8134 reg &= ~EECD_EE_REQ;
8135 CSR_WRITE(sc, WMREG_EECD, reg);
8136 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8137 wm_put_swfwhw_semaphore(sc);
8138 if (sc->sc_flags & WM_F_LOCK_SWFW)
8139 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8140 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8141 wm_put_swsm_semaphore(sc);
8142 return 1;
8143 }
8144 }
8145
8146 return 0;
8147 }
8148
8149 /*
8150 * wm_nvm_release:
8151 *
8152 * Release the EEPROM mutex.
8153 */
8154 static void
8155 wm_nvm_release(struct wm_softc *sc)
8156 {
8157 uint32_t reg;
8158
8159 	/* Nothing to release for flash; see wm_nvm_read_ich8() */
8160 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8161 return;
8162
8163 if (sc->sc_flags & WM_F_LOCK_EECD) {
8164 reg = CSR_READ(sc, WMREG_EECD);
8165 reg &= ~EECD_EE_REQ;
8166 CSR_WRITE(sc, WMREG_EECD, reg);
8167 }
8168
8169 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8170 wm_put_swfwhw_semaphore(sc);
8171 if (sc->sc_flags & WM_F_LOCK_SWFW)
8172 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8173 else if (sc->sc_flags & WM_F_LOCK_SWSM)
8174 wm_put_swsm_semaphore(sc);
8175 }
8176
8177 static int
8178 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8179 {
8180 uint32_t eecd = 0;
8181
8182 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8183 || sc->sc_type == WM_T_82583) {
8184 eecd = CSR_READ(sc, WMREG_EECD);
8185
8186 /* Isolate bits 15 & 16 */
8187 eecd = ((eecd >> 15) & 0x03);
8188
8189 /* If both bits are set, device is Flash type */
8190 if (eecd == 0x03)
8191 return 0;
8192 }
8193 return 1;
8194 }
8195
8196 #define NVM_CHECKSUM 0xBABA
8197 #define EEPROM_SIZE 0x0040
8198 #define NVM_COMPAT 0x0003
8199 #define NVM_COMPAT_VALID_CHECKSUM 0x0001
8200 #define NVM_FUTURE_INIT_WORD1 0x0019
8201 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
8202
8203 /*
8204 * wm_nvm_validate_checksum
8205 *
8206 * The checksum is defined as the sum of the first 64 (16 bit) words.
8207 */
8208 static int
8209 wm_nvm_validate_checksum(struct wm_softc *sc)
8210 {
8211 uint16_t checksum;
8212 uint16_t eeprom_data;
8213 #ifdef WM_DEBUG
8214 uint16_t csum_wordaddr, valid_checksum;
8215 #endif
8216 int i;
8217
8218 checksum = 0;
8219
8220 /* Don't check for I211 */
8221 if (sc->sc_type == WM_T_I211)
8222 return 0;
8223
8224 #ifdef WM_DEBUG
8225 if (sc->sc_type == WM_T_PCH_LPT) {
8226 csum_wordaddr = NVM_COMPAT;
8227 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8228 } else {
8229 csum_wordaddr = NVM_FUTURE_INIT_WORD1;
8230 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8231 }
8232
8233 /* Dump EEPROM image for debug */
8234 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8235 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8236 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8237 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8238 if ((eeprom_data & valid_checksum) == 0) {
8239 DPRINTF(WM_DEBUG_NVM,
8240 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
8241 device_xname(sc->sc_dev), eeprom_data,
8242 valid_checksum));
8243 }
8244 }
8245
8246 if ((wm_debug & WM_DEBUG_NVM) != 0) {
8247 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8248 for (i = 0; i < EEPROM_SIZE; i++) {
8249 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8250 printf("XX ");
8251 else
8252 printf("%04x ", eeprom_data);
8253 if (i % 8 == 7)
8254 printf("\n");
8255 }
8256 }
8257
8258 #endif /* WM_DEBUG */
8259
8260 for (i = 0; i < EEPROM_SIZE; i++) {
8261 if (wm_nvm_read(sc, i, 1, &eeprom_data))
8262 return 1;
8263 checksum += eeprom_data;
8264 }
8265
8266 if (checksum != (uint16_t) NVM_CHECKSUM) {
8267 #ifdef WM_DEBUG
8268 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8269 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8270 #endif
8271 }
8272
8273 return 0;
8274 }
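
/*
 * Illustrative sketch only (untested): since the 64-word sum must come
 * out to NVM_CHECKSUM, a tool that edits the NVM would recompute the
 * checksum word (the last of the 64) as below.  The "sum the first 63
 * words, store NVM_CHECKSUM minus that sum" rule is our reading of the
 * validation loop above.
 */
#if 0
	uint16_t sum = 0, word;
	int off;

	for (off = 0; off < EEPROM_SIZE - 1; off++) {
		if (wm_nvm_read(sc, off, 1, &word))
			return 1;
		sum += word;
	}
	word = (uint16_t)(NVM_CHECKSUM - sum);	/* new checksum word */
#endif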
8275
8276 /*
8277 * wm_nvm_read:
8278 *
8279 * Read data from the serial EEPROM.
8280 */
8281 static int
8282 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8283 {
8284 int rv;
8285
8286 if (sc->sc_flags & WM_F_EEPROM_INVALID)
8287 return 1;
8288
8289 if (wm_nvm_acquire(sc))
8290 return 1;
8291
8292 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8293 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8294 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8295 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8296 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8297 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8298 else if (sc->sc_flags & WM_F_EEPROM_SPI)
8299 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8300 else
8301 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8302
8303 wm_nvm_release(sc);
8304 return rv;
8305 }
8306
8307 /*
8308 * Hardware semaphores.
8309  * Very complex...
8310 */
8311
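/*
 * A minimal usage sketch (illustrative only): callers bracket every
 * access to a firmware-shared resource with an acquire/release pair,
 * as wm_nvm_read() does with wm_nvm_acquire()/wm_nvm_release() above.
 */
#if 0
	if (wm_get_swsm_semaphore(sc) == 0) {
		/* ... touch the resource shared with the firmware ... */
		wm_put_swsm_semaphore(sc);
	}
#endif
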
8312 static int
8313 wm_get_swsm_semaphore(struct wm_softc *sc)
8314 {
8315 int32_t timeout;
8316 uint32_t swsm;
8317
8318 /* Get the SW semaphore. */
8319 timeout = 1000 + 1; /* XXX */
8320 while (timeout) {
8321 swsm = CSR_READ(sc, WMREG_SWSM);
8322
8323 if ((swsm & SWSM_SMBI) == 0)
8324 break;
8325
8326 delay(50);
8327 timeout--;
8328 }
8329
8330 if (timeout == 0) {
8331 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
8332 return 1;
8333 }
8334
8335 /* Get the FW semaphore. */
8336 timeout = 1000 + 1; /* XXX */
8337 while (timeout) {
8338 swsm = CSR_READ(sc, WMREG_SWSM);
8339 swsm |= SWSM_SWESMBI;
8340 CSR_WRITE(sc, WMREG_SWSM, swsm);
8341 /* If we managed to set the bit we got the semaphore. */
8342 swsm = CSR_READ(sc, WMREG_SWSM);
8343 if (swsm & SWSM_SWESMBI)
8344 break;
8345
8346 delay(50);
8347 timeout--;
8348 }
8349
8350 if (timeout == 0) {
8351 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8352 /* Release semaphores */
8353 wm_put_swsm_semaphore(sc);
8354 return 1;
8355 }
8356 return 0;
8357 }
8358
8359 static void
8360 wm_put_swsm_semaphore(struct wm_softc *sc)
8361 {
8362 uint32_t swsm;
8363
8364 swsm = CSR_READ(sc, WMREG_SWSM);
8365 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8366 CSR_WRITE(sc, WMREG_SWSM, swsm);
8367 }
8368
8369 static int
8370 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8371 {
8372 uint32_t swfw_sync;
8373 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8374 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8375 	int timeout;
8376
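	/*
	 * SW_FW_SYNC carries one software bit and one firmware bit per
	 * resource; the resource may be taken only while both are clear,
	 * and the SWSM semaphore guards the read-modify-write of the
	 * SW_FW_SYNC register itself.
	 */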
8377 for (timeout = 0; timeout < 200; timeout++) {
8378 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8379 if (wm_get_swsm_semaphore(sc)) {
8380 aprint_error_dev(sc->sc_dev,
8381 "%s: failed to get semaphore\n",
8382 __func__);
8383 return 1;
8384 }
8385 }
8386 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8387 if ((swfw_sync & (swmask | fwmask)) == 0) {
8388 swfw_sync |= swmask;
8389 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8390 if (sc->sc_flags & WM_F_LOCK_SWSM)
8391 wm_put_swsm_semaphore(sc);
8392 return 0;
8393 }
8394 if (sc->sc_flags & WM_F_LOCK_SWSM)
8395 wm_put_swsm_semaphore(sc);
8396 delay(5000);
8397 }
8398 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8399 device_xname(sc->sc_dev), mask, swfw_sync);
8400 return 1;
8401 }
8402
8403 static void
8404 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8405 {
8406 uint32_t swfw_sync;
8407
8408 if (sc->sc_flags & WM_F_LOCK_SWSM) {
8409 while (wm_get_swsm_semaphore(sc) != 0)
8410 continue;
8411 }
8412 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8413 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8414 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8415 if (sc->sc_flags & WM_F_LOCK_SWSM)
8416 wm_put_swsm_semaphore(sc);
8417 }
8418
8419 static int
8420 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8421 {
8422 uint32_t ext_ctrl;
8423 	int timeout;
8424
8425 for (timeout = 0; timeout < 200; timeout++) {
8426 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8427 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8428 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8429
8430 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8431 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8432 return 0;
8433 delay(5000);
8434 }
8435 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8436 device_xname(sc->sc_dev), ext_ctrl);
8437 return 1;
8438 }
8439
8440 static void
8441 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8442 {
8443 uint32_t ext_ctrl;
8444 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8445 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8446 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8447 }
8448
8449 static int
8450 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8451 {
8452 int i = 0;
8453 uint32_t reg;
8454
8455 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8456 do {
8457 CSR_WRITE(sc, WMREG_EXTCNFCTR,
8458 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8459 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8460 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8461 break;
8462 delay(2*1000);
8463 i++;
8464 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8465
8466 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8467 wm_put_hw_semaphore_82573(sc);
8468 log(LOG_ERR, "%s: Driver can't access the PHY\n",
8469 device_xname(sc->sc_dev));
8470 return -1;
8471 }
8472
8473 return 0;
8474 }
8475
8476 static void
8477 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8478 {
8479 uint32_t reg;
8480
8481 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8482 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8483 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8484 }
8485
8486 /*
8487 * Management mode and power management related subroutines.
8488 * BMC, AMT, suspend/resume and EEE.
8489 */
8490
8491 static int
8492 wm_check_mng_mode(struct wm_softc *sc)
8493 {
8494 int rv;
8495
8496 switch (sc->sc_type) {
8497 case WM_T_ICH8:
8498 case WM_T_ICH9:
8499 case WM_T_ICH10:
8500 case WM_T_PCH:
8501 case WM_T_PCH2:
8502 case WM_T_PCH_LPT:
8503 rv = wm_check_mng_mode_ich8lan(sc);
8504 break;
8505 case WM_T_82574:
8506 case WM_T_82583:
8507 rv = wm_check_mng_mode_82574(sc);
8508 break;
8509 case WM_T_82571:
8510 case WM_T_82572:
8511 case WM_T_82573:
8512 case WM_T_80003:
8513 rv = wm_check_mng_mode_generic(sc);
8514 break;
8515 default:
8516 		/* nothing to do */
8517 rv = 0;
8518 break;
8519 }
8520
8521 return rv;
8522 }
8523
8524 static int
8525 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8526 {
8527 uint32_t fwsm;
8528
8529 fwsm = CSR_READ(sc, WMREG_FWSM);
8530
8531 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8532 return 1;
8533
8534 return 0;
8535 }
8536
8537 static int
8538 wm_check_mng_mode_82574(struct wm_softc *sc)
8539 {
8540 uint16_t data;
8541
8542 wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
8543
8544 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8545 return 1;
8546
8547 return 0;
8548 }
8549
8550 static int
8551 wm_check_mng_mode_generic(struct wm_softc *sc)
8552 {
8553 uint32_t fwsm;
8554
8555 fwsm = CSR_READ(sc, WMREG_FWSM);
8556
8557 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8558 return 1;
8559
8560 return 0;
8561 }
8562
8563 static int
8564 wm_enable_mng_pass_thru(struct wm_softc *sc)
8565 {
8566 uint32_t manc, fwsm, factps;
8567
8568 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8569 return 0;
8570
8571 manc = CSR_READ(sc, WMREG_MANC);
8572
8573 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8574 device_xname(sc->sc_dev), manc));
8575 if ((manc & MANC_RECV_TCO_EN) == 0)
8576 return 0;
8577
8578 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8579 fwsm = CSR_READ(sc, WMREG_FWSM);
8580 factps = CSR_READ(sc, WMREG_FACTPS);
8581 if (((factps & FACTPS_MNGCG) == 0)
8582 && ((fwsm & FWSM_MODE_MASK)
8583 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8584 return 1;
8585 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8586 uint16_t data;
8587
8588 factps = CSR_READ(sc, WMREG_FACTPS);
8589 wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
8590 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8591 device_xname(sc->sc_dev), factps, data));
8592 if (((factps & FACTPS_MNGCG) == 0)
8593 && ((data & EEPROM_CFG2_MNGM_MASK)
8594 == (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8595 return 1;
8596 } else if (((manc & MANC_SMBUS_EN) != 0)
8597 && ((manc & MANC_ASF_EN) == 0))
8598 return 1;
8599
8600 return 0;
8601 }
8602
8603 static int
8604 wm_check_reset_block(struct wm_softc *sc)
8605 {
8606 uint32_t reg;
8607
8608 switch (sc->sc_type) {
8609 case WM_T_ICH8:
8610 case WM_T_ICH9:
8611 case WM_T_ICH10:
8612 case WM_T_PCH:
8613 case WM_T_PCH2:
8614 case WM_T_PCH_LPT:
8615 reg = CSR_READ(sc, WMREG_FWSM);
8616 if ((reg & FWSM_RSPCIPHY) != 0)
8617 return 0;
8618 else
8619 return -1;
8620 break;
8621 case WM_T_82571:
8622 case WM_T_82572:
8623 case WM_T_82573:
8624 case WM_T_82574:
8625 case WM_T_82583:
8626 case WM_T_80003:
8627 reg = CSR_READ(sc, WMREG_MANC);
8628 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8629 return -1;
8630 else
8631 return 0;
8632 break;
8633 default:
8634 /* no problem */
8635 break;
8636 }
8637
8638 return 0;
8639 }
8640
8641 static void
8642 wm_get_hw_control(struct wm_softc *sc)
8643 {
8644 uint32_t reg;
8645
8646 switch (sc->sc_type) {
8647 case WM_T_82573:
8648 reg = CSR_READ(sc, WMREG_SWSM);
8649 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8650 break;
8651 case WM_T_82571:
8652 case WM_T_82572:
8653 case WM_T_82574:
8654 case WM_T_82583:
8655 case WM_T_80003:
8656 case WM_T_ICH8:
8657 case WM_T_ICH9:
8658 case WM_T_ICH10:
8659 case WM_T_PCH:
8660 case WM_T_PCH2:
8661 case WM_T_PCH_LPT:
8662 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8663 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8664 break;
8665 default:
8666 break;
8667 }
8668 }
8669
8670 static void
8671 wm_release_hw_control(struct wm_softc *sc)
8672 {
8673 uint32_t reg;
8674
8675 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8676 return;
8677
8678 if (sc->sc_type == WM_T_82573) {
8679 		reg = CSR_READ(sc, WMREG_SWSM);
8680 		reg &= ~SWSM_DRV_LOAD;
8681 		CSR_WRITE(sc, WMREG_SWSM, reg);
8682 } else {
8683 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8684 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8685 }
8686 }
8687
8688 static void
8689 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8690 {
8691 uint32_t reg;
8692
8693 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8694
8695 if (on != 0)
8696 reg |= EXTCNFCTR_GATE_PHY_CFG;
8697 else
8698 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8699
8700 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8701 }
8702
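/*
 * wm_smbustopci:
 *
 *	If no firmware is managing the PHY, toggle the LANPHYPC pin to
 *	power-cycle the PHY and force its management interface from
 *	SMBus mode back to PCIe (this description of the pin's effect
 *	follows Intel's reference driver).
 */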
8703 static void
8704 wm_smbustopci(struct wm_softc *sc)
8705 {
8706 uint32_t fwsm;
8707
8708 fwsm = CSR_READ(sc, WMREG_FWSM);
8709 if (((fwsm & FWSM_FW_VALID) == 0)
8710 && ((wm_check_reset_block(sc) == 0))) {
8711 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8712 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8713 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8714 CSR_WRITE_FLUSH(sc);
8715 delay(10);
8716 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8717 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8718 CSR_WRITE_FLUSH(sc);
8719 delay(50*1000);
8720
8721 /*
8722 * Gate automatic PHY configuration by hardware on non-managed
8723 * 82579
8724 */
8725 if (sc->sc_type == WM_T_PCH2)
8726 wm_gate_hw_phy_config_ich8lan(sc, 1);
8727 }
8728 }
8729
8730 static void
8731 wm_init_manageability(struct wm_softc *sc)
8732 {
8733
8734 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8735 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8736 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8737
8738 /* Disable hardware interception of ARP */
8739 manc &= ~MANC_ARP_EN;
8740
8741 /* Enable receiving management packets to the host */
8742 if (sc->sc_type >= WM_T_82571) {
8743 manc |= MANC_EN_MNG2HOST;
8744 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8745 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8746 		}
8748
8749 CSR_WRITE(sc, WMREG_MANC, manc);
8750 }
8751 }
8752
8753 static void
8754 wm_release_manageability(struct wm_softc *sc)
8755 {
8756
8757 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8758 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8759
8760 manc |= MANC_ARP_EN;
8761 if (sc->sc_type >= WM_T_82571)
8762 manc &= ~MANC_EN_MNG2HOST;
8763
8764 CSR_WRITE(sc, WMREG_MANC, manc);
8765 }
8766 }
8767
8768 static void
8769 wm_get_wakeup(struct wm_softc *sc)
8770 {
8771
8772 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8773 switch (sc->sc_type) {
8774 case WM_T_82573:
8775 case WM_T_82583:
8776 sc->sc_flags |= WM_F_HAS_AMT;
8777 /* FALLTHROUGH */
8778 case WM_T_80003:
8779 case WM_T_82541:
8780 case WM_T_82547:
8781 case WM_T_82571:
8782 case WM_T_82572:
8783 case WM_T_82574:
8784 case WM_T_82575:
8785 case WM_T_82576:
8786 case WM_T_82580:
8787 case WM_T_82580ER:
8788 case WM_T_I350:
8789 case WM_T_I354:
8790 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8791 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8792 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8793 break;
8794 case WM_T_ICH8:
8795 case WM_T_ICH9:
8796 case WM_T_ICH10:
8797 case WM_T_PCH:
8798 case WM_T_PCH2:
8799 case WM_T_PCH_LPT:
8800 sc->sc_flags |= WM_F_HAS_AMT;
8801 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8802 break;
8803 default:
8804 break;
8805 }
8806
8807 /* 1: HAS_MANAGE */
8808 if (wm_enable_mng_pass_thru(sc) != 0)
8809 sc->sc_flags |= WM_F_HAS_MANAGE;
8810
8811 #ifdef WM_DEBUG
8812 printf("\n");
8813 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8814 printf("HAS_AMT,");
8815 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8816 printf("ARC_SUBSYS_VALID,");
8817 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8818 printf("ASF_FIRMWARE_PRES,");
8819 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8820 printf("HAS_MANAGE,");
8821 printf("\n");
8822 #endif
8823 	/*
8824 	 * Note that the WOL flag is set only later, after the reset
8825 	 * and the EEPROM handling are done.
8826 	 */
8827 }
8828
8829 #ifdef WM_WOL
8830 /* WOL in the newer chipset interfaces (pchlan) */
8831 static void
8832 wm_enable_phy_wakeup(struct wm_softc *sc)
8833 {
8834 #if 0
8835 uint16_t preg;
8836
8837 /* Copy MAC RARs to PHY RARs */
8838
8839 /* Copy MAC MTA to PHY MTA */
8840
8841 /* Configure PHY Rx Control register */
8842
8843 /* Enable PHY wakeup in MAC register */
8844
8845 /* Configure and enable PHY wakeup in PHY registers */
8846
8847 /* Activate PHY wakeup */
8848
8849 /* XXX */
8850 #endif
8851 }
8852
8853 /* Power down workaround on D3 */
8854 static void
8855 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8856 {
8857 uint32_t reg;
8858 int i;
8859
8860 for (i = 0; i < 2; i++) {
8861 /* Disable link */
8862 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8863 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8864 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8865
8866 /*
8867 * Call gig speed drop workaround on Gig disable before
8868 * accessing any PHY registers
8869 */
8870 if (sc->sc_type == WM_T_ICH8)
8871 wm_gig_downshift_workaround_ich8lan(sc);
8872
8873 /* Write VR power-down enable */
8874 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8875 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8876 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8877 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8878
8879 /* Read it back and test */
8880 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8881 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8882 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8883 break;
8884
8885 /* Issue PHY reset and repeat at most one more time */
8886 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8887 }
8888 }
8889
8890 static void
8891 wm_enable_wakeup(struct wm_softc *sc)
8892 {
8893 uint32_t reg, pmreg;
8894 pcireg_t pmode;
8895
8896 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8897 &pmreg, NULL) == 0)
8898 return;
8899
8900 /* Advertise the wakeup capability */
8901 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8902 | CTRL_SWDPIN(3));
8903 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8904
8905 /* ICH workaround */
8906 switch (sc->sc_type) {
8907 case WM_T_ICH8:
8908 case WM_T_ICH9:
8909 case WM_T_ICH10:
8910 case WM_T_PCH:
8911 case WM_T_PCH2:
8912 case WM_T_PCH_LPT:
8913 /* Disable gig during WOL */
8914 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8915 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8916 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8917 if (sc->sc_type == WM_T_PCH)
8918 wm_gmii_reset(sc);
8919
8920 /* Power down workaround */
8921 if (sc->sc_phytype == WMPHY_82577) {
8922 struct mii_softc *child;
8923
8924 /* Assume that the PHY is copper */
8925 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8926 if (child->mii_mpd_rev <= 2)
8927 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8928 (768 << 5) | 25, 0x0444); /* magic num */
8929 }
8930 break;
8931 default:
8932 break;
8933 }
8934
8935 /* Keep the laser running on fiber adapters */
8936 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8937 || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
8938 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8939 reg |= CTRL_EXT_SWDPIN(3);
8940 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8941 }
8942
8943 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8944 #if 0 /* for the multicast packet */
8945 reg |= WUFC_MC;
8946 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8947 #endif
8948
8949 if (sc->sc_type == WM_T_PCH) {
8950 wm_enable_phy_wakeup(sc);
8951 } else {
8952 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8953 CSR_WRITE(sc, WMREG_WUFC, reg);
8954 }
8955
8956 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8957 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8958 || (sc->sc_type == WM_T_PCH2))
8959 && (sc->sc_phytype == WMPHY_IGP_3))
8960 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8961
8962 /* Request PME */
8963 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8964 #if 0
8965 /* Disable WOL */
8966 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8967 #else
8968 /* For WOL */
8969 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8970 #endif
8971 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8972 }
8973 #endif /* WM_WOL */
8974
8975 /* EEE */
8976
8977 static void
8978 wm_set_eee_i350(struct wm_softc *sc)
8979 {
8980 uint32_t ipcnfg, eeer;
8981
8982 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8983 eeer = CSR_READ(sc, WMREG_EEER);
8984
8985 if ((sc->sc_flags & WM_F_EEE) != 0) {
8986 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8987 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8988 | EEER_LPI_FC);
8989 } else {
8990 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8991 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8992 | EEER_LPI_FC);
8993 }
8994
8995 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8996 CSR_WRITE(sc, WMREG_EEER, eeer);
8997 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8998 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8999 }
9000
9001 /*
9002 * Workarounds (mainly PHY related).
9003 * Basically, PHY's workarounds are in the PHY drivers.
9004 */
9005
9006 /* Work-around for 82566 Kumeran PCS lock loss */
9007 static void
9008 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9009 {
9010 int miistatus, active, i;
9011 int reg;
9012
9013 miistatus = sc->sc_mii.mii_media_status;
9014
9015 /* If the link is not up, do nothing */
9016 	if ((miistatus & IFM_ACTIVE) == 0)
9017 return;
9018
9019 active = sc->sc_mii.mii_media_active;
9020
9021 /* Nothing to do if the link is other than 1Gbps */
9022 if (IFM_SUBTYPE(active) != IFM_1000_T)
9023 return;
9024
9025 for (i = 0; i < 10; i++) {
9026 /* read twice */
9027 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9028 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9029 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9030 goto out; /* GOOD! */
9031
9032 /* Reset the PHY */
9033 wm_gmii_reset(sc);
9034 delay(5*1000);
9035 }
9036
9037 /* Disable GigE link negotiation */
9038 reg = CSR_READ(sc, WMREG_PHY_CTRL);
9039 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9040 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9041
9042 /*
9043 * Call gig speed drop workaround on Gig disable before accessing
9044 * any PHY registers.
9045 */
9046 wm_gig_downshift_workaround_ich8lan(sc);
9047
9048 out:
9049 return;
9050 }
9051
9052 /* WOL from S5 stops working */
9053 static void
9054 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9055 {
9056 uint16_t kmrn_reg;
9057
9058 /* Only for igp3 */
9059 if (sc->sc_phytype == WMPHY_IGP_3) {
9060 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9061 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9062 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9063 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9064 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9065 }
9066 }
9067
9068 /*
9069 * Workaround for pch's PHYs
9070 * XXX should be moved to new PHY driver?
9071 */
9072 static void
9073 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9074 {
9075 if (sc->sc_phytype == WMPHY_82577)
9076 wm_set_mdio_slow_mode_hv(sc);
9077
9078 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9079
9080 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9081
9082 /* 82578 */
9083 if (sc->sc_phytype == WMPHY_82578) {
9084 /* PCH rev. < 3 */
9085 if (sc->sc_rev < 3) {
9086 /* XXX 6 bit shift? Why? Is it page2? */
9087 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9088 0x66c0);
9089 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9090 0xffff);
9091 }
9092
9093 /* XXX phy rev. < 2 */
9094 }
9095
9096 /* Select page 0 */
9097
9098 /* XXX acquire semaphore */
9099 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9100 /* XXX release semaphore */
9101
9102 	/*
9103 	 * Configure the K1 Si workaround during PHY reset, assuming there
9104 	 * is link, so that K1 is disabled when the link runs at 1Gbps.
9105 	 */
9106 wm_k1_gig_workaround_hv(sc, 1);
9107 }
9108
9109 static void
9110 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9111 {
9112
9113 wm_set_mdio_slow_mode_hv(sc);
9114 }
9115
9116 static void
9117 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9118 {
9119 int k1_enable = sc->sc_nvm_k1_enabled;
9120
9121 /* XXX acquire semaphore */
9122
9123 if (link) {
9124 k1_enable = 0;
9125
9126 /* Link stall fix for link up */
9127 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9128 } else {
9129 /* Link stall fix for link down */
9130 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9131 }
9132
9133 wm_configure_k1_ich8lan(sc, k1_enable);
9134
9135 /* XXX release semaphore */
9136 }
9137
9138 static void
9139 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9140 {
9141 uint32_t reg;
9142
9143 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9144 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9145 reg | HV_KMRN_MDIO_SLOW);
9146 }
9147
9148 static void
9149 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9150 {
9151 uint32_t ctrl, ctrl_ext, tmp;
9152 uint16_t kmrn_reg;
9153
9154 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9155
9156 if (k1_enable)
9157 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9158 else
9159 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9160
9161 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9162
9163 delay(20);
9164
9165 ctrl = CSR_READ(sc, WMREG_CTRL);
9166 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9167
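	/*
	 * Briefly force 10Mb/s with the speed-select bypass set so the
	 * MAC/PHY interface restarts and latches the new K1 setting,
	 * then restore the original CTRL/CTRL_EXT values; this is our
	 * reading of the equivalent sequence in Intel's e1000 code.
	 */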
9168 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9169 tmp |= CTRL_FRCSPD;
9170
9171 CSR_WRITE(sc, WMREG_CTRL, tmp);
9172 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9173 CSR_WRITE_FLUSH(sc);
9174 delay(20);
9175
9176 CSR_WRITE(sc, WMREG_CTRL, ctrl);
9177 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9178 CSR_WRITE_FLUSH(sc);
9179 delay(20);
9180 }
9181
9182 /* Special case: the 82575 needs to do manual init ... */
9183 static void
9184 wm_reset_init_script_82575(struct wm_softc *sc)
9185 {
9186 	/*
9187 	 * Remark: this is untested code - we have no board without EEPROM.
9188 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
9189 	 */
9190
9191 /* SerDes configuration via SERDESCTRL */
9192 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9193 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9194 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9195 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9196
9197 /* CCM configuration via CCMCTL register */
9198 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9199 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9200
9201 /* PCIe lanes configuration */
9202 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9203 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9204 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9205 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9206
9207 /* PCIe PLL Configuration */
9208 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9209 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9210 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9211 }
9212