/*	$NetBSD: if_wm.c,v 1.506 2017/04/05 10:44:35 msaitoh Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Check XXX'ed comments
76 * - Disable D0 LPLU on 8257[12356], 82580 and I350.
77 * - TX Multi queue improvement (refine queue selection logic)
78 * - Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
80 * - Virtual Function
81 * - Set LED correctly (based on contents in EEPROM)
82 * - Rework how parameters are loaded from the EEPROM.
83 * - Image Unique ID
84 */
85
86 #include <sys/cdefs.h>
87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.506 2017/04/05 10:44:35 msaitoh Exp $");
88
89 #ifdef _KERNEL_OPT
90 #include "opt_net_mpsafe.h"
91 #include "opt_if_wm.h"
92 #endif
93
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/callout.h>
97 #include <sys/mbuf.h>
98 #include <sys/malloc.h>
99 #include <sys/kmem.h>
100 #include <sys/kernel.h>
101 #include <sys/socket.h>
102 #include <sys/ioctl.h>
103 #include <sys/errno.h>
104 #include <sys/device.h>
105 #include <sys/queue.h>
106 #include <sys/syslog.h>
107 #include <sys/interrupt.h>
108 #include <sys/cpu.h>
109 #include <sys/pcq.h>
110
111 #include <sys/rndsource.h>
112
113 #include <net/if.h>
114 #include <net/if_dl.h>
115 #include <net/if_media.h>
116 #include <net/if_ether.h>
117
118 #include <net/bpf.h>
119
120 #include <netinet/in.h> /* XXX for struct ip */
121 #include <netinet/in_systm.h> /* XXX for struct ip */
122 #include <netinet/ip.h> /* XXX for struct ip */
123 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
124 #include <netinet/tcp.h> /* XXX for struct tcphdr */
125
126 #include <sys/bus.h>
127 #include <sys/intr.h>
128 #include <machine/endian.h>
129
130 #include <dev/mii/mii.h>
131 #include <dev/mii/miivar.h>
132 #include <dev/mii/miidevs.h>
133 #include <dev/mii/mii_bitbang.h>
134 #include <dev/mii/ikphyreg.h>
135 #include <dev/mii/igphyreg.h>
136 #include <dev/mii/igphyvar.h>
137 #include <dev/mii/inbmphyreg.h>
138
139 #include <dev/pci/pcireg.h>
140 #include <dev/pci/pcivar.h>
141 #include <dev/pci/pcidevs.h>
142
143 #include <dev/pci/if_wmreg.h>
144 #include <dev/pci/if_wmvar.h>
145
146 #ifdef WM_DEBUG
147 #define WM_DEBUG_LINK __BIT(0)
148 #define WM_DEBUG_TX __BIT(1)
149 #define WM_DEBUG_RX __BIT(2)
150 #define WM_DEBUG_GMII __BIT(3)
151 #define WM_DEBUG_MANAGE __BIT(4)
152 #define WM_DEBUG_NVM __BIT(5)
153 #define WM_DEBUG_INIT __BIT(6)
154 #define WM_DEBUG_LOCK __BIT(7)
155 int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
156 | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
157
158 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
159 #else
160 #define DPRINTF(x, y) /* nothing */
161 #endif /* WM_DEBUG */
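
/*
 * Illustrative use of the debug macros above (a sketch, not extra driver
 * code; "sc" is assumed to be a softc pointer in scope): the first
 * argument selects one or more WM_DEBUG_* classes, the second is a
 * parenthesized printf() argument list.
 */
#if 0
	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
#endif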
162
163 #ifdef NET_MPSAFE
164 #define WM_MPSAFE 1
165 #define CALLOUT_FLAGS CALLOUT_MPSAFE
166 #else
167 #define CALLOUT_FLAGS 0
168 #endif
169
/*
 * The maximum number of interrupts this driver can use.
 */
173 #define WM_MAX_NQUEUEINTR 16
174 #define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1)
175
176 /*
177 * Transmit descriptor list size. Due to errata, we can only have
178 * 256 hardware descriptors in the ring on < 82544, but we use 4096
179 * on >= 82544. We tell the upper layers that they can queue a lot
180 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
181 * of them at a time.
182 *
183 * We allow up to 256 (!) DMA segments per packet. Pathological packet
184 * chains containing many small mbufs have been observed in zero-copy
185 * situations with jumbo frames.
186 */
187 #define WM_NTXSEGS 256
188 #define WM_IFQUEUELEN 256
189 #define WM_TXQUEUELEN_MAX 64
190 #define WM_TXQUEUELEN_MAX_82547 16
191 #define WM_TXQUEUELEN(txq) ((txq)->txq_num)
192 #define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1)
193 #define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8)
194 #define WM_NTXDESC_82542 256
195 #define WM_NTXDESC_82544 4096
196 #define WM_NTXDESC(txq) ((txq)->txq_ndesc)
197 #define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1)
198 #define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize)
199 #define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq))
200 #define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq))
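
/*
 * Sketch of the ring index arithmetic above (illustration only): because
 * the descriptor and job counts are powers of two, advancing an index is
 * a mask rather than a modulo.  With WM_NTXDESC_82544 (4096) descriptors,
 * index 4095 wraps back to 0:
 */
#if 0
	int next = (4095 + 1) & (WM_NTXDESC_82544 - 1);	/* next == 0 */
#endif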
201
202 #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
203
204 #define WM_TXINTERQSIZE 256
205
206 /*
207 * Receive descriptor list size. We have one Rx buffer for normal
208 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
209 * packet. We allocate 256 receive descriptors, each with a 2k
210 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
211 */
212 #define WM_NRXDESC 256
213 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
214 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
215 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
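
/*
 * Rough arithmetic behind the comment above (illustration, assuming ~9k
 * jumbo frames): one jumbo frame needs about 9014 / 2048 -> 5 MCLBYTES
 * clusters, so 256 descriptors / 5 leaves room for roughly 50 jumbo
 * packets.
 */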
216
217 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
218 #define WM_RX_PROCESS_LIMIT_DEFAULT 100U
219 #endif
220 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
221 #define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U
222 #endif
223
224 typedef union txdescs {
225 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
226 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
227 } txdescs_t;
228
229 typedef union rxdescs {
230 wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
231 ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
232 nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
233 } rxdescs_t;
234
235 #define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x))
236 #define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x))
237
238 /*
239 * Software state for transmit jobs.
240 */
241 struct wm_txsoft {
242 struct mbuf *txs_mbuf; /* head of our mbuf chain */
243 bus_dmamap_t txs_dmamap; /* our DMA map */
244 int txs_firstdesc; /* first descriptor in packet */
245 int txs_lastdesc; /* last descriptor in packet */
246 int txs_ndesc; /* # of descriptors used */
247 };
248
249 /*
250 * Software state for receive buffers. Each descriptor gets a
251 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
252 * more than one buffer, we chain them together.
253 */
254 struct wm_rxsoft {
255 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
256 bus_dmamap_t rxs_dmamap; /* our DMA map */
257 };
258
259 #define WM_LINKUP_TIMEOUT 50
260
261 static uint16_t swfwphysem[] = {
262 SWFW_PHY0_SM,
263 SWFW_PHY1_SM,
264 SWFW_PHY2_SM,
265 SWFW_PHY3_SM
266 };
267
268 static const uint32_t wm_82580_rxpbs_table[] = {
269 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
270 };
271
272 struct wm_softc;
273
274 #ifdef WM_EVENT_COUNTERS
275 #define WM_Q_EVCNT_DEFINE(qname, evname) \
276 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
277 struct evcnt qname##_ev_##evname;
278
279 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \
	do {								\
281 snprintf((q)->qname##_##evname##_evcnt_name, \
282 sizeof((q)->qname##_##evname##_evcnt_name), \
283 "%s%02d%s", #qname, (qnum), #evname); \
284 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \
285 (evtype), NULL, (xname), \
286 (q)->qname##_##evname##_evcnt_name); \
	} while (/*CONSTCOND*/0)
288
289 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
290 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
291
292 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
293 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
294
295 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \
296 evcnt_detach(&(q)->qname##_ev_##evname);
297 #endif /* WM_EVENT_COUNTERS */
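
/*
 * Example expansion of the per-queue counter macros above (a sketch, not
 * extra driver code; "txq" and "xname" are assumed locals): for queue 0,
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares txq_txdw_evcnt_name[] and
 * txq_ev_txdw, and the attach below registers the counter under the name
 * "txq00txdw".
 */
#if 0
	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname);
#endif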
298
299 struct wm_txqueue {
300 kmutex_t *txq_lock; /* lock for tx operations */
301
302 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */
303
304 /* Software state for the transmit descriptors. */
305 int txq_num; /* must be a power of two */
306 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
307
308 /* TX control data structures. */
309 int txq_ndesc; /* must be a power of two */
310 size_t txq_descsize; /* a tx descriptor size */
311 txdescs_t *txq_descs_u;
312 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */
313 bus_dma_segment_t txq_desc_seg; /* control data segment */
314 int txq_desc_rseg; /* real number of control segment */
315 #define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr
316 #define txq_descs txq_descs_u->sctxu_txdescs
317 #define txq_nq_descs txq_descs_u->sctxu_nq_txdescs
318
319 bus_addr_t txq_tdt_reg; /* offset of TDT register */
320
321 int txq_free; /* number of free Tx descriptors */
322 int txq_next; /* next ready Tx descriptor */
323
324 int txq_sfree; /* number of free Tx jobs */
325 int txq_snext; /* next free Tx job */
326 int txq_sdirty; /* dirty Tx jobs */
327
328 /* These 4 variables are used only on the 82547. */
329 int txq_fifo_size; /* Tx FIFO size */
330 int txq_fifo_head; /* current head of FIFO */
331 uint32_t txq_fifo_addr; /* internal address of start of FIFO */
332 int txq_fifo_stall; /* Tx FIFO is stalled */
333
	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
338 pcq_t *txq_interq;
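	/*
	 * Illustrative sketch of how the intermediate queue is meant to be
	 * used (an assumption about the flow, not a change): a CPU that
	 * cannot take the H/W queue immediately enqueues a packet with
	 * pcq_put(txq->txq_interq, m), and the CPU owning the queue later
	 * drains it with pcq_get(txq->txq_interq) under txq_lock.
	 */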
339
	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
344 int txq_flags; /* flags for H/W queue, see below */
345 #define WM_TXQ_NO_SPACE 0x1
346
347 bool txq_stopping;
348
349 uint32_t txq_packets; /* for AIM */
350 uint32_t txq_bytes; /* for AIM */
351 #ifdef WM_EVENT_COUNTERS
352 WM_Q_EVCNT_DEFINE(txq, txsstall) /* Tx stalled due to no txs */
353 WM_Q_EVCNT_DEFINE(txq, txdstall) /* Tx stalled due to no txd */
354 WM_Q_EVCNT_DEFINE(txq, txfifo_stall) /* Tx FIFO stalls (82547) */
355 WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */
356 WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */
357 /* XXX not used? */
358
359 WM_Q_EVCNT_DEFINE(txq, txipsum) /* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)	/* TCP/UDP cksums comp. out-bound */
361 WM_Q_EVCNT_DEFINE(txq, txtusum6) /* TCP/UDP v6 cksums comp. out-bound */
362 WM_Q_EVCNT_DEFINE(txq, txtso) /* TCP seg offload out-bound (IPv4) */
363 WM_Q_EVCNT_DEFINE(txq, txtso6) /* TCP seg offload out-bound (IPv6) */
364 WM_Q_EVCNT_DEFINE(txq, txtsopain) /* painful header manip. for TSO */
365
	WM_Q_EVCNT_DEFINE(txq, txdrop)	/* Tx packets dropped (too many segs) */
367
368 WM_Q_EVCNT_DEFINE(txq, tu) /* Tx underrun */
369
370 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
371 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
372 #endif /* WM_EVENT_COUNTERS */
373 };
374
375 struct wm_rxqueue {
376 kmutex_t *rxq_lock; /* lock for rx operations */
377
378 struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */
379
380 /* Software state for the receive descriptors. */
381 struct wm_rxsoft rxq_soft[WM_NRXDESC];
382
383 /* RX control data structures. */
384 int rxq_ndesc; /* must be a power of two */
385 size_t rxq_descsize; /* a rx descriptor size */
386 rxdescs_t *rxq_descs_u;
387 bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */
388 bus_dma_segment_t rxq_desc_seg; /* control data segment */
389 int rxq_desc_rseg; /* real number of control segment */
390 #define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr
391 #define rxq_descs rxq_descs_u->sctxu_rxdescs
392 #define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs
393 #define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs
394
395 bus_addr_t rxq_rdt_reg; /* offset of RDT register */
396
397 int rxq_ptr; /* next ready Rx desc/queue ent */
398 int rxq_discard;
399 int rxq_len;
400 struct mbuf *rxq_head;
401 struct mbuf *rxq_tail;
402 struct mbuf **rxq_tailp;
403
404 bool rxq_stopping;
405
406 uint32_t rxq_packets; /* for AIM */
407 uint32_t rxq_bytes; /* for AIM */
408 #ifdef WM_EVENT_COUNTERS
409 WM_Q_EVCNT_DEFINE(rxq, rxintr); /* Rx interrupts */
410
411 WM_Q_EVCNT_DEFINE(rxq, rxipsum); /* IP checksums checked in-bound */
412 WM_Q_EVCNT_DEFINE(rxq, rxtusum); /* TCP/UDP cksums checked in-bound */
413 #endif
414 };
415
416 struct wm_queue {
417 int wmq_id; /* index of transmit and receive queues */
418 int wmq_intr_idx; /* index of MSI-X tables */
419
420 uint32_t wmq_itr; /* interrupt interval per queue. */
421 bool wmq_set_itr;
422
423 struct wm_txqueue wmq_txq;
424 struct wm_rxqueue wmq_rxq;
425
426 void *wmq_si;
427 };
428
429 struct wm_phyop {
430 int (*acquire)(struct wm_softc *);
431 void (*release)(struct wm_softc *);
432 int reset_delay_us;
433 };
434
435 /*
436 * Software state per device.
437 */
438 struct wm_softc {
439 device_t sc_dev; /* generic device information */
440 bus_space_tag_t sc_st; /* bus space tag */
441 bus_space_handle_t sc_sh; /* bus space handle */
442 bus_size_t sc_ss; /* bus space size */
443 bus_space_tag_t sc_iot; /* I/O space tag */
444 bus_space_handle_t sc_ioh; /* I/O space handle */
445 bus_size_t sc_ios; /* I/O space size */
446 bus_space_tag_t sc_flasht; /* flash registers space tag */
447 bus_space_handle_t sc_flashh; /* flash registers space handle */
448 bus_size_t sc_flashs; /* flash registers space size */
449 off_t sc_flashreg_offset; /*
450 * offset to flash registers from
451 * start of BAR
452 */
453 bus_dma_tag_t sc_dmat; /* bus DMA tag */
454
455 struct ethercom sc_ethercom; /* ethernet common data */
456 struct mii_data sc_mii; /* MII/media information */
457
458 pci_chipset_tag_t sc_pc;
459 pcitag_t sc_pcitag;
460 int sc_bus_speed; /* PCI/PCIX bus speed */
461 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
462
463 uint16_t sc_pcidevid; /* PCI device ID */
464 wm_chip_type sc_type; /* MAC type */
465 int sc_rev; /* MAC revision */
466 wm_phy_type sc_phytype; /* PHY type */
467 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/
468 #define WM_MEDIATYPE_UNKNOWN 0x00
469 #define WM_MEDIATYPE_FIBER 0x01
470 #define WM_MEDIATYPE_COPPER 0x02
471 #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
472 int sc_funcid; /* unit number of the chip (0 to 3) */
473 int sc_flags; /* flags; see below */
474 int sc_if_flags; /* last if_flags */
475 int sc_flowflags; /* 802.3x flow control flags */
476 int sc_align_tweak;
477
478 void *sc_ihs[WM_MAX_NINTR]; /*
479 * interrupt cookie.
480 * legacy and msi use sc_ihs[0].
481 */
482 pci_intr_handle_t *sc_intrs; /* legacy and msi use sc_intrs[0] */
483 int sc_nintrs; /* number of interrupts */
484
485 int sc_link_intr_idx; /* index of MSI-X tables */
486
487 callout_t sc_tick_ch; /* tick callout */
488 bool sc_core_stopping;
489
490 int sc_nvm_ver_major;
491 int sc_nvm_ver_minor;
492 int sc_nvm_ver_build;
493 int sc_nvm_addrbits; /* NVM address bits */
494 unsigned int sc_nvm_wordsize; /* NVM word size */
495 int sc_ich8_flash_base;
496 int sc_ich8_flash_bank_size;
497 int sc_nvm_k1_enabled;
498
499 int sc_nqueues;
500 struct wm_queue *sc_queue;
501 u_int sc_rx_process_limit; /* Rx processing repeat limit in softint */
502 u_int sc_rx_intr_process_limit; /* Rx processing repeat limit in H/W intr */
503
504 int sc_affinity_offset;
505
506 #ifdef WM_EVENT_COUNTERS
507 /* Event counters. */
508 struct evcnt sc_ev_linkintr; /* Link interrupts */
509
510 /* WM_T_82542_2_1 only */
511 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
512 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
513 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
514 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
515 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
516 #endif /* WM_EVENT_COUNTERS */
517
	/* This variable is used only on the 82547. */
519 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
520
521 uint32_t sc_ctrl; /* prototype CTRL register */
522 #if 0
523 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
524 #endif
525 uint32_t sc_icr; /* prototype interrupt bits */
526 uint32_t sc_itr_init; /* prototype intr throttling reg */
527 uint32_t sc_tctl; /* prototype TCTL register */
528 uint32_t sc_rctl; /* prototype RCTL register */
529 uint32_t sc_txcw; /* prototype TXCW register */
530 uint32_t sc_tipg; /* prototype TIPG register */
531 uint32_t sc_fcrtl; /* prototype FCRTL register */
532 uint32_t sc_pba; /* prototype PBA register */
533
534 int sc_tbi_linkup; /* TBI link status */
535 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
536 int sc_tbi_serdes_ticks; /* tbi ticks */
537
538 int sc_mchash_type; /* multicast filter offset */
539
540 krndsource_t rnd_source; /* random source */
541
542 struct if_percpuq *sc_ipq; /* softint-based input queues */
543
544 kmutex_t *sc_core_lock; /* lock for softc operations */
545 kmutex_t *sc_ich_phymtx; /*
546 * 82574/82583/ICH/PCH specific PHY
547 * mutex. For 82574/82583, the mutex
548 * is used for both PHY and NVM.
549 */
550 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */
551
552 struct wm_phyop phy;
553 };
554
555 #define WM_CORE_LOCK(_sc) if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
556 #define WM_CORE_UNLOCK(_sc) if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
557 #define WM_CORE_LOCKED(_sc) (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
558
559 #define WM_RXCHAIN_RESET(rxq) \
560 do { \
561 (rxq)->rxq_tailp = &(rxq)->rxq_head; \
562 *(rxq)->rxq_tailp = NULL; \
563 (rxq)->rxq_len = 0; \
564 } while (/*CONSTCOND*/0)
565
566 #define WM_RXCHAIN_LINK(rxq, m) \
567 do { \
568 *(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \
569 (rxq)->rxq_tailp = &(m)->m_next; \
570 } while (/*CONSTCOND*/0)
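
/*
 * Sketch of how the two macros above cooperate while reassembling a frame
 * that spans several descriptors (illustration only; "rxq", "m" and "m2"
 * are assumed locals): RESET starts an empty chain, each LINK appends one
 * 2k buffer and leaves rxq_tailp pointing at the m_next slot of the newly
 * appended mbuf.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);
	WM_RXCHAIN_LINK(rxq, m);	/* first buffer of the frame */
	WM_RXCHAIN_LINK(rxq, m2);	/* continuation buffer */
#endif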
571
572 #ifdef WM_EVENT_COUNTERS
573 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
574 #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
575
576 #define WM_Q_EVCNT_INCR(qname, evname) \
577 WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
578 #define WM_Q_EVCNT_ADD(qname, evname, val) \
579 WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
580 #else /* !WM_EVENT_COUNTERS */
581 #define WM_EVCNT_INCR(ev) /* nothing */
582 #define WM_EVCNT_ADD(ev, val) /* nothing */
583
584 #define WM_Q_EVCNT_INCR(qname, evname) /* nothing */
585 #define WM_Q_EVCNT_ADD(qname, evname, val) /* nothing */
586 #endif /* !WM_EVENT_COUNTERS */
587
588 #define CSR_READ(sc, reg) \
589 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
590 #define CSR_WRITE(sc, reg, val) \
591 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
592 #define CSR_WRITE_FLUSH(sc) \
593 (void) CSR_READ((sc), WMREG_STATUS)
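
/*
 * CSR_WRITE_FLUSH() reads STATUS purely for its side effect: the read
 * forces posted register writes out to the device.  A typical pattern
 * looks like the sketch below ("sc" assumed in scope; the register is
 * only an example).
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);
	delay(10);
#endif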
594
595 #define ICH8_FLASH_READ32(sc, reg) \
596 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \
597 (reg) + sc->sc_flashreg_offset)
598 #define ICH8_FLASH_WRITE32(sc, reg, data) \
599 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \
600 (reg) + sc->sc_flashreg_offset, (data))
601
602 #define ICH8_FLASH_READ16(sc, reg) \
603 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \
604 (reg) + sc->sc_flashreg_offset)
605 #define ICH8_FLASH_WRITE16(sc, reg, data) \
606 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \
607 (reg) + sc->sc_flashreg_offset, (data))
608
609 #define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
610 #define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
611
612 #define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU)
613 #define WM_CDTXADDR_HI(txq, x) \
614 (sizeof(bus_addr_t) == 8 ? \
615 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
616
617 #define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
618 #define WM_CDRXADDR_HI(rxq, x) \
619 (sizeof(bus_addr_t) == 8 ? \
620 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
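
/*
 * The _LO/_HI pairs above split a ring's bus address into the 32-bit
 * halves the chip expects; with a 32-bit bus_addr_t the high half is
 * simply 0.  Sketch of programming a Tx ring base ("sc" and "txq"
 * assumed in scope, register names assumed from if_wmreg.h):
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
#endif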
621
622 /*
623 * Register read/write functions.
624 * Other than CSR_{READ|WRITE}().
625 */
626 #if 0
627 static inline uint32_t wm_io_read(struct wm_softc *, int);
628 #endif
629 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
630 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
631 uint32_t, uint32_t);
632 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
633
634 /*
635 * Descriptor sync/init functions.
636 */
637 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
638 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
639 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
640
641 /*
642 * Device driver interface functions and commonly used functions.
643 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
644 */
645 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
646 static int wm_match(device_t, cfdata_t, void *);
647 static void wm_attach(device_t, device_t, void *);
648 static int wm_detach(device_t, int);
649 static bool wm_suspend(device_t, const pmf_qual_t *);
650 static bool wm_resume(device_t, const pmf_qual_t *);
651 static void wm_watchdog(struct ifnet *);
652 static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
653 static void wm_tick(void *);
654 static int wm_ifflags_cb(struct ethercom *);
655 static int wm_ioctl(struct ifnet *, u_long, void *);
656 /* MAC address related */
657 static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
658 static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
659 static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
660 static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
661 static void wm_set_filter(struct wm_softc *);
662 /* Reset and init related */
663 static void wm_set_vlan(struct wm_softc *);
664 static void wm_set_pcie_completion_timeout(struct wm_softc *);
665 static void wm_get_auto_rd_done(struct wm_softc *);
666 static void wm_lan_init_done(struct wm_softc *);
667 static void wm_get_cfg_done(struct wm_softc *);
668 static void wm_initialize_hardware_bits(struct wm_softc *);
669 static uint32_t wm_rxpbs_adjust_82580(uint32_t);
670 static void wm_reset_phy(struct wm_softc *);
671 static void wm_flush_desc_rings(struct wm_softc *);
672 static void wm_reset(struct wm_softc *);
673 static int wm_add_rxbuf(struct wm_rxqueue *, int);
674 static void wm_rxdrain(struct wm_rxqueue *);
675 static void wm_rss_getkey(uint8_t *);
676 static void wm_init_rss(struct wm_softc *);
677 static void wm_adjust_qnum(struct wm_softc *, int);
678 static inline bool wm_is_using_msix(struct wm_softc *);
679 static inline bool wm_is_using_multiqueue(struct wm_softc *);
680 static int wm_softint_establish(struct wm_softc *, int, int);
681 static int wm_setup_legacy(struct wm_softc *);
682 static int wm_setup_msix(struct wm_softc *);
683 static int wm_init(struct ifnet *);
684 static int wm_init_locked(struct ifnet *);
685 static void wm_turnon(struct wm_softc *);
686 static void wm_turnoff(struct wm_softc *);
687 static void wm_stop(struct ifnet *, int);
688 static void wm_stop_locked(struct ifnet *, int);
689 static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
690 static void wm_82547_txfifo_stall(void *);
691 static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
692 static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
693 /* DMA related */
694 static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
695 static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
696 static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
697 static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
698 struct wm_txqueue *);
699 static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
700 static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
701 static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
702 struct wm_rxqueue *);
703 static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
704 static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
705 static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
706 static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
707 static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
708 static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
709 static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
710 struct wm_txqueue *);
711 static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
712 struct wm_rxqueue *);
713 static int wm_alloc_txrx_queues(struct wm_softc *);
714 static void wm_free_txrx_queues(struct wm_softc *);
715 static int wm_init_txrx_queues(struct wm_softc *);
716 /* Start */
717 static int wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
718 struct wm_txsoft *, uint32_t *, uint8_t *);
719 static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
720 static void wm_start(struct ifnet *);
721 static void wm_start_locked(struct ifnet *);
722 static int wm_transmit(struct ifnet *, struct mbuf *);
723 static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
724 static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
725 static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
726 struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
727 static void wm_nq_start(struct ifnet *);
728 static void wm_nq_start_locked(struct ifnet *);
729 static int wm_nq_transmit(struct ifnet *, struct mbuf *);
730 static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
731 static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
732 static void wm_deferred_start_locked(struct wm_txqueue *);
733 static void wm_handle_queue(void *);
734 /* Interrupt */
735 static int wm_txeof(struct wm_softc *, struct wm_txqueue *);
736 static void wm_rxeof(struct wm_rxqueue *, u_int);
737 static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
738 static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
739 static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
740 static void wm_linkintr(struct wm_softc *, uint32_t);
741 static int wm_intr_legacy(void *);
742 static inline void wm_txrxintr_disable(struct wm_queue *);
743 static inline void wm_txrxintr_enable(struct wm_queue *);
744 static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
745 static int wm_txrxintr_msix(void *);
746 static int wm_linkintr_msix(void *);
747
748 /*
749 * Media related.
750 * GMII, SGMII, TBI, SERDES and SFP.
751 */
752 /* Common */
753 static void wm_tbi_serdes_set_linkled(struct wm_softc *);
754 /* GMII related */
755 static void wm_gmii_reset(struct wm_softc *);
756 static void wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
757 static int wm_get_phy_id_82575(struct wm_softc *);
758 static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
759 static int wm_gmii_mediachange(struct ifnet *);
760 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
761 static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
762 static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
763 static int wm_gmii_i82543_readreg(device_t, int, int);
764 static void wm_gmii_i82543_writereg(device_t, int, int, int);
765 static int wm_gmii_mdic_readreg(device_t, int, int);
766 static void wm_gmii_mdic_writereg(device_t, int, int, int);
767 static int wm_gmii_i82544_readreg(device_t, int, int);
768 static void wm_gmii_i82544_writereg(device_t, int, int, int);
769 static int wm_gmii_i80003_readreg(device_t, int, int);
770 static void wm_gmii_i80003_writereg(device_t, int, int, int);
771 static int wm_gmii_bm_readreg(device_t, int, int);
772 static void wm_gmii_bm_writereg(device_t, int, int, int);
773 static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
774 static int wm_gmii_hv_readreg(device_t, int, int);
775 static int wm_gmii_hv_readreg_locked(device_t, int, int);
776 static void wm_gmii_hv_writereg(device_t, int, int, int);
777 static void wm_gmii_hv_writereg_locked(device_t, int, int, int);
778 static int wm_gmii_82580_readreg(device_t, int, int);
779 static void wm_gmii_82580_writereg(device_t, int, int, int);
780 static int wm_gmii_gs40g_readreg(device_t, int, int);
781 static void wm_gmii_gs40g_writereg(device_t, int, int, int);
782 static void wm_gmii_statchg(struct ifnet *);
783 /*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
787 */
788 static int wm_kmrn_readreg(struct wm_softc *, int);
789 static int wm_kmrn_readreg_locked(struct wm_softc *, int);
790 static void wm_kmrn_writereg(struct wm_softc *, int, int);
791 static void wm_kmrn_writereg_locked(struct wm_softc *, int, int);
792 /* SGMII */
793 static bool wm_sgmii_uses_mdio(struct wm_softc *);
794 static int wm_sgmii_readreg(device_t, int, int);
795 static void wm_sgmii_writereg(device_t, int, int, int);
796 /* TBI related */
797 static void wm_tbi_mediainit(struct wm_softc *);
798 static int wm_tbi_mediachange(struct ifnet *);
799 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
800 static int wm_check_for_link(struct wm_softc *);
801 static void wm_tbi_tick(struct wm_softc *);
802 /* SERDES related */
803 static void wm_serdes_power_up_link_82575(struct wm_softc *);
804 static int wm_serdes_mediachange(struct ifnet *);
805 static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
806 static void wm_serdes_tick(struct wm_softc *);
807 /* SFP related */
808 static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
809 static uint32_t wm_sfp_get_media_type(struct wm_softc *);
810
811 /*
812 * NVM related.
813 * Microwire, SPI (w/wo EERD) and Flash.
814 */
815 /* Misc functions */
816 static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
817 static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
818 static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
819 /* Microwire */
820 static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
821 /* SPI */
822 static int wm_nvm_ready_spi(struct wm_softc *);
823 static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Reading with EERD */
825 static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
826 static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
827 /* Flash */
828 static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
829 unsigned int *);
830 static int32_t wm_ich8_cycle_init(struct wm_softc *);
831 static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
832 static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
833 uint32_t *);
834 static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
835 static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
836 static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
837 static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
838 static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
839 /* iNVM */
840 static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
841 static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
843 static int wm_nvm_acquire(struct wm_softc *);
844 static void wm_nvm_release(struct wm_softc *);
845 static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
846 static int wm_nvm_get_flash_presence_i210(struct wm_softc *);
847 static int wm_nvm_validate_checksum(struct wm_softc *);
848 static void wm_nvm_version_invm(struct wm_softc *);
849 static void wm_nvm_version(struct wm_softc *);
850 static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
851
852 /*
853 * Hardware semaphores.
 * Very complex...
855 */
856 static int wm_get_null(struct wm_softc *);
857 static void wm_put_null(struct wm_softc *);
858 static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
859 static void wm_put_swsm_semaphore(struct wm_softc *);
860 static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
861 static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
862 static int wm_get_phy_82575(struct wm_softc *);
863 static void wm_put_phy_82575(struct wm_softc *);
864 static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
865 static void wm_put_swfwhw_semaphore(struct wm_softc *);
866 static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
867 static void wm_put_swflag_ich8lan(struct wm_softc *);
868 static int wm_get_nvm_ich8lan(struct wm_softc *); /* For NVM */
869 static void wm_put_nvm_ich8lan(struct wm_softc *);
870 static int wm_get_hw_semaphore_82573(struct wm_softc *);
871 static void wm_put_hw_semaphore_82573(struct wm_softc *);
872
873 /*
874 * Management mode and power management related subroutines.
875 * BMC, AMT, suspend/resume and EEE.
876 */
877 #if 0
878 static int wm_check_mng_mode(struct wm_softc *);
879 static int wm_check_mng_mode_ich8lan(struct wm_softc *);
880 static int wm_check_mng_mode_82574(struct wm_softc *);
881 static int wm_check_mng_mode_generic(struct wm_softc *);
882 #endif
883 static int wm_enable_mng_pass_thru(struct wm_softc *);
884 static bool wm_phy_resetisblocked(struct wm_softc *);
885 static void wm_get_hw_control(struct wm_softc *);
886 static void wm_release_hw_control(struct wm_softc *);
887 static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
888 static void wm_smbustopci(struct wm_softc *);
889 static void wm_init_manageability(struct wm_softc *);
890 static void wm_release_manageability(struct wm_softc *);
891 static void wm_get_wakeup(struct wm_softc *);
892 static void wm_ulp_disable(struct wm_softc *);
893 static void wm_enable_phy_wakeup(struct wm_softc *);
894 static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
895 static void wm_enable_wakeup(struct wm_softc *);
896 /* LPLU (Low Power Link Up) */
897 static void wm_lplu_d0_disable(struct wm_softc *);
898 static void wm_lplu_d0_disable_pch(struct wm_softc *);
899 /* EEE */
900 static void wm_set_eee_i350(struct wm_softc *);
901
902 /*
903 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
905 */
906 static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
907 static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
908 static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
909 static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
910 static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
911 static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
912 static void wm_configure_k1_ich8lan(struct wm_softc *, int);
913 static void wm_reset_init_script_82575(struct wm_softc *);
914 static void wm_reset_mdicnfg_82580(struct wm_softc *);
915 static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
916 static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
917 static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
918 static void wm_pll_workaround_i210(struct wm_softc *);
919
920 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
921 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
922
923 /*
924 * Devices supported by this driver.
925 */
926 static const struct wm_product {
927 pci_vendor_id_t wmp_vendor;
928 pci_product_id_t wmp_product;
929 const char *wmp_name;
930 wm_chip_type wmp_type;
931 uint32_t wmp_flags;
932 #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
933 #define WMP_F_FIBER WM_MEDIATYPE_FIBER
934 #define WMP_F_COPPER WM_MEDIATYPE_COPPER
935 #define WMP_F_SERDES WM_MEDIATYPE_SERDES
936 #define WMP_MEDIATYPE(x) ((x) & 0x03)
937 } wm_products[] = {
938 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
939 "Intel i82542 1000BASE-X Ethernet",
940 WM_T_82542_2_1, WMP_F_FIBER },
941
942 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
943 "Intel i82543GC 1000BASE-X Ethernet",
944 WM_T_82543, WMP_F_FIBER },
945
946 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
947 "Intel i82543GC 1000BASE-T Ethernet",
948 WM_T_82543, WMP_F_COPPER },
949
950 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
951 "Intel i82544EI 1000BASE-T Ethernet",
952 WM_T_82544, WMP_F_COPPER },
953
954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
955 "Intel i82544EI 1000BASE-X Ethernet",
956 WM_T_82544, WMP_F_FIBER },
957
958 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
959 "Intel i82544GC 1000BASE-T Ethernet",
960 WM_T_82544, WMP_F_COPPER },
961
962 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
963 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
964 WM_T_82544, WMP_F_COPPER },
965
966 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
967 "Intel i82540EM 1000BASE-T Ethernet",
968 WM_T_82540, WMP_F_COPPER },
969
970 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
971 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
972 WM_T_82540, WMP_F_COPPER },
973
974 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
975 "Intel i82540EP 1000BASE-T Ethernet",
976 WM_T_82540, WMP_F_COPPER },
977
978 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
979 "Intel i82540EP 1000BASE-T Ethernet",
980 WM_T_82540, WMP_F_COPPER },
981
982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
983 "Intel i82540EP 1000BASE-T Ethernet",
984 WM_T_82540, WMP_F_COPPER },
985
986 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
987 "Intel i82545EM 1000BASE-T Ethernet",
988 WM_T_82545, WMP_F_COPPER },
989
990 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
991 "Intel i82545GM 1000BASE-T Ethernet",
992 WM_T_82545_3, WMP_F_COPPER },
993
994 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
995 "Intel i82545GM 1000BASE-X Ethernet",
996 WM_T_82545_3, WMP_F_FIBER },
997
998 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
999 "Intel i82545GM Gigabit Ethernet (SERDES)",
1000 WM_T_82545_3, WMP_F_SERDES },
1001
1002 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
1003 "Intel i82546EB 1000BASE-T Ethernet",
1004 WM_T_82546, WMP_F_COPPER },
1005
1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
1007 "Intel i82546EB 1000BASE-T Ethernet",
1008 WM_T_82546, WMP_F_COPPER },
1009
1010 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
1011 "Intel i82545EM 1000BASE-X Ethernet",
1012 WM_T_82545, WMP_F_FIBER },
1013
1014 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
1015 "Intel i82546EB 1000BASE-X Ethernet",
1016 WM_T_82546, WMP_F_FIBER },
1017
1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
1019 "Intel i82546GB 1000BASE-T Ethernet",
1020 WM_T_82546_3, WMP_F_COPPER },
1021
1022 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
1023 "Intel i82546GB 1000BASE-X Ethernet",
1024 WM_T_82546_3, WMP_F_FIBER },
1025
1026 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
1027 "Intel i82546GB Gigabit Ethernet (SERDES)",
1028 WM_T_82546_3, WMP_F_SERDES },
1029
1030 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1031 "i82546GB quad-port Gigabit Ethernet",
1032 WM_T_82546_3, WMP_F_COPPER },
1033
1034 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1035 "i82546GB quad-port Gigabit Ethernet (KSP3)",
1036 WM_T_82546_3, WMP_F_COPPER },
1037
1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
1039 "Intel PRO/1000MT (82546GB)",
1040 WM_T_82546_3, WMP_F_COPPER },
1041
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
1043 "Intel i82541EI 1000BASE-T Ethernet",
1044 WM_T_82541, WMP_F_COPPER },
1045
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
1047 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1048 WM_T_82541, WMP_F_COPPER },
1049
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
1051 "Intel i82541EI Mobile 1000BASE-T Ethernet",
1052 WM_T_82541, WMP_F_COPPER },
1053
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
1055 "Intel i82541ER 1000BASE-T Ethernet",
1056 WM_T_82541_2, WMP_F_COPPER },
1057
1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
1059 "Intel i82541GI 1000BASE-T Ethernet",
1060 WM_T_82541_2, WMP_F_COPPER },
1061
1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
1063 "Intel i82541GI Mobile 1000BASE-T Ethernet",
1064 WM_T_82541_2, WMP_F_COPPER },
1065
1066 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
1067 "Intel i82541PI 1000BASE-T Ethernet",
1068 WM_T_82541_2, WMP_F_COPPER },
1069
1070 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
1071 "Intel i82547EI 1000BASE-T Ethernet",
1072 WM_T_82547, WMP_F_COPPER },
1073
1074 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
1075 "Intel i82547EI Mobile 1000BASE-T Ethernet",
1076 WM_T_82547, WMP_F_COPPER },
1077
1078 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
1079 "Intel i82547GI 1000BASE-T Ethernet",
1080 WM_T_82547_2, WMP_F_COPPER },
1081
1082 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
1083 "Intel PRO/1000 PT (82571EB)",
1084 WM_T_82571, WMP_F_COPPER },
1085
1086 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
1087 "Intel PRO/1000 PF (82571EB)",
1088 WM_T_82571, WMP_F_FIBER },
1089
1090 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
1091 "Intel PRO/1000 PB (82571EB)",
1092 WM_T_82571, WMP_F_SERDES },
1093
1094 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1095 "Intel PRO/1000 QT (82571EB)",
1096 WM_T_82571, WMP_F_COPPER },
1097
1098 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1099 "Intel PRO/1000 PT Quad Port Server Adapter",
1100 WM_T_82571, WMP_F_COPPER, },
1101
1102 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1103 "Intel Gigabit PT Quad Port Server ExpressModule",
1104 WM_T_82571, WMP_F_COPPER, },
1105
1106 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1107 "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1108 WM_T_82571, WMP_F_SERDES, },
1109
1110 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1111 "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1112 WM_T_82571, WMP_F_SERDES, },
1113
1114 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1115 "Intel 82571EB Quad 1000baseX Ethernet",
1116 WM_T_82571, WMP_F_FIBER, },
1117
1118 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
1119 "Intel i82572EI 1000baseT Ethernet",
1120 WM_T_82572, WMP_F_COPPER },
1121
1122 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
1123 "Intel i82572EI 1000baseX Ethernet",
1124 WM_T_82572, WMP_F_FIBER },
1125
1126 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
1127 "Intel i82572EI Gigabit Ethernet (SERDES)",
1128 WM_T_82572, WMP_F_SERDES },
1129
1130 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
1131 "Intel i82572EI 1000baseT Ethernet",
1132 WM_T_82572, WMP_F_COPPER },
1133
1134 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
1135 "Intel i82573E",
1136 WM_T_82573, WMP_F_COPPER },
1137
1138 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
1139 "Intel i82573E IAMT",
1140 WM_T_82573, WMP_F_COPPER },
1141
1142 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
1143 "Intel i82573L Gigabit Ethernet",
1144 WM_T_82573, WMP_F_COPPER },
1145
1146 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
1147 "Intel i82574L",
1148 WM_T_82574, WMP_F_COPPER },
1149
1150 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
1151 "Intel i82574L",
1152 WM_T_82574, WMP_F_COPPER },
1153
1154 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
1155 "Intel i82583V",
1156 WM_T_82583, WMP_F_COPPER },
1157
1158 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1159 "i80003 dual 1000baseT Ethernet",
1160 WM_T_80003, WMP_F_COPPER },
1161
1162 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1163 "i80003 dual 1000baseX Ethernet",
1164 WM_T_80003, WMP_F_COPPER },
1165
1166 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1167 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1168 WM_T_80003, WMP_F_SERDES },
1169
1170 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1171 "Intel i80003 1000baseT Ethernet",
1172 WM_T_80003, WMP_F_COPPER },
1173
1174 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1175 "Intel i80003 Gigabit Ethernet (SERDES)",
1176 WM_T_80003, WMP_F_SERDES },
1177
1178 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
1179 "Intel i82801H (M_AMT) LAN Controller",
1180 WM_T_ICH8, WMP_F_COPPER },
1181 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
1182 "Intel i82801H (AMT) LAN Controller",
1183 WM_T_ICH8, WMP_F_COPPER },
1184 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
1185 "Intel i82801H LAN Controller",
1186 WM_T_ICH8, WMP_F_COPPER },
1187 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1188 "Intel i82801H (IFE) 10/100 LAN Controller",
1189 WM_T_ICH8, WMP_F_COPPER },
1190 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
1191 "Intel i82801H (M) LAN Controller",
1192 WM_T_ICH8, WMP_F_COPPER },
1193 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
1194 "Intel i82801H IFE (GT) 10/100 LAN Controller",
1195 WM_T_ICH8, WMP_F_COPPER },
1196 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
1197 "Intel i82801H IFE (G) 10/100 LAN Controller",
1198 WM_T_ICH8, WMP_F_COPPER },
1199 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_82567V_3,
1200 "82567V-3 LAN Controller",
1201 WM_T_ICH8, WMP_F_COPPER },
1202 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1203 "82801I (AMT) LAN Controller",
1204 WM_T_ICH9, WMP_F_COPPER },
1205 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
1206 "82801I 10/100 LAN Controller",
1207 WM_T_ICH9, WMP_F_COPPER },
1208 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
1209 "82801I (G) 10/100 LAN Controller",
1210 WM_T_ICH9, WMP_F_COPPER },
1211 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
1212 "82801I (GT) 10/100 LAN Controller",
1213 WM_T_ICH9, WMP_F_COPPER },
1214 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
1215 "82801I (C) LAN Controller",
1216 WM_T_ICH9, WMP_F_COPPER },
1217 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
1218 "82801I mobile LAN Controller",
1219 WM_T_ICH9, WMP_F_COPPER },
1220 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1221 "82801I mobile (V) LAN Controller",
1222 WM_T_ICH9, WMP_F_COPPER },
1223 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1224 "82801I mobile (AMT) LAN Controller",
1225 WM_T_ICH9, WMP_F_COPPER },
1226 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
1227 "82567LM-4 LAN Controller",
1228 WM_T_ICH9, WMP_F_COPPER },
1229 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1230 "82567LM-2 LAN Controller",
1231 WM_T_ICH10, WMP_F_COPPER },
1232 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1233 "82567LF-2 LAN Controller",
1234 WM_T_ICH10, WMP_F_COPPER },
1235 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1236 "82567LM-3 LAN Controller",
1237 WM_T_ICH10, WMP_F_COPPER },
1238 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1239 "82567LF-3 LAN Controller",
1240 WM_T_ICH10, WMP_F_COPPER },
1241 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
1242 "82567V-2 LAN Controller",
1243 WM_T_ICH10, WMP_F_COPPER },
1244 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
1245 "82567V-3? LAN Controller",
1246 WM_T_ICH10, WMP_F_COPPER },
1247 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
1248 "HANKSVILLE LAN Controller",
1249 WM_T_ICH10, WMP_F_COPPER },
1250 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
1251 "PCH LAN (82577LM) Controller",
1252 WM_T_PCH, WMP_F_COPPER },
1253 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
1254 "PCH LAN (82577LC) Controller",
1255 WM_T_PCH, WMP_F_COPPER },
1256 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
1257 "PCH LAN (82578DM) Controller",
1258 WM_T_PCH, WMP_F_COPPER },
1259 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
1260 "PCH LAN (82578DC) Controller",
1261 WM_T_PCH, WMP_F_COPPER },
1262 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
1263 "PCH2 LAN (82579LM) Controller",
1264 WM_T_PCH2, WMP_F_COPPER },
1265 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1266 "PCH2 LAN (82579V) Controller",
1267 WM_T_PCH2, WMP_F_COPPER },
1268 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1269 "82575EB dual-1000baseT Ethernet",
1270 WM_T_82575, WMP_F_COPPER },
1271 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1272 "82575EB dual-1000baseX Ethernet (SERDES)",
1273 WM_T_82575, WMP_F_SERDES },
1274 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1275 "82575GB quad-1000baseT Ethernet",
1276 WM_T_82575, WMP_F_COPPER },
1277 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1278 "82575GB quad-1000baseT Ethernet (PM)",
1279 WM_T_82575, WMP_F_COPPER },
1280 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1281 "82576 1000BaseT Ethernet",
1282 WM_T_82576, WMP_F_COPPER },
1283 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1284 "82576 1000BaseX Ethernet",
1285 WM_T_82576, WMP_F_FIBER },
1286
1287 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1288 "82576 gigabit Ethernet (SERDES)",
1289 WM_T_82576, WMP_F_SERDES },
1290
1291 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1292 "82576 quad-1000BaseT Ethernet",
1293 WM_T_82576, WMP_F_COPPER },
1294
1295 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1296 "82576 Gigabit ET2 Quad Port Server Adapter",
1297 WM_T_82576, WMP_F_COPPER },
1298
1299 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1300 "82576 gigabit Ethernet",
1301 WM_T_82576, WMP_F_COPPER },
1302
1303 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1304 "82576 gigabit Ethernet (SERDES)",
1305 WM_T_82576, WMP_F_SERDES },
1306 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1307 "82576 quad-gigabit Ethernet (SERDES)",
1308 WM_T_82576, WMP_F_SERDES },
1309
1310 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1311 "82580 1000BaseT Ethernet",
1312 WM_T_82580, WMP_F_COPPER },
1313 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1314 "82580 1000BaseX Ethernet",
1315 WM_T_82580, WMP_F_FIBER },
1316
1317 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1318 "82580 1000BaseT Ethernet (SERDES)",
1319 WM_T_82580, WMP_F_SERDES },
1320
1321 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1322 "82580 gigabit Ethernet (SGMII)",
1323 WM_T_82580, WMP_F_COPPER },
1324 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1325 "82580 dual-1000BaseT Ethernet",
1326 WM_T_82580, WMP_F_COPPER },
1327
1328 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1329 "82580 quad-1000BaseX Ethernet",
1330 WM_T_82580, WMP_F_FIBER },
1331
1332 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1333 "DH89XXCC Gigabit Ethernet (SGMII)",
1334 WM_T_82580, WMP_F_COPPER },
1335
1336 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1337 "DH89XXCC Gigabit Ethernet (SERDES)",
1338 WM_T_82580, WMP_F_SERDES },
1339
1340 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1341 "DH89XXCC 1000BASE-KX Ethernet",
1342 WM_T_82580, WMP_F_SERDES },
1343
1344 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1345 "DH89XXCC Gigabit Ethernet (SFP)",
1346 WM_T_82580, WMP_F_SERDES },
1347
1348 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1349 "I350 Gigabit Network Connection",
1350 WM_T_I350, WMP_F_COPPER },
1351
1352 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1353 "I350 Gigabit Fiber Network Connection",
1354 WM_T_I350, WMP_F_FIBER },
1355
1356 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1357 "I350 Gigabit Backplane Connection",
1358 WM_T_I350, WMP_F_SERDES },
1359
1360 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1361 "I350 Quad Port Gigabit Ethernet",
1362 WM_T_I350, WMP_F_SERDES },
1363
1364 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1365 "I350 Gigabit Connection",
1366 WM_T_I350, WMP_F_COPPER },
1367
1368 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1369 "I354 Gigabit Ethernet (KX)",
1370 WM_T_I354, WMP_F_SERDES },
1371
1372 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1373 "I354 Gigabit Ethernet (SGMII)",
1374 WM_T_I354, WMP_F_COPPER },
1375
1376 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1377 "I354 Gigabit Ethernet (2.5G)",
1378 WM_T_I354, WMP_F_COPPER },
1379
1380 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1381 "I210-T1 Ethernet Server Adapter",
1382 WM_T_I210, WMP_F_COPPER },
1383
1384 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1385 "I210 Ethernet (Copper OEM)",
1386 WM_T_I210, WMP_F_COPPER },
1387
1388 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1389 "I210 Ethernet (Copper IT)",
1390 WM_T_I210, WMP_F_COPPER },
1391
1392 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1393 "I210 Ethernet (FLASH less)",
1394 WM_T_I210, WMP_F_COPPER },
1395
1396 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1397 "I210 Gigabit Ethernet (Fiber)",
1398 WM_T_I210, WMP_F_FIBER },
1399
1400 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1401 "I210 Gigabit Ethernet (SERDES)",
1402 WM_T_I210, WMP_F_SERDES },
1403
1404 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1405 "I210 Gigabit Ethernet (FLASH less)",
1406 WM_T_I210, WMP_F_SERDES },
1407
1408 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1409 "I210 Gigabit Ethernet (SGMII)",
1410 WM_T_I210, WMP_F_COPPER },
1411
1412 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1413 "I211 Ethernet (COPPER)",
1414 WM_T_I211, WMP_F_COPPER },
1415 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1416 "I217 V Ethernet Connection",
1417 WM_T_PCH_LPT, WMP_F_COPPER },
1418 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1419 "I217 LM Ethernet Connection",
1420 WM_T_PCH_LPT, WMP_F_COPPER },
1421 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1422 "I218 V Ethernet Connection",
1423 WM_T_PCH_LPT, WMP_F_COPPER },
1424 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1425 "I218 V Ethernet Connection",
1426 WM_T_PCH_LPT, WMP_F_COPPER },
1427 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1428 "I218 V Ethernet Connection",
1429 WM_T_PCH_LPT, WMP_F_COPPER },
1430 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1431 "I218 LM Ethernet Connection",
1432 WM_T_PCH_LPT, WMP_F_COPPER },
1433 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1434 "I218 LM Ethernet Connection",
1435 WM_T_PCH_LPT, WMP_F_COPPER },
1436 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1437 "I218 LM Ethernet Connection",
1438 WM_T_PCH_LPT, WMP_F_COPPER },
1439 #if 0
1440 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1441 "I219 V Ethernet Connection",
1442 WM_T_PCH_SPT, WMP_F_COPPER },
1443 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1444 "I219 V Ethernet Connection",
1445 WM_T_PCH_SPT, WMP_F_COPPER },
1446 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1447 "I219 V Ethernet Connection",
1448 WM_T_PCH_SPT, WMP_F_COPPER },
1449 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1450 "I219 V Ethernet Connection",
1451 WM_T_PCH_SPT, WMP_F_COPPER },
1452 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1453 "I219 LM Ethernet Connection",
1454 WM_T_PCH_SPT, WMP_F_COPPER },
1455 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1456 "I219 LM Ethernet Connection",
1457 WM_T_PCH_SPT, WMP_F_COPPER },
1458 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1459 "I219 LM Ethernet Connection",
1460 WM_T_PCH_SPT, WMP_F_COPPER },
1461 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1462 "I219 LM Ethernet Connection",
1463 WM_T_PCH_SPT, WMP_F_COPPER },
1464 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1465 "I219 LM Ethernet Connection",
1466 WM_T_PCH_SPT, WMP_F_COPPER },
1467 #endif
1468 { 0, 0,
1469 NULL,
1470 0, 0 },
1471 };
1472
1473 /*
1474 * Register read/write functions.
1475 * Other than CSR_{READ|WRITE}().
1476 */
1477
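/*
 * Indirect I/O-space access (a note on the two helpers below): the
 * register offset is first written to the 32-bit word at offset 0 of
 * the I/O window, and the register contents are then read or written
 * through the word at offset 4.
 */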
1478 #if 0 /* Not currently used */
1479 static inline uint32_t
1480 wm_io_read(struct wm_softc *sc, int reg)
1481 {
1482
1483 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1484 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1485 }
1486 #endif
1487
1488 static inline void
1489 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1490 {
1491
1492 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1493 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1494 }
1495
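/*
 * wm_82575_write_8bit_ctlr_reg:
 *
 *	Write an 8-bit value to a register reached through one of the
 *	82575 indirection registers: the data and target offset are
 *	packed into a single register write, and the hardware sets
 *	SCTL_CTL_READY once the access has completed.
 */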
1496 static inline void
1497 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1498 uint32_t data)
1499 {
1500 uint32_t regval;
1501 int i;
1502
1503 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1504
1505 CSR_WRITE(sc, reg, regval);
1506
1507 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1508 delay(5);
1509 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1510 break;
1511 }
1512 if (i == SCTL_CTL_POLL_TIMEOUT) {
1513 aprint_error("%s: WARNING:"
1514 " i82575 reg 0x%08x setup did not indicate ready\n",
1515 device_xname(sc->sc_dev), reg);
1516 }
1517 }
1518
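/*
 * wm_set_dma_addr:
 *
 *	Store a DMA address into a wiseman_addr_t as two little-endian
 *	32-bit halves; on platforms with a 32-bit bus_addr_t the high
 *	word is simply zero.
 */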
1519 static inline void
1520 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1521 {
1522 wa->wa_low = htole32(v & 0xffffffffU);
1523 if (sizeof(bus_addr_t) == 8)
1524 wa->wa_high = htole32((uint64_t) v >> 32);
1525 else
1526 wa->wa_high = 0;
1527 }
1528
1529 /*
1530 * Descriptor sync/init functions.
1531 */
1532 static inline void
1533 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1534 {
1535 struct wm_softc *sc = txq->txq_sc;
1536
1537 /* If it will wrap around, sync to the end of the ring. */
1538 if ((start + num) > WM_NTXDESC(txq)) {
1539 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1540 WM_CDTXOFF(txq, start), txq->txq_descsize *
1541 (WM_NTXDESC(txq) - start), ops);
1542 num -= (WM_NTXDESC(txq) - start);
1543 start = 0;
1544 }
1545
1546 /* Now sync whatever is left. */
1547 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1548 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1549 }
1550
1551 static inline void
1552 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1553 {
1554 struct wm_softc *sc = rxq->rxq_sc;
1555
1556 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1557 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1558 }
1559
1560 static inline void
1561 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1562 {
1563 struct wm_softc *sc = rxq->rxq_sc;
1564 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1565 struct mbuf *m = rxs->rxs_mbuf;
1566
1567 /*
1568 * Note: We scoot the packet forward 2 bytes in the buffer
1569 * so that the payload after the Ethernet header is aligned
1570 * to a 4-byte boundary.
1571 *
1572 * XXX BRAINDAMAGE ALERT!
1573 * The stupid chip uses the same size for every buffer, which
1574 * is set in the Receive Control register. We are using the 2K
1575 * size option, but what we REALLY want is (2K - 2)! For this
1576 * reason, we can't "scoot" packets longer than the standard
1577 * Ethernet MTU. On strict-alignment platforms, if the total
1578 * size exceeds (2K - 2) we set align_tweak to 0 and let
1579 * the upper layer copy the headers.
1580 */
1581 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1582
1583 if (sc->sc_type == WM_T_82574) {
1584 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1585 rxd->erx_data.erxd_addr =
1586 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1587 rxd->erx_data.erxd_dd = 0;
1588 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1589 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1590
1591 rxd->nqrx_data.nrxd_paddr =
1592 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1593 /* Currently, split header is not supported. */
1594 rxd->nqrx_data.nrxd_haddr = 0;
1595 } else {
1596 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1597
1598 wm_set_dma_addr(&rxd->wrx_addr,
1599 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1600 rxd->wrx_len = 0;
1601 rxd->wrx_cksum = 0;
1602 rxd->wrx_status = 0;
1603 rxd->wrx_errors = 0;
1604 rxd->wrx_special = 0;
1605 }
1606 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1607
1608 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1609 }
1610
1611 /*
1612 * Device driver interface functions and commonly used functions.
1613 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1614 */
1615
1616 /* Lookup supported device table */
1617 static const struct wm_product *
1618 wm_lookup(const struct pci_attach_args *pa)
1619 {
1620 const struct wm_product *wmp;
1621
1622 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1623 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1624 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1625 return wmp;
1626 }
1627 return NULL;
1628 }
1629
1630 /* The match function (ca_match) */
1631 static int
1632 wm_match(device_t parent, cfdata_t cf, void *aux)
1633 {
1634 struct pci_attach_args *pa = aux;
1635
1636 if (wm_lookup(pa) != NULL)
1637 return 1;
1638
1639 return 0;
1640 }
1641
1642 /* The attach function (ca_attach) */
1643 static void
1644 wm_attach(device_t parent, device_t self, void *aux)
1645 {
1646 struct wm_softc *sc = device_private(self);
1647 struct pci_attach_args *pa = aux;
1648 prop_dictionary_t dict;
1649 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1650 pci_chipset_tag_t pc = pa->pa_pc;
1651 int counts[PCI_INTR_TYPE_SIZE];
1652 pci_intr_type_t max_type;
1653 const char *eetype, *xname;
1654 bus_space_tag_t memt;
1655 bus_space_handle_t memh;
1656 bus_size_t memsize;
1657 int memh_valid;
1658 int i, error;
1659 const struct wm_product *wmp;
1660 prop_data_t ea;
1661 prop_number_t pn;
1662 uint8_t enaddr[ETHER_ADDR_LEN];
1663 uint16_t cfg1, cfg2, swdpin, nvmword;
1664 pcireg_t preg, memtype;
1665 uint16_t eeprom_data, apme_mask;
1666 bool force_clear_smbi;
1667 uint32_t link_mode;
1668 uint32_t reg;
1669
1670 sc->sc_dev = self;
1671 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1672 sc->sc_core_stopping = false;
1673
1674 wmp = wm_lookup(pa);
1675 #ifdef DIAGNOSTIC
1676 if (wmp == NULL) {
1677 printf("\n");
1678 panic("wm_attach: impossible");
1679 }
1680 #endif
1681 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1682
1683 sc->sc_pc = pa->pa_pc;
1684 sc->sc_pcitag = pa->pa_tag;
1685
1686 if (pci_dma64_available(pa))
1687 sc->sc_dmat = pa->pa_dmat64;
1688 else
1689 sc->sc_dmat = pa->pa_dmat;
1690
1691 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1692 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1693 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1694
1695 sc->sc_type = wmp->wmp_type;
1696
1697 /* Set default function pointers */
1698 sc->phy.acquire = wm_get_null;
1699 sc->phy.release = wm_put_null;
1700 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1701
1702 if (sc->sc_type < WM_T_82543) {
1703 if (sc->sc_rev < 2) {
1704 aprint_error_dev(sc->sc_dev,
1705 "i82542 must be at least rev. 2\n");
1706 return;
1707 }
1708 if (sc->sc_rev < 3)
1709 sc->sc_type = WM_T_82542_2_0;
1710 }
1711
1712 /*
1713 * Disable MSI for Errata:
1714 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1715 *
1716 * 82544: Errata 25
1717 * 82540: Errata 6 (easy to reproduce device timeout)
1718 * 82545: Errata 4 (easy to reproduce device timeout)
1719 * 82546: Errata 26 (easy to reproduce device timeout)
1720 * 82541: Errata 7 (easy to reproduce device timeout)
1721 *
1722 * "Byte Enables 2 and 3 are not set on MSI writes"
1723 *
1724 * 82571 & 82572: Errata 63
1725 */
1726 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1727 || (sc->sc_type == WM_T_82572))
1728 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1729
1730 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1731 || (sc->sc_type == WM_T_82580)
1732 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1733 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1734 sc->sc_flags |= WM_F_NEWQUEUE;
1735
1736 /* Set device properties (mactype) */
1737 dict = device_properties(sc->sc_dev);
1738 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1739
1740 /*
1741 * Map the device. All devices support memory-mapped access,
1742 * and it is really required for normal operation.
1743 */
1744 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1745 switch (memtype) {
1746 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1747 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1748 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1749 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1750 break;
1751 default:
1752 memh_valid = 0;
1753 break;
1754 }
1755
1756 if (memh_valid) {
1757 sc->sc_st = memt;
1758 sc->sc_sh = memh;
1759 sc->sc_ss = memsize;
1760 } else {
1761 aprint_error_dev(sc->sc_dev,
1762 "unable to map device registers\n");
1763 return;
1764 }
1765
1766 /*
1767 * In addition, i82544 and later support I/O mapped indirect
1768 * register access. It is not desirable (nor supported in
1769 * this driver) to use it for normal operation, though it is
1770 * required to work around bugs in some chip versions.
1771 */
1772 if (sc->sc_type >= WM_T_82544) {
1773 /* First we have to find the I/O BAR. */
1774 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1775 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1776 if (memtype == PCI_MAPREG_TYPE_IO)
1777 break;
1778 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1779 PCI_MAPREG_MEM_TYPE_64BIT)
1780 i += 4; /* skip high bits, too */
1781 }
1782 if (i < PCI_MAPREG_END) {
1783 /*
1784 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1785 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1786 * That's not a problem, because newer chips don't
1787 * have this bug.
1788 *
1789 * The i8254x doesn't apparently respond when the
1790 * I/O BAR is 0, which looks somewhat like it's not
1791 * been configured.
1792 */
1793 preg = pci_conf_read(pc, pa->pa_tag, i);
1794 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1795 aprint_error_dev(sc->sc_dev,
1796 "WARNING: I/O BAR at zero.\n");
1797 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1798 0, &sc->sc_iot, &sc->sc_ioh,
1799 NULL, &sc->sc_ios) == 0) {
1800 sc->sc_flags |= WM_F_IOH_VALID;
1801 } else {
1802 aprint_error_dev(sc->sc_dev,
1803 "WARNING: unable to map I/O space\n");
1804 }
1805 }
1806
1807 }
1808
1809 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1810 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1811 preg |= PCI_COMMAND_MASTER_ENABLE;
1812 if (sc->sc_type < WM_T_82542_2_1)
1813 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1814 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1815
1816 /* power up chip */
1817 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1818 NULL)) && error != EOPNOTSUPP) {
1819 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1820 return;
1821 }
1822
1823 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1824
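/*
 * Interrupt allocation: try MSI-X first with one vector per queue
 * plus one for link status, then fall back to a single MSI vector,
 * and finally to a legacy INTx interrupt.  The MSI and INTx paths
 * must not use multiqueue, so the queue count is reduced to one.
 */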
1825 /* Allocation settings */
1826 max_type = PCI_INTR_TYPE_MSIX;
1827 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1828 counts[PCI_INTR_TYPE_MSI] = 1;
1829 counts[PCI_INTR_TYPE_INTX] = 1;
1830
1831 alloc_retry:
1832 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1833 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1834 return;
1835 }
1836
1837 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1838 error = wm_setup_msix(sc);
1839 if (error) {
1840 pci_intr_release(pc, sc->sc_intrs,
1841 counts[PCI_INTR_TYPE_MSIX]);
1842
1843 /* Setup for MSI: Disable MSI-X */
1844 max_type = PCI_INTR_TYPE_MSI;
1845 counts[PCI_INTR_TYPE_MSI] = 1;
1846 counts[PCI_INTR_TYPE_INTX] = 1;
1847 goto alloc_retry;
1848 }
1849 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1850 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1851 error = wm_setup_legacy(sc);
1852 if (error) {
1853 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1854 counts[PCI_INTR_TYPE_MSI]);
1855
1856 /* The next try is for INTx: Disable MSI */
1857 max_type = PCI_INTR_TYPE_INTX;
1858 counts[PCI_INTR_TYPE_INTX] = 1;
1859 goto alloc_retry;
1860 }
1861 } else {
1862 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1863 error = wm_setup_legacy(sc);
1864 if (error) {
1865 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1866 counts[PCI_INTR_TYPE_INTX]);
1867 return;
1868 }
1869 }
1870
1871 /*
1872 * Check the function ID (unit number of the chip).
1873 */
1874 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1875 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1876 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1877 || (sc->sc_type == WM_T_82580)
1878 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1879 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1880 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1881 else
1882 sc->sc_funcid = 0;
1883
1884 /*
1885 * Determine a few things about the bus we're connected to.
1886 */
1887 if (sc->sc_type < WM_T_82543) {
1888 /* We don't really know the bus characteristics here. */
1889 sc->sc_bus_speed = 33;
1890 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1891 /*
1892 * CSA (Communication Streaming Architecture) is about as
1893 * fast as a 32-bit 66MHz PCI bus.
1894 */
1895 sc->sc_flags |= WM_F_CSA;
1896 sc->sc_bus_speed = 66;
1897 aprint_verbose_dev(sc->sc_dev,
1898 "Communication Streaming Architecture\n");
1899 if (sc->sc_type == WM_T_82547) {
1900 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1901 callout_setfunc(&sc->sc_txfifo_ch,
1902 wm_82547_txfifo_stall, sc);
1903 aprint_verbose_dev(sc->sc_dev,
1904 "using 82547 Tx FIFO stall work-around\n");
1905 }
1906 } else if (sc->sc_type >= WM_T_82571) {
1907 sc->sc_flags |= WM_F_PCIE;
1908 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1909 && (sc->sc_type != WM_T_ICH10)
1910 && (sc->sc_type != WM_T_PCH)
1911 && (sc->sc_type != WM_T_PCH2)
1912 && (sc->sc_type != WM_T_PCH_LPT)
1913 && (sc->sc_type != WM_T_PCH_SPT)) {
1914 /* ICH* and PCH* have no PCIe capability registers */
1915 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1916 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1917 NULL) == 0)
1918 aprint_error_dev(sc->sc_dev,
1919 "unable to find PCIe capability\n");
1920 }
1921 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1922 } else {
1923 reg = CSR_READ(sc, WMREG_STATUS);
1924 if (reg & STATUS_BUS64)
1925 sc->sc_flags |= WM_F_BUS64;
1926 if ((reg & STATUS_PCIX_MODE) != 0) {
1927 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1928
1929 sc->sc_flags |= WM_F_PCIX;
1930 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1931 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1932 aprint_error_dev(sc->sc_dev,
1933 "unable to find PCIX capability\n");
1934 else if (sc->sc_type != WM_T_82545_3 &&
1935 sc->sc_type != WM_T_82546_3) {
1936 /*
1937 * Work around a problem caused by the BIOS
1938 * setting the max memory read byte count
1939 * incorrectly.
1940 */
1941 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1942 sc->sc_pcixe_capoff + PCIX_CMD);
1943 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1944 sc->sc_pcixe_capoff + PCIX_STATUS);
1945
1946 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1947 PCIX_CMD_BYTECNT_SHIFT;
1948 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1949 PCIX_STATUS_MAXB_SHIFT;
1950 if (bytecnt > maxb) {
1951 aprint_verbose_dev(sc->sc_dev,
1952 "resetting PCI-X MMRBC: %d -> %d\n",
1953 512 << bytecnt, 512 << maxb);
1954 pcix_cmd = (pcix_cmd &
1955 ~PCIX_CMD_BYTECNT_MASK) |
1956 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1957 pci_conf_write(pa->pa_pc, pa->pa_tag,
1958 sc->sc_pcixe_capoff + PCIX_CMD,
1959 pcix_cmd);
1960 }
1961 }
1962 }
1963 /*
1964 * The quad port adapter is special; it has a PCIX-PCIX
1965 * bridge on the board, and can run the secondary bus at
1966 * a higher speed.
1967 */
1968 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1969 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1970 : 66;
1971 } else if (sc->sc_flags & WM_F_PCIX) {
1972 switch (reg & STATUS_PCIXSPD_MASK) {
1973 case STATUS_PCIXSPD_50_66:
1974 sc->sc_bus_speed = 66;
1975 break;
1976 case STATUS_PCIXSPD_66_100:
1977 sc->sc_bus_speed = 100;
1978 break;
1979 case STATUS_PCIXSPD_100_133:
1980 sc->sc_bus_speed = 133;
1981 break;
1982 default:
1983 aprint_error_dev(sc->sc_dev,
1984 "unknown PCIXSPD %d; assuming 66MHz\n",
1985 reg & STATUS_PCIXSPD_MASK);
1986 sc->sc_bus_speed = 66;
1987 break;
1988 }
1989 } else
1990 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1991 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1992 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1993 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1994 }
1995
1996 /* clear interesting stat counters */
1997 CSR_READ(sc, WMREG_COLC);
1998 CSR_READ(sc, WMREG_RXERRC);
1999
2000 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2001 || (sc->sc_type >= WM_T_ICH8))
2002 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2003 if (sc->sc_type >= WM_T_ICH8)
2004 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2005
2006 /* Set PHY, NVM mutex related stuff */
2007 switch (sc->sc_type) {
2008 case WM_T_82542_2_0:
2009 case WM_T_82542_2_1:
2010 case WM_T_82543:
2011 case WM_T_82544:
2012 /* Microwire */
2013 sc->sc_nvm_wordsize = 64;
2014 sc->sc_nvm_addrbits = 6;
2015 break;
2016 case WM_T_82540:
2017 case WM_T_82545:
2018 case WM_T_82545_3:
2019 case WM_T_82546:
2020 case WM_T_82546_3:
2021 /* Microwire */
2022 reg = CSR_READ(sc, WMREG_EECD);
2023 if (reg & EECD_EE_SIZE) {
2024 sc->sc_nvm_wordsize = 256;
2025 sc->sc_nvm_addrbits = 8;
2026 } else {
2027 sc->sc_nvm_wordsize = 64;
2028 sc->sc_nvm_addrbits = 6;
2029 }
2030 sc->sc_flags |= WM_F_LOCK_EECD;
2031 break;
2032 case WM_T_82541:
2033 case WM_T_82541_2:
2034 case WM_T_82547:
2035 case WM_T_82547_2:
2036 sc->sc_flags |= WM_F_LOCK_EECD;
2037 reg = CSR_READ(sc, WMREG_EECD);
2038 if (reg & EECD_EE_TYPE) {
2039 /* SPI */
2040 sc->sc_flags |= WM_F_EEPROM_SPI;
2041 wm_nvm_set_addrbits_size_eecd(sc);
2042 } else {
2043 /* Microwire */
2044 if ((reg & EECD_EE_ABITS) != 0) {
2045 sc->sc_nvm_wordsize = 256;
2046 sc->sc_nvm_addrbits = 8;
2047 } else {
2048 sc->sc_nvm_wordsize = 64;
2049 sc->sc_nvm_addrbits = 6;
2050 }
2051 }
2052 break;
2053 case WM_T_82571:
2054 case WM_T_82572:
2055 /* SPI */
2056 sc->sc_flags |= WM_F_EEPROM_SPI;
2057 wm_nvm_set_addrbits_size_eecd(sc);
2058 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
2059 sc->phy.acquire = wm_get_swsm_semaphore;
2060 sc->phy.release = wm_put_swsm_semaphore;
2061 break;
2062 case WM_T_82573:
2063 case WM_T_82574:
2064 case WM_T_82583:
2065 if (sc->sc_type == WM_T_82573) {
2066 sc->sc_flags |= WM_F_LOCK_SWSM;
2067 sc->phy.acquire = wm_get_swsm_semaphore;
2068 sc->phy.release = wm_put_swsm_semaphore;
2069 } else {
2070 sc->sc_flags |= WM_F_LOCK_EXTCNF;
2071 /* Both PHY and NVM use the same semaphore. */
2072 sc->phy.acquire
2073 = wm_get_swfwhw_semaphore;
2074 sc->phy.release
2075 = wm_put_swfwhw_semaphore;
2076 }
2077 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2078 sc->sc_flags |= WM_F_EEPROM_FLASH;
2079 sc->sc_nvm_wordsize = 2048;
2080 } else {
2081 /* SPI */
2082 sc->sc_flags |= WM_F_EEPROM_SPI;
2083 wm_nvm_set_addrbits_size_eecd(sc);
2084 }
2085 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2086 break;
2087 case WM_T_82575:
2088 case WM_T_82576:
2089 case WM_T_82580:
2090 case WM_T_I350:
2091 case WM_T_I354:
2092 case WM_T_80003:
2093 /* SPI */
2094 sc->sc_flags |= WM_F_EEPROM_SPI;
2095 wm_nvm_set_addrbits_size_eecd(sc);
2096 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2097 | WM_F_LOCK_SWSM;
2098 sc->phy.acquire = wm_get_phy_82575;
2099 sc->phy.release = wm_put_phy_82575;
2100 break;
2101 case WM_T_ICH8:
2102 case WM_T_ICH9:
2103 case WM_T_ICH10:
2104 case WM_T_PCH:
2105 case WM_T_PCH2:
2106 case WM_T_PCH_LPT:
2107 /* FLASH */
2108 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2109 sc->sc_nvm_wordsize = 2048;
2110 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2111 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2112 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2113 aprint_error_dev(sc->sc_dev,
2114 "can't map FLASH registers\n");
2115 goto out;
2116 }
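/*
 * GFPREG gives the base and limit (in flash sectors) of the region
 * reserved for the NVM: compute the base offset in bytes and the
 * size of one of the two banks in 16-bit words (hence the final
 * division by 2 * sizeof(uint16_t)).
 */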
2117 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2118 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2119 ICH_FLASH_SECTOR_SIZE;
2120 sc->sc_ich8_flash_bank_size =
2121 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2122 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2123 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2124 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2125 sc->sc_flashreg_offset = 0;
2126 sc->phy.acquire = wm_get_swflag_ich8lan;
2127 sc->phy.release = wm_put_swflag_ich8lan;
2128 break;
2129 case WM_T_PCH_SPT:
2130 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2131 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2132 sc->sc_flasht = sc->sc_st;
2133 sc->sc_flashh = sc->sc_sh;
2134 sc->sc_ich8_flash_base = 0;
2135 sc->sc_nvm_wordsize =
2136 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2137 * NVM_SIZE_MULTIPLIER;
2138 /* It is the size in bytes; we want words */
2139 sc->sc_nvm_wordsize /= 2;
2140 /* assume 2 banks */
2141 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2142 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2143 sc->phy.acquire = wm_get_swflag_ich8lan;
2144 sc->phy.release = wm_put_swflag_ich8lan;
2145 break;
2146 case WM_T_I210:
2147 case WM_T_I211:
2148 if (wm_nvm_get_flash_presence_i210(sc)) {
2149 wm_nvm_set_addrbits_size_eecd(sc);
2150 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2151 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2152 } else {
2153 sc->sc_nvm_wordsize = INVM_SIZE;
2154 sc->sc_flags |= WM_F_EEPROM_INVM;
2155 }
2156 sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
2157 sc->phy.acquire = wm_get_phy_82575;
2158 sc->phy.release = wm_put_phy_82575;
2159 break;
2160 default:
2161 break;
2162 }
2163
2164 /* Reset the chip to a known state. */
2165 wm_reset(sc);
2166
2167 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2168 switch (sc->sc_type) {
2169 case WM_T_82571:
2170 case WM_T_82572:
2171 reg = CSR_READ(sc, WMREG_SWSM2);
2172 if ((reg & SWSM2_LOCK) == 0) {
2173 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2174 force_clear_smbi = true;
2175 } else
2176 force_clear_smbi = false;
2177 break;
2178 case WM_T_82573:
2179 case WM_T_82574:
2180 case WM_T_82583:
2181 force_clear_smbi = true;
2182 break;
2183 default:
2184 force_clear_smbi = false;
2185 break;
2186 }
2187 if (force_clear_smbi) {
2188 reg = CSR_READ(sc, WMREG_SWSM);
2189 if ((reg & SWSM_SMBI) != 0)
2190 aprint_error_dev(sc->sc_dev,
2191 "Please update the Bootagent\n");
2192 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2193 }
2194
2195 /*
2196 * Defer printing the EEPROM type until after verifying the checksum.
2197 * This allows the EEPROM type to be printed correctly in the case
2198 * that no EEPROM is attached.
2199 */
2200 /*
2201 * Validate the EEPROM checksum. If the checksum fails, flag
2202 * this for later, so we can fail future reads from the EEPROM.
2203 */
2204 if (wm_nvm_validate_checksum(sc)) {
2205 /*
2206 * Retry the read, because some PCI-e parts fail the
2207 * first check due to the link being in a sleep state.
2208 */
2209 if (wm_nvm_validate_checksum(sc))
2210 sc->sc_flags |= WM_F_EEPROM_INVALID;
2211 }
2212
2213 /* Set device properties (macflags) */
2214 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2215
2216 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2217 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2218 else {
2219 aprint_verbose_dev(sc->sc_dev, "%u words ",
2220 sc->sc_nvm_wordsize);
2221 if (sc->sc_flags & WM_F_EEPROM_INVM)
2222 aprint_verbose("iNVM");
2223 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2224 aprint_verbose("FLASH(HW)");
2225 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2226 aprint_verbose("FLASH");
2227 else {
2228 if (sc->sc_flags & WM_F_EEPROM_SPI)
2229 eetype = "SPI";
2230 else
2231 eetype = "MicroWire";
2232 aprint_verbose("(%d address bits) %s EEPROM",
2233 sc->sc_nvm_addrbits, eetype);
2234 }
2235 }
2236 wm_nvm_version(sc);
2237 aprint_verbose("\n");
2238
2239 /* Check for I21[01] PLL workaround */
2240 if (sc->sc_type == WM_T_I210)
2241 sc->sc_flags |= WM_F_PLL_WA_I210;
2242 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2243 /* NVM image release 3.25 has a workaround */
2244 if ((sc->sc_nvm_ver_major < 3)
2245 || ((sc->sc_nvm_ver_major == 3)
2246 && (sc->sc_nvm_ver_minor < 25))) {
2247 aprint_verbose_dev(sc->sc_dev,
2248 "ROM image version %d.%d is older than 3.25\n",
2249 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2250 sc->sc_flags |= WM_F_PLL_WA_I210;
2251 }
2252 }
2253 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2254 wm_pll_workaround_i210(sc);
2255
2256 wm_get_wakeup(sc);
2257
2258 /* Non-AMT based hardware can now take control from firmware */
2259 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2260 wm_get_hw_control(sc);
2261
2262 /*
2263 * Read the Ethernet address from the EEPROM, if not first found
2264 * in device properties.
2265 */
2266 ea = prop_dictionary_get(dict, "mac-address");
2267 if (ea != NULL) {
2268 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2269 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2270 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2271 } else {
2272 if (wm_read_mac_addr(sc, enaddr) != 0) {
2273 aprint_error_dev(sc->sc_dev,
2274 "unable to read Ethernet address\n");
2275 goto out;
2276 }
2277 }
2278
2279 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2280 ether_sprintf(enaddr));
2281
2282 /*
2283 * Read the config info from the EEPROM, and set up various
2284 * bits in the control registers based on their contents.
2285 */
2286 pn = prop_dictionary_get(dict, "i82543-cfg1");
2287 if (pn != NULL) {
2288 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2289 cfg1 = (uint16_t) prop_number_integer_value(pn);
2290 } else {
2291 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2292 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2293 goto out;
2294 }
2295 }
2296
2297 pn = prop_dictionary_get(dict, "i82543-cfg2");
2298 if (pn != NULL) {
2299 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2300 cfg2 = (uint16_t) prop_number_integer_value(pn);
2301 } else {
2302 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2303 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2304 goto out;
2305 }
2306 }
2307
2308 /* check for WM_F_WOL */
2309 switch (sc->sc_type) {
2310 case WM_T_82542_2_0:
2311 case WM_T_82542_2_1:
2312 case WM_T_82543:
2313 /* dummy? */
2314 eeprom_data = 0;
2315 apme_mask = NVM_CFG3_APME;
2316 break;
2317 case WM_T_82544:
2318 apme_mask = NVM_CFG2_82544_APM_EN;
2319 eeprom_data = cfg2;
2320 break;
2321 case WM_T_82546:
2322 case WM_T_82546_3:
2323 case WM_T_82571:
2324 case WM_T_82572:
2325 case WM_T_82573:
2326 case WM_T_82574:
2327 case WM_T_82583:
2328 case WM_T_80003:
2329 default:
2330 apme_mask = NVM_CFG3_APME;
2331 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2332 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2333 break;
2334 case WM_T_82575:
2335 case WM_T_82576:
2336 case WM_T_82580:
2337 case WM_T_I350:
2338 case WM_T_I354: /* XXX ok? */
2339 case WM_T_ICH8:
2340 case WM_T_ICH9:
2341 case WM_T_ICH10:
2342 case WM_T_PCH:
2343 case WM_T_PCH2:
2344 case WM_T_PCH_LPT:
2345 case WM_T_PCH_SPT:
2346 /* XXX The funcid should be checked on some devices */
2347 apme_mask = WUC_APME;
2348 eeprom_data = CSR_READ(sc, WMREG_WUC);
2349 break;
2350 }
2351
2352 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2353 if ((eeprom_data & apme_mask) != 0)
2354 sc->sc_flags |= WM_F_WOL;
2355 #ifdef WM_DEBUG
2356 if ((sc->sc_flags & WM_F_WOL) != 0)
2357 printf("WOL\n");
2358 #endif
2359
2360 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2361 /* Check NVM for autonegotiation */
2362 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2363 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2364 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2365 }
2366 }
2367
2368 /*
2369 * XXX need special handling for some multiple port cards
2370 * to disable a particular port.
2371 */
2372
2373 if (sc->sc_type >= WM_T_82544) {
2374 pn = prop_dictionary_get(dict, "i82543-swdpin");
2375 if (pn != NULL) {
2376 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2377 swdpin = (uint16_t) prop_number_integer_value(pn);
2378 } else {
2379 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2380 aprint_error_dev(sc->sc_dev,
2381 "unable to read SWDPIN\n");
2382 goto out;
2383 }
2384 }
2385 }
2386
2387 if (cfg1 & NVM_CFG1_ILOS)
2388 sc->sc_ctrl |= CTRL_ILOS;
2389
2390 /*
2391 * XXX
2392 * This code isn't correct because pins 2 and 3 are located
2393 * in different positions on newer chips. Check all datasheets.
2394 *
2395 * Until this problem is resolved, only do this for chips <= 82580.
2396 */
2397 if (sc->sc_type <= WM_T_82580) {
2398 if (sc->sc_type >= WM_T_82544) {
2399 sc->sc_ctrl |=
2400 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2401 CTRL_SWDPIO_SHIFT;
2402 sc->sc_ctrl |=
2403 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2404 CTRL_SWDPINS_SHIFT;
2405 } else {
2406 sc->sc_ctrl |=
2407 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2408 CTRL_SWDPIO_SHIFT;
2409 }
2410 }
2411
2412 /* XXX For other than 82580? */
2413 if (sc->sc_type == WM_T_82580) {
2414 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2415 if (nvmword & __BIT(13))
2416 sc->sc_ctrl |= CTRL_ILOS;
2417 }
2418
2419 #if 0
2420 if (sc->sc_type >= WM_T_82544) {
2421 if (cfg1 & NVM_CFG1_IPS0)
2422 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2423 if (cfg1 & NVM_CFG1_IPS1)
2424 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2425 sc->sc_ctrl_ext |=
2426 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2427 CTRL_EXT_SWDPIO_SHIFT;
2428 sc->sc_ctrl_ext |=
2429 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2430 CTRL_EXT_SWDPINS_SHIFT;
2431 } else {
2432 sc->sc_ctrl_ext |=
2433 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2434 CTRL_EXT_SWDPIO_SHIFT;
2435 }
2436 #endif
2437
2438 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2439 #if 0
2440 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2441 #endif
2442
2443 if (sc->sc_type == WM_T_PCH) {
2444 uint16_t val;
2445
2446 /* Save the NVM K1 bit setting */
2447 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2448
2449 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2450 sc->sc_nvm_k1_enabled = 1;
2451 else
2452 sc->sc_nvm_k1_enabled = 0;
2453 }
2454
2455 /*
2456 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2457 * media structures accordingly.
2458 */
2459 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2460 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2461 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2462 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2463 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2464 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2465 wm_gmii_mediainit(sc, wmp->wmp_product);
2466 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2467 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2468 || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2469 || (sc->sc_type == WM_T_I211)) {
2470 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2471 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2472 switch (link_mode) {
2473 case CTRL_EXT_LINK_MODE_1000KX:
2474 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2475 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2476 break;
2477 case CTRL_EXT_LINK_MODE_SGMII:
2478 if (wm_sgmii_uses_mdio(sc)) {
2479 aprint_verbose_dev(sc->sc_dev,
2480 "SGMII(MDIO)\n");
2481 sc->sc_flags |= WM_F_SGMII;
2482 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2483 break;
2484 }
2485 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2486 /*FALLTHROUGH*/
2487 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2488 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2489 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2490 if (link_mode
2491 == CTRL_EXT_LINK_MODE_SGMII) {
2492 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2493 sc->sc_flags |= WM_F_SGMII;
2494 } else {
2495 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2496 aprint_verbose_dev(sc->sc_dev,
2497 "SERDES\n");
2498 }
2499 break;
2500 }
2501 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2502 aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2503
2504 /* Change current link mode setting */
2505 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2506 switch (sc->sc_mediatype) {
2507 case WM_MEDIATYPE_COPPER:
2508 reg |= CTRL_EXT_LINK_MODE_SGMII;
2509 break;
2510 case WM_MEDIATYPE_SERDES:
2511 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2512 break;
2513 default:
2514 break;
2515 }
2516 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2517 break;
2518 case CTRL_EXT_LINK_MODE_GMII:
2519 default:
2520 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2521 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2522 break;
2523 }
2524
2525 reg &= ~CTRL_EXT_I2C_ENA;
2526 if ((sc->sc_flags & WM_F_SGMII) != 0)
2527 reg |= CTRL_EXT_I2C_ENA;
2528 else
2529 reg &= ~CTRL_EXT_I2C_ENA;
2530 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2531
2532 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2533 wm_gmii_mediainit(sc, wmp->wmp_product);
2534 else
2535 wm_tbi_mediainit(sc);
2536 } else if (sc->sc_type < WM_T_82543 ||
2537 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2538 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2539 aprint_error_dev(sc->sc_dev,
2540 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2541 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2542 }
2543 wm_tbi_mediainit(sc);
2544 } else {
2545 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2546 aprint_error_dev(sc->sc_dev,
2547 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2548 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2549 }
2550 wm_gmii_mediainit(sc, wmp->wmp_product);
2551 }
2552
2553 ifp = &sc->sc_ethercom.ec_if;
2554 xname = device_xname(sc->sc_dev);
2555 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2556 ifp->if_softc = sc;
2557 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2558 #ifdef WM_MPSAFE
2559 ifp->if_extflags = IFEF_START_MPSAFE;
2560 #endif
2561 ifp->if_ioctl = wm_ioctl;
2562 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2563 ifp->if_start = wm_nq_start;
2564 /*
2565 * When the number of CPUs is one and the controller can use
2566 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
2567 * That is, wm(4) uses two interrupts: one for Tx/Rx and
2568 * the other for link status changes.
2569 * In this situation, wm_nq_transmit() is disadvantageous
2570 * because of wm_select_txqueue() and pcq(9) overhead.
2571 */
2572 if (wm_is_using_multiqueue(sc))
2573 ifp->if_transmit = wm_nq_transmit;
2574 } else {
2575 ifp->if_start = wm_start;
2576 /*
2577 * wm_transmit() has the same disadvantages as wm_nq_transmit().
2578 */
2579 if (wm_is_using_multiqueue(sc))
2580 ifp->if_transmit = wm_transmit;
2581 }
2582 ifp->if_watchdog = wm_watchdog;
2583 ifp->if_init = wm_init;
2584 ifp->if_stop = wm_stop;
2585 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2586 IFQ_SET_READY(&ifp->if_snd);
2587
2588 /* Check for jumbo frame */
2589 switch (sc->sc_type) {
2590 case WM_T_82573:
2591 /* XXX limited to 9234 if ASPM is disabled */
2592 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2593 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2594 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2595 break;
2596 case WM_T_82571:
2597 case WM_T_82572:
2598 case WM_T_82574:
2599 case WM_T_82575:
2600 case WM_T_82576:
2601 case WM_T_82580:
2602 case WM_T_I350:
2603 case WM_T_I354: /* XXXX ok? */
2604 case WM_T_I210:
2605 case WM_T_I211:
2606 case WM_T_80003:
2607 case WM_T_ICH9:
2608 case WM_T_ICH10:
2609 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2610 case WM_T_PCH_LPT:
2611 case WM_T_PCH_SPT:
2612 /* XXX limited to 9234 */
2613 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2614 break;
2615 case WM_T_PCH:
2616 /* XXX limited to 4096 */
2617 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2618 break;
2619 case WM_T_82542_2_0:
2620 case WM_T_82542_2_1:
2621 case WM_T_82583:
2622 case WM_T_ICH8:
2623 /* No support for jumbo frame */
2624 break;
2625 default:
2626 /* ETHER_MAX_LEN_JUMBO */
2627 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2628 break;
2629 }
2630
2631 /* If we're a i82543 or greater, we can support VLANs. */
2632 if (sc->sc_type >= WM_T_82543)
2633 sc->sc_ethercom.ec_capabilities |=
2634 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2635
2636 /*
2637 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2638 * on i82543 and later.
2639 */
2640 if (sc->sc_type >= WM_T_82543) {
2641 ifp->if_capabilities |=
2642 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2643 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2644 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2645 IFCAP_CSUM_TCPv6_Tx |
2646 IFCAP_CSUM_UDPv6_Tx;
2647 }
2648
2649 /*
2650 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2651 *
2652 * 82541GI (8086:1076) ... no
2653 * 82572EI (8086:10b9) ... yes
2654 */
2655 if (sc->sc_type >= WM_T_82571) {
2656 ifp->if_capabilities |=
2657 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2658 }
2659
2660 /*
2661 * If we're a i82544 or greater (except i82547), we can do
2662 * TCP segmentation offload.
2663 */
2664 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2665 ifp->if_capabilities |= IFCAP_TSOv4;
2666 }
2667
2668 if (sc->sc_type >= WM_T_82571) {
2669 ifp->if_capabilities |= IFCAP_TSOv6;
2670 }
2671
2672 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
2673 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
2674
2675 #ifdef WM_MPSAFE
2676 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2677 #else
2678 sc->sc_core_lock = NULL;
2679 #endif
2680
2681 /* Attach the interface. */
2682 if_initialize(ifp);
2683 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2684 ether_ifattach(ifp, enaddr);
2685 if_register(ifp);
2686 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2687 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2688 RND_FLAG_DEFAULT);
2689
2690 #ifdef WM_EVENT_COUNTERS
2691 /* Attach event counters. */
2692 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2693 NULL, xname, "linkintr");
2694
2695 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2696 NULL, xname, "tx_xoff");
2697 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2698 NULL, xname, "tx_xon");
2699 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2700 NULL, xname, "rx_xoff");
2701 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2702 NULL, xname, "rx_xon");
2703 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2704 NULL, xname, "rx_macctl");
2705 #endif /* WM_EVENT_COUNTERS */
2706
2707 if (pmf_device_register(self, wm_suspend, wm_resume))
2708 pmf_class_network_register(self, ifp);
2709 else
2710 aprint_error_dev(self, "couldn't establish power handler\n");
2711
2712 sc->sc_flags |= WM_F_ATTACHED;
2713 out:
2714 return;
2715 }
2716
2717 /* The detach function (ca_detach) */
2718 static int
2719 wm_detach(device_t self, int flags __unused)
2720 {
2721 struct wm_softc *sc = device_private(self);
2722 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2723 int i;
2724
2725 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2726 return 0;
2727
2728 /* Stop the interface. Callouts are stopped in it. */
2729 wm_stop(ifp, 1);
2730
2731 pmf_device_deregister(self);
2732
2733 #ifdef WM_EVENT_COUNTERS
2734 evcnt_detach(&sc->sc_ev_linkintr);
2735
2736 evcnt_detach(&sc->sc_ev_tx_xoff);
2737 evcnt_detach(&sc->sc_ev_tx_xon);
2738 evcnt_detach(&sc->sc_ev_rx_xoff);
2739 evcnt_detach(&sc->sc_ev_rx_xon);
2740 evcnt_detach(&sc->sc_ev_rx_macctl);
2741 #endif /* WM_EVENT_COUNTERS */
2742
2743 /* Tell the firmware about the release */
2744 WM_CORE_LOCK(sc);
2745 wm_release_manageability(sc);
2746 wm_release_hw_control(sc);
2747 wm_enable_wakeup(sc);
2748 WM_CORE_UNLOCK(sc);
2749
2750 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2751
2752 /* Delete all remaining media. */
2753 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2754
2755 ether_ifdetach(ifp);
2756 if_detach(ifp);
2757 if_percpuq_destroy(sc->sc_ipq);
2758
2759 /* Unload RX dmamaps and free mbufs */
2760 for (i = 0; i < sc->sc_nqueues; i++) {
2761 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2762 mutex_enter(rxq->rxq_lock);
2763 wm_rxdrain(rxq);
2764 mutex_exit(rxq->rxq_lock);
2765 }
2766 /* Must unlock here */
2767
2768 /* Disestablish the interrupt handler */
2769 for (i = 0; i < sc->sc_nintrs; i++) {
2770 if (sc->sc_ihs[i] != NULL) {
2771 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2772 sc->sc_ihs[i] = NULL;
2773 }
2774 }
2775 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2776
2777 wm_free_txrx_queues(sc);
2778
2779 /* Unmap the registers */
2780 if (sc->sc_ss) {
2781 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2782 sc->sc_ss = 0;
2783 }
2784 if (sc->sc_ios) {
2785 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2786 sc->sc_ios = 0;
2787 }
2788 if (sc->sc_flashs) {
2789 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2790 sc->sc_flashs = 0;
2791 }
2792
2793 if (sc->sc_core_lock)
2794 mutex_obj_free(sc->sc_core_lock);
2795 if (sc->sc_ich_phymtx)
2796 mutex_obj_free(sc->sc_ich_phymtx);
2797 if (sc->sc_ich_nvmmtx)
2798 mutex_obj_free(sc->sc_ich_nvmmtx);
2799
2800 return 0;
2801 }
2802
2803 static bool
2804 wm_suspend(device_t self, const pmf_qual_t *qual)
2805 {
2806 struct wm_softc *sc = device_private(self);
2807
2808 wm_release_manageability(sc);
2809 wm_release_hw_control(sc);
2810 wm_enable_wakeup(sc);
2811
2812 return true;
2813 }
2814
2815 static bool
2816 wm_resume(device_t self, const pmf_qual_t *qual)
2817 {
2818 struct wm_softc *sc = device_private(self);
2819
2820 wm_init_manageability(sc);
2821
2822 return true;
2823 }
2824
2825 /*
2826 * wm_watchdog: [ifnet interface function]
2827 *
2828 * Watchdog timer handler.
2829 */
2830 static void
2831 wm_watchdog(struct ifnet *ifp)
2832 {
2833 int qid;
2834 struct wm_softc *sc = ifp->if_softc;
2835
2836 for (qid = 0; qid < sc->sc_nqueues; qid++) {
2837 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2838
2839 wm_watchdog_txq(ifp, txq);
2840 }
2841
2842 /* Reset the interface. */
2843 (void) wm_init(ifp);
2844
2845 /*
2846 * There is still some upper-layer processing which calls
2847 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
2848 */
2849 /* Try to get more packets going. */
2850 ifp->if_start(ifp);
2851 }
2852
2853 static void
2854 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2855 {
2856 struct wm_softc *sc = ifp->if_softc;
2857
2858 /*
2859 * Since we're using delayed interrupts, sweep up
2860 * before we report an error.
2861 */
2862 mutex_enter(txq->txq_lock);
2863 wm_txeof(sc, txq);
2864 mutex_exit(txq->txq_lock);
2865
2866 if (txq->txq_free != WM_NTXDESC(txq)) {
2867 #ifdef WM_DEBUG
2868 int i, j;
2869 struct wm_txsoft *txs;
2870 #endif
2871 log(LOG_ERR,
2872 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2873 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2874 txq->txq_next);
2875 ifp->if_oerrors++;
2876 #ifdef WM_DEBUG
2877 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2878 i = WM_NEXTTXS(txq, i)) {
2879 txs = &txq->txq_soft[i];
2880 printf("txs %d tx %d -> %d\n",
2881 i, txs->txs_firstdesc, txs->txs_lastdesc);
2882 for (j = txs->txs_firstdesc; ;
2883 j = WM_NEXTTX(txq, j)) {
2884 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2885 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2886 printf("\t %#08x%08x\n",
2887 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2888 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2889 if (j == txs->txs_lastdesc)
2890 break;
2891 }
2892 }
2893 #endif
2894 }
2895 }
2896
2897 /*
2898 * wm_tick:
2899 *
2900 * One second timer, used to check link status, sweep up
2901 * completed transmit jobs, etc.
2902 */
2903 static void
2904 wm_tick(void *arg)
2905 {
2906 struct wm_softc *sc = arg;
2907 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2908 #ifndef WM_MPSAFE
2909 int s = splnet();
2910 #endif
2911
2912 WM_CORE_LOCK(sc);
2913
2914 if (sc->sc_core_stopping)
2915 goto out;
2916
2917 if (sc->sc_type >= WM_T_82542_2_1) {
2918 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2919 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2920 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2921 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2922 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2923 }
2924
2925 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2926 ifp->if_ierrors += 0ULL /* ensure quad_t */
2927 + CSR_READ(sc, WMREG_CRCERRS)
2928 + CSR_READ(sc, WMREG_ALGNERRC)
2929 + CSR_READ(sc, WMREG_SYMERRC)
2930 + CSR_READ(sc, WMREG_RXERRC)
2931 + CSR_READ(sc, WMREG_SEC)
2932 + CSR_READ(sc, WMREG_CEXTERR)
2933 + CSR_READ(sc, WMREG_RLEC);
2934 /*
2935 * WMREG_RNBC is incremented when there are no available buffers in host
2936 * memory. It is not the number of dropped packets, because the
2937 * ethernet controller can still receive packets in that case if there
2938 * is space in the PHY's FIFO.
2939 *
2940 * If you want to count WMREG_RNBC, you should use your own EVCNT
2941 * instead of if_iqdrops.
2942 */
2943 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
2944
2945 if (sc->sc_flags & WM_F_HAS_MII)
2946 mii_tick(&sc->sc_mii);
2947 else if ((sc->sc_type >= WM_T_82575)
2948 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2949 wm_serdes_tick(sc);
2950 else
2951 wm_tbi_tick(sc);
2952
2953 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2954 out:
2955 WM_CORE_UNLOCK(sc);
2956 #ifndef WM_MPSAFE
2957 splx(s);
2958 #endif
2959 }
2960
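/*
 * wm_ifflags_cb:
 *
 *	Ethernet-layer callback for interface flag changes.  Returns
 *	ENETRESET (forcing a full re-init) for anything other than a
 *	promiscuous/allmulti or debug change; otherwise just updates
 *	the receive filter and the VLAN setting.
 */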
2961 static int
2962 wm_ifflags_cb(struct ethercom *ec)
2963 {
2964 struct ifnet *ifp = &ec->ec_if;
2965 struct wm_softc *sc = ifp->if_softc;
2966 int rc = 0;
2967
2968 WM_CORE_LOCK(sc);
2969
2970 int change = ifp->if_flags ^ sc->sc_if_flags;
2971 sc->sc_if_flags = ifp->if_flags;
2972
2973 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2974 rc = ENETRESET;
2975 goto out;
2976 }
2977
2978 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2979 wm_set_filter(sc);
2980
2981 wm_set_vlan(sc);
2982
2983 out:
2984 WM_CORE_UNLOCK(sc);
2985
2986 return rc;
2987 }
2988
2989 /*
2990 * wm_ioctl: [ifnet interface function]
2991 *
2992 * Handle control requests from the operator.
2993 */
2994 static int
2995 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2996 {
2997 struct wm_softc *sc = ifp->if_softc;
2998 struct ifreq *ifr = (struct ifreq *) data;
2999 struct ifaddr *ifa = (struct ifaddr *)data;
3000 struct sockaddr_dl *sdl;
3001 int s, error;
3002
3003 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3004 device_xname(sc->sc_dev), __func__));
3005
3006 #ifndef WM_MPSAFE
3007 s = splnet();
3008 #endif
3009 switch (cmd) {
3010 case SIOCSIFMEDIA:
3011 case SIOCGIFMEDIA:
3012 WM_CORE_LOCK(sc);
3013 /* Flow control requires full-duplex mode. */
3014 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3015 (ifr->ifr_media & IFM_FDX) == 0)
3016 ifr->ifr_media &= ~IFM_ETH_FMASK;
3017 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3018 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3019 /* We can do both TXPAUSE and RXPAUSE. */
3020 ifr->ifr_media |=
3021 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3022 }
3023 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3024 }
3025 WM_CORE_UNLOCK(sc);
3026 #ifdef WM_MPSAFE
3027 s = splnet();
3028 #endif
3029 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3030 #ifdef WM_MPSAFE
3031 splx(s);
3032 #endif
3033 break;
3034 case SIOCINITIFADDR:
3035 WM_CORE_LOCK(sc);
3036 if (ifa->ifa_addr->sa_family == AF_LINK) {
3037 sdl = satosdl(ifp->if_dl->ifa_addr);
3038 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3039 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3040 /* unicast address is first multicast entry */
3041 wm_set_filter(sc);
3042 error = 0;
3043 WM_CORE_UNLOCK(sc);
3044 break;
3045 }
3046 WM_CORE_UNLOCK(sc);
3047 /*FALLTHROUGH*/
3048 default:
3049 #ifdef WM_MPSAFE
3050 s = splnet();
3051 #endif
3052 /* It may call wm_start, so unlock here */
3053 error = ether_ioctl(ifp, cmd, data);
3054 #ifdef WM_MPSAFE
3055 splx(s);
3056 #endif
3057 if (error != ENETRESET)
3058 break;
3059
3060 error = 0;
3061
3062 if (cmd == SIOCSIFCAP) {
3063 error = (*ifp->if_init)(ifp);
3064 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3065 ;
3066 else if (ifp->if_flags & IFF_RUNNING) {
3067 /*
3068 * Multicast list has changed; set the hardware filter
3069 * accordingly.
3070 */
3071 WM_CORE_LOCK(sc);
3072 wm_set_filter(sc);
3073 WM_CORE_UNLOCK(sc);
3074 }
3075 break;
3076 }
3077
3078 #ifndef WM_MPSAFE
3079 splx(s);
3080 #endif
3081 return error;
3082 }
3083
3084 /* MAC address related */
3085
3086 /*
3087 * Get the offset of the MAC address and return it.
3088 * If an error occurred, use offset 0.
3089 */
3090 static uint16_t
3091 wm_check_alt_mac_addr(struct wm_softc *sc)
3092 {
3093 uint16_t myea[ETHER_ADDR_LEN / 2];
3094 uint16_t offset = NVM_OFF_MACADDR;
3095
3096 /* Try to read alternative MAC address pointer */
3097 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3098 return 0;
3099
3100 /* Check whether the pointer is valid or not. */
3101 if ((offset == 0x0000) || (offset == 0xffff))
3102 return 0;
3103
3104 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3105 /*
3106 * Check whether the alternative MAC address is valid or not.
3107 * Some cards have a non-0xffff pointer but don't actually
3108 * use an alternative MAC address.
3109 *
3110 * Check whether the broadcast bit is set or not.
3111 */
3112 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3113 if (((myea[0] & 0xff) & 0x01) == 0)
3114 return offset; /* Found */
3115
3116 /* Not found */
3117 return 0;
3118 }
3119
3120 static int
3121 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3122 {
3123 uint16_t myea[ETHER_ADDR_LEN / 2];
3124 uint16_t offset = NVM_OFF_MACADDR;
3125 int do_invert = 0;
3126
3127 switch (sc->sc_type) {
3128 case WM_T_82580:
3129 case WM_T_I350:
3130 case WM_T_I354:
3131 /* EEPROM Top Level Partitioning */
3132 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3133 break;
3134 case WM_T_82571:
3135 case WM_T_82575:
3136 case WM_T_82576:
3137 case WM_T_80003:
3138 case WM_T_I210:
3139 case WM_T_I211:
3140 offset = wm_check_alt_mac_addr(sc);
3141 if (offset == 0)
3142 if ((sc->sc_funcid & 0x01) == 1)
3143 do_invert = 1;
3144 break;
3145 default:
3146 if ((sc->sc_funcid & 0x01) == 1)
3147 do_invert = 1;
3148 break;
3149 }
3150
3151 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3152 goto bad;
3153
3154 enaddr[0] = myea[0] & 0xff;
3155 enaddr[1] = myea[0] >> 8;
3156 enaddr[2] = myea[1] & 0xff;
3157 enaddr[3] = myea[1] >> 8;
3158 enaddr[4] = myea[2] & 0xff;
3159 enaddr[5] = myea[2] >> 8;
3160
3161 /*
3162 * Toggle the LSB of the MAC address on the second port
3163 * of some dual port cards.
3164 */
3165 if (do_invert != 0)
3166 enaddr[5] ^= 1;
3167
3168 return 0;
3169
3170 bad:
3171 return -1;
3172 }
3173
3174 /*
3175 * wm_set_ral:
3176 *
3177 * Set an entry in the receive address list.
3178 */
3179 static void
3180 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3181 {
3182 uint32_t ral_lo, ral_hi;
3183
3184 if (enaddr != NULL) {
3185 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3186 (enaddr[3] << 24);
3187 ral_hi = enaddr[4] | (enaddr[5] << 8);
3188 ral_hi |= RAL_AV;
3189 } else {
3190 ral_lo = 0;
3191 ral_hi = 0;
3192 }
3193
3194 if (sc->sc_type >= WM_T_82544) {
3195 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3196 ral_lo);
3197 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3198 ral_hi);
3199 } else {
3200 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3201 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3202 }
3203 }
3204
3205 /*
3206 * wm_mchash:
3207 *
3208 * Compute the hash of the multicast address for the 4096-bit
3209 * multicast filter.
3210 */
3211 static uint32_t
3212 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3213 {
3214 static const int lo_shift[4] = { 4, 3, 2, 0 };
3215 static const int hi_shift[4] = { 4, 5, 6, 8 };
3216 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3217 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3218 uint32_t hash;
3219
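	/*
	 * The hash is formed from bits of the last two address bytes;
	 * sc_mchash_type selects one of four shift pairs.  ICH/PCH
	 * parts use a 1024-bit table (10-bit hash), the others a
	 * 4096-bit table (12-bit hash).
	 */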
3220 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3221 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3222 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3223 || (sc->sc_type == WM_T_PCH_SPT)) {
3224 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3225 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3226 return (hash & 0x3ff);
3227 }
3228 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3229 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3230
3231 return (hash & 0xfff);
3232 }
3233
3234 /*
3235 * wm_set_filter:
3236 *
3237 * Set up the receive filter.
3238 */
3239 static void
3240 wm_set_filter(struct wm_softc *sc)
3241 {
3242 struct ethercom *ec = &sc->sc_ethercom;
3243 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3244 struct ether_multi *enm;
3245 struct ether_multistep step;
3246 bus_addr_t mta_reg;
3247 uint32_t hash, reg, bit;
3248 int i, size, ralmax;
3249
3250 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3251 device_xname(sc->sc_dev), __func__));
3252
3253 if (sc->sc_type >= WM_T_82544)
3254 mta_reg = WMREG_CORDOVA_MTA;
3255 else
3256 mta_reg = WMREG_MTA;
3257
3258 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3259
3260 if (ifp->if_flags & IFF_BROADCAST)
3261 sc->sc_rctl |= RCTL_BAM;
3262 if (ifp->if_flags & IFF_PROMISC) {
3263 sc->sc_rctl |= RCTL_UPE;
3264 goto allmulti;
3265 }
3266
3267 /*
3268 * Set the station address in the first RAL slot, and
3269 * clear the remaining slots.
3270 */
3271 if (sc->sc_type == WM_T_ICH8)
3272 size = WM_RAL_TABSIZE_ICH8 - 1;
3273 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3274 || (sc->sc_type == WM_T_PCH))
3275 size = WM_RAL_TABSIZE_ICH8;
3276 else if (sc->sc_type == WM_T_PCH2)
3277 size = WM_RAL_TABSIZE_PCH2;
3278 else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
3279 size = WM_RAL_TABSIZE_PCH_LPT;
3280 else if (sc->sc_type == WM_T_82575)
3281 size = WM_RAL_TABSIZE_82575;
3282 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3283 size = WM_RAL_TABSIZE_82576;
3284 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3285 size = WM_RAL_TABSIZE_I350;
3286 else
3287 size = WM_RAL_TABSIZE;
3288 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3289
3290 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3291 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3292 switch (i) {
3293 case 0:
3294 /* We can use all entries */
3295 ralmax = size;
3296 break;
3297 case 1:
3298 /* Only RAR[0] */
3299 ralmax = 1;
3300 break;
3301 default:
3302 /* available SHRA + RAR[0] */
3303 ralmax = i + 1;
3304 }
3305 } else
3306 ralmax = size;
3307 for (i = 1; i < size; i++) {
3308 if (i < ralmax)
3309 wm_set_ral(sc, NULL, i);
3310 }
3311
3312 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3313 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3314 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3315 || (sc->sc_type == WM_T_PCH_SPT))
3316 size = WM_ICH8_MC_TABSIZE;
3317 else
3318 size = WM_MC_TABSIZE;
3319 /* Clear out the multicast table. */
3320 for (i = 0; i < size; i++)
3321 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3322
3323 ETHER_LOCK(ec);
3324 ETHER_FIRST_MULTI(step, ec, enm);
3325 while (enm != NULL) {
3326 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3327 ETHER_UNLOCK(ec);
3328 /*
3329 * We must listen to a range of multicast addresses.
3330 * For now, just accept all multicasts, rather than
3331 * trying to set only those filter bits needed to match
3332 * the range. (At this time, the only use of address
3333 * ranges is for IP multicast routing, for which the
3334 * range is big enough to require all bits set.)
3335 */
3336 goto allmulti;
3337 }
3338
3339 hash = wm_mchash(sc, enm->enm_addrlo);
3340
3341 reg = (hash >> 5);
3342 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3343 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3344 || (sc->sc_type == WM_T_PCH2)
3345 || (sc->sc_type == WM_T_PCH_LPT)
3346 || (sc->sc_type == WM_T_PCH_SPT))
3347 reg &= 0x1f;
3348 else
3349 reg &= 0x7f;
3350 bit = hash & 0x1f;
3351
3352 hash = CSR_READ(sc, mta_reg + (reg << 2));
3353 hash |= 1U << bit;
3354
3355 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3356 /*
3357 * 82544 Errata 9: Certain registers cannot be written
3358 * with particular alignments in PCI-X bus operation
3359 * (FCAH, MTA and VFTA).
3360 */
3361 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3362 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3363 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3364 } else
3365 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3366
3367 ETHER_NEXT_MULTI(step, enm);
3368 }
3369 ETHER_UNLOCK(ec);
3370
3371 ifp->if_flags &= ~IFF_ALLMULTI;
3372 goto setit;
3373
3374 allmulti:
3375 ifp->if_flags |= IFF_ALLMULTI;
3376 sc->sc_rctl |= RCTL_MPE;
3377
3378 setit:
3379 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3380 }
3381
3382 /* Reset and init related */
3383
3384 static void
3385 wm_set_vlan(struct wm_softc *sc)
3386 {
3387
3388 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3389 device_xname(sc->sc_dev), __func__));
3390
3391 /* Deal with VLAN enables. */
3392 if (VLAN_ATTACHED(&sc->sc_ethercom))
3393 sc->sc_ctrl |= CTRL_VME;
3394 else
3395 sc->sc_ctrl &= ~CTRL_VME;
3396
3397 /* Write the control registers. */
3398 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3399 }
3400
3401 static void
3402 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3403 {
3404 uint32_t gcr;
3405 pcireg_t ctrl2;
3406
3407 gcr = CSR_READ(sc, WMREG_GCR);
3408
3409 /* Only take action if timeout value is defaulted to 0 */
3410 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3411 goto out;
3412
3413 if ((gcr & GCR_CAP_VER2) == 0) {
3414 gcr |= GCR_CMPL_TMOUT_10MS;
3415 goto out;
3416 }
3417
3418 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3419 sc->sc_pcixe_capoff + PCIE_DCSR2);
3420 ctrl2 |= WM_PCIE_DCSR2_16MS;
3421 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3422 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3423
3424 out:
3425 /* Disable completion timeout resend */
3426 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3427
3428 CSR_WRITE(sc, WMREG_GCR, gcr);
3429 }
3430
3431 void
3432 wm_get_auto_rd_done(struct wm_softc *sc)
3433 {
3434 int i;
3435
3436 /* wait for eeprom to reload */
3437 switch (sc->sc_type) {
3438 case WM_T_82571:
3439 case WM_T_82572:
3440 case WM_T_82573:
3441 case WM_T_82574:
3442 case WM_T_82583:
3443 case WM_T_82575:
3444 case WM_T_82576:
3445 case WM_T_82580:
3446 case WM_T_I350:
3447 case WM_T_I354:
3448 case WM_T_I210:
3449 case WM_T_I211:
3450 case WM_T_80003:
3451 case WM_T_ICH8:
3452 case WM_T_ICH9:
3453 for (i = 0; i < 10; i++) {
3454 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3455 break;
3456 delay(1000);
3457 }
3458 if (i == 10) {
3459 log(LOG_ERR, "%s: auto read from eeprom failed to "
3460 "complete\n", device_xname(sc->sc_dev));
3461 }
3462 break;
3463 default:
3464 break;
3465 }
3466 }
3467
3468 void
3469 wm_lan_init_done(struct wm_softc *sc)
3470 {
3471 uint32_t reg = 0;
3472 int i;
3473
3474 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3475 device_xname(sc->sc_dev), __func__));
3476
3477 /* Wait for eeprom to reload */
3478 switch (sc->sc_type) {
3479 case WM_T_ICH10:
3480 case WM_T_PCH:
3481 case WM_T_PCH2:
3482 case WM_T_PCH_LPT:
3483 case WM_T_PCH_SPT:
3484 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3485 reg = CSR_READ(sc, WMREG_STATUS);
3486 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3487 break;
3488 delay(100);
3489 }
3490 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3491 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3492 "complete\n", device_xname(sc->sc_dev), __func__);
3493 }
3494 break;
3495 default:
3496 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3497 __func__);
3498 break;
3499 }
3500
3501 reg &= ~STATUS_LAN_INIT_DONE;
3502 CSR_WRITE(sc, WMREG_STATUS, reg);
3503 }
3504
3505 void
3506 wm_get_cfg_done(struct wm_softc *sc)
3507 {
3508 int mask;
3509 uint32_t reg;
3510 int i;
3511
3512 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3513 device_xname(sc->sc_dev), __func__));
3514
3515 /* Wait for eeprom to reload */
3516 switch (sc->sc_type) {
3517 case WM_T_82542_2_0:
3518 case WM_T_82542_2_1:
3519 /* null */
3520 break;
3521 case WM_T_82543:
3522 case WM_T_82544:
3523 case WM_T_82540:
3524 case WM_T_82545:
3525 case WM_T_82545_3:
3526 case WM_T_82546:
3527 case WM_T_82546_3:
3528 case WM_T_82541:
3529 case WM_T_82541_2:
3530 case WM_T_82547:
3531 case WM_T_82547_2:
3532 case WM_T_82573:
3533 case WM_T_82574:
3534 case WM_T_82583:
3535 /* generic */
3536 delay(10*1000);
3537 break;
3538 case WM_T_80003:
3539 case WM_T_82571:
3540 case WM_T_82572:
3541 case WM_T_82575:
3542 case WM_T_82576:
3543 case WM_T_82580:
3544 case WM_T_I350:
3545 case WM_T_I354:
3546 case WM_T_I210:
3547 case WM_T_I211:
3548 if (sc->sc_type == WM_T_82571) {
3549 /* Only 82571 shares port 0 */
3550 mask = EEMNGCTL_CFGDONE_0;
3551 } else
3552 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3553 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3554 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3555 break;
3556 delay(1000);
3557 }
3558 if (i >= WM_PHY_CFG_TIMEOUT) {
3559 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3560 device_xname(sc->sc_dev), __func__));
3561 }
3562 break;
3563 case WM_T_ICH8:
3564 case WM_T_ICH9:
3565 case WM_T_ICH10:
3566 case WM_T_PCH:
3567 case WM_T_PCH2:
3568 case WM_T_PCH_LPT:
3569 case WM_T_PCH_SPT:
3570 delay(10*1000);
3571 if (sc->sc_type >= WM_T_ICH10)
3572 wm_lan_init_done(sc);
3573 else
3574 wm_get_auto_rd_done(sc);
3575
3576 reg = CSR_READ(sc, WMREG_STATUS);
3577 if ((reg & STATUS_PHYRA) != 0)
3578 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3579 break;
3580 default:
3581 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3582 __func__);
3583 break;
3584 }
3585 }
3586
3587 /* Init hardware bits */
3588 void
3589 wm_initialize_hardware_bits(struct wm_softc *sc)
3590 {
3591 uint32_t tarc0, tarc1, reg;
3592
3593 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3594 device_xname(sc->sc_dev), __func__));
3595
3596 /* For 82571 variant, 80003 and ICHs */
3597 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3598 || (sc->sc_type >= WM_T_80003)) {
3599
3600 /* Transmit Descriptor Control 0 */
3601 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3602 reg |= TXDCTL_COUNT_DESC;
3603 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3604
3605 /* Transmit Descriptor Control 1 */
3606 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3607 reg |= TXDCTL_COUNT_DESC;
3608 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3609
3610 /* TARC0 */
3611 tarc0 = CSR_READ(sc, WMREG_TARC0);
3612 switch (sc->sc_type) {
3613 case WM_T_82571:
3614 case WM_T_82572:
3615 case WM_T_82573:
3616 case WM_T_82574:
3617 case WM_T_82583:
3618 case WM_T_80003:
3619 /* Clear bits 30..27 */
3620 tarc0 &= ~__BITS(30, 27);
3621 break;
3622 default:
3623 break;
3624 }
3625
3626 switch (sc->sc_type) {
3627 case WM_T_82571:
3628 case WM_T_82572:
3629 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3630
3631 tarc1 = CSR_READ(sc, WMREG_TARC1);
3632 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3633 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3634 /* 8257[12] Errata No.7 */
3635 tarc1 |= __BIT(22); /* TARC1 bits 22 */
3636
3637 /* TARC1 bit 28 */
3638 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3639 tarc1 &= ~__BIT(28);
3640 else
3641 tarc1 |= __BIT(28);
3642 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3643
3644 /*
3645 * 8257[12] Errata No.13
3646 * Disable Dynamic Clock Gating.
3647 */
3648 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3649 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3650 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3651 break;
3652 case WM_T_82573:
3653 case WM_T_82574:
3654 case WM_T_82583:
3655 if ((sc->sc_type == WM_T_82574)
3656 || (sc->sc_type == WM_T_82583))
3657 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3658
3659 /* Extended Device Control */
3660 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3661 reg &= ~__BIT(23); /* Clear bit 23 */
3662 reg |= __BIT(22); /* Set bit 22 */
3663 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3664
3665 /* Device Control */
3666 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3667 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3668
3669 /* PCIe Control Register */
3670 /*
3671 * 82573 Errata (unknown).
3672 *
3673 * 82574 Errata 25 and 82583 Errata 12
3674 * "Dropped Rx Packets":
3675 * NVM Image Version 2.1.4 and newer does not have this bug.
3676 */
3677 reg = CSR_READ(sc, WMREG_GCR);
3678 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3679 CSR_WRITE(sc, WMREG_GCR, reg);
3680
3681 if ((sc->sc_type == WM_T_82574)
3682 || (sc->sc_type == WM_T_82583)) {
3683 /*
3684 * Document says this bit must be set for
3685 * proper operation.
3686 */
3687 reg = CSR_READ(sc, WMREG_GCR);
3688 reg |= __BIT(22);
3689 CSR_WRITE(sc, WMREG_GCR, reg);
3690
3691 /*
3692 * Apply a workaround for the hardware errata
3693 * documented in the errata docs. It fixes an
3694 * issue where some error prone or unreliable
3695 * PCIe completions occur, particularly with
3696 * ASPM enabled. Without the fix, the issue
3697 * can cause Tx timeouts.
3698 */
3699 reg = CSR_READ(sc, WMREG_GCR2);
3700 reg |= __BIT(0);
3701 CSR_WRITE(sc, WMREG_GCR2, reg);
3702 }
3703 break;
3704 case WM_T_80003:
3705 /* TARC0 */
3706 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3707 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3708 tarc0 &= ~__BIT(20); /* Clear bits 20 */
3709
3710 /* TARC1 bit 28 */
3711 tarc1 = CSR_READ(sc, WMREG_TARC1);
3712 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3713 tarc1 &= ~__BIT(28);
3714 else
3715 tarc1 |= __BIT(28);
3716 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3717 break;
3718 case WM_T_ICH8:
3719 case WM_T_ICH9:
3720 case WM_T_ICH10:
3721 case WM_T_PCH:
3722 case WM_T_PCH2:
3723 case WM_T_PCH_LPT:
3724 case WM_T_PCH_SPT:
3725 /* TARC0 */
3726 if ((sc->sc_type == WM_T_ICH8)
3727 || (sc->sc_type == WM_T_PCH_SPT)) {
3728 /* Set TARC0 bits 29 and 28 */
3729 tarc0 |= __BITS(29, 28);
3730 }
3731 /* Set TARC0 bits 23,24,26,27 */
3732 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3733
3734 /* CTRL_EXT */
3735 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3736 reg |= __BIT(22); /* Set bit 22 */
3737 /*
3738 * Enable PHY low-power state when MAC is at D3
3739 * w/o WoL
3740 */
3741 if (sc->sc_type >= WM_T_PCH)
3742 reg |= CTRL_EXT_PHYPDEN;
3743 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3744
3745 /* TARC1 */
3746 tarc1 = CSR_READ(sc, WMREG_TARC1);
3747 /* bit 28 */
3748 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3749 tarc1 &= ~__BIT(28);
3750 else
3751 tarc1 |= __BIT(28);
3752 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3753 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3754
3755 /* Device Status */
3756 if (sc->sc_type == WM_T_ICH8) {
3757 reg = CSR_READ(sc, WMREG_STATUS);
3758 reg &= ~__BIT(31);
3759 CSR_WRITE(sc, WMREG_STATUS, reg);
3760
3761 }
3762
3763 /* IOSFPC */
3764 if (sc->sc_type == WM_T_PCH_SPT) {
3765 reg = CSR_READ(sc, WMREG_IOSFPC);
3766 reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
3767 CSR_WRITE(sc, WMREG_IOSFPC, reg);
3768 }
3769 /*
3770 * To work around a descriptor data corruption issue seen
3771 * with NFS v2 UDP traffic, just disable the NFS filtering
3772 * capability.
3773 */
3774 reg = CSR_READ(sc, WMREG_RFCTL);
3775 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3776 CSR_WRITE(sc, WMREG_RFCTL, reg);
3777 break;
3778 default:
3779 break;
3780 }
3781 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3782
3783 switch (sc->sc_type) {
3784 /*
3785 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
3786 * Avoid RSS Hash Value bug.
3787 */
3788 case WM_T_82571:
3789 case WM_T_82572:
3790 case WM_T_82573:
3791 case WM_T_80003:
3792 case WM_T_ICH8:
3793 reg = CSR_READ(sc, WMREG_RFCTL);
3794 reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3795 CSR_WRITE(sc, WMREG_RFCTL, reg);
3796 break;
3797 case WM_T_82574:
3798 /* Use the extended Rx descriptor. */
3799 reg = CSR_READ(sc, WMREG_RFCTL);
3800 reg |= WMREG_RFCTL_EXSTEN;
3801 CSR_WRITE(sc, WMREG_RFCTL, reg);
3802 break;
3803 default:
3804 break;
3805 }
3806 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
3807 /*
3808 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
3809 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
3810 * "Certain Malformed IPv6 Extension Headers are Not Processed
3811 * Correctly by the Device"
3812 *
3813 * I354(C2000) Errata AVR53:
3814 * "Malformed IPv6 Extension Headers May Result in LAN Device
3815 * Hang"
3816 */
3817 reg = CSR_READ(sc, WMREG_RFCTL);
3818 reg |= WMREG_RFCTL_IPV6EXDIS;
3819 CSR_WRITE(sc, WMREG_RFCTL, reg);
3820 }
3821 }
3822
3823 static uint32_t
3824 wm_rxpbs_adjust_82580(uint32_t val)
3825 {
3826 uint32_t rv = 0;
3827
3828 if (val < __arraycount(wm_82580_rxpbs_table))
3829 rv = wm_82580_rxpbs_table[val];
3830
3831 return rv;
3832 }
3833
3834 /*
3835 * wm_reset_phy:
3836 *
3837 * generic PHY reset function.
3838 * Same as e1000_phy_hw_reset_generic()
3839 */
3840 static void
3841 wm_reset_phy(struct wm_softc *sc)
3842 {
3843 uint32_t reg;
3844
3845 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3846 device_xname(sc->sc_dev), __func__));
3847 if (wm_phy_resetisblocked(sc))
3848 return;
3849
3850 sc->phy.acquire(sc);
3851
3852 reg = CSR_READ(sc, WMREG_CTRL);
3853 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
3854 CSR_WRITE_FLUSH(sc);
3855
3856 delay(sc->phy.reset_delay_us);
3857
3858 CSR_WRITE(sc, WMREG_CTRL, reg);
3859 CSR_WRITE_FLUSH(sc);
3860
3861 delay(150);
3862
3863 sc->phy.release(sc);
3864
3865 wm_get_cfg_done(sc);
3866 }
3867
3868 static void
3869 wm_flush_desc_rings(struct wm_softc *sc)
3870 {
3871 pcireg_t preg;
3872 uint32_t reg;
3873 int nexttx;
3874
3875 /* First, disable MULR fix in FEXTNVM11 */
3876 reg = CSR_READ(sc, WMREG_FEXTNVM11);
3877 reg |= FEXTNVM11_DIS_MULRFIX;
3878 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
3879
3880 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
3881 reg = CSR_READ(sc, WMREG_TDLEN(0));
3882 if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
3883 struct wm_txqueue *txq;
3884 wiseman_txdesc_t *txd;
3885
3886 /* TX */
3887 printf("%s: Need TX flush (reg = %08x, len = %u)\n",
3888 device_xname(sc->sc_dev), preg, reg);
3889 reg = CSR_READ(sc, WMREG_TCTL);
3890 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
3891
3892 txq = &sc->sc_queue[0].wmq_txq;
3893 nexttx = txq->txq_next;
3894 txd = &txq->txq_descs[nexttx];
3895 wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
3896 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
3897 txd->wtx_fields.wtxu_status = 0;
3898 txd->wtx_fields.wtxu_options = 0;
3899 txd->wtx_fields.wtxu_vlan = 0;
3900
3901 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
3902 BUS_SPACE_BARRIER_WRITE);
3903
3904 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
3905 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
3906 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
3907 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
3908 delay(250);
3909 }
3910 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
3911 if (preg & DESCRING_STATUS_FLUSH_REQ) {
3912 uint32_t rctl;
3913
3914 /* RX */
3915 printf("%s: Need RX flush (reg = %08x)\n",
3916 device_xname(sc->sc_dev), preg);
3917 rctl = CSR_READ(sc, WMREG_RCTL);
3918 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
3919 CSR_WRITE_FLUSH(sc);
3920 delay(150);
3921
3922 reg = CSR_READ(sc, WMREG_RXDCTL(0));
3923 /* zero the lower 14 bits (prefetch and host thresholds) */
3924 reg &= 0xffffc000;
3925 /*
3926 * update thresholds: prefetch threshold to 31, host threshold
3927 * to 1 and make sure the granularity is "descriptors" and not
3928 * "cache lines"
3929 */
3930 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
3931 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
3932
3933 /*
3934 * momentarily enable the RX ring for the changes to take
3935 * effect
3936 */
3937 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
3938 CSR_WRITE_FLUSH(sc);
3939 delay(150);
3940 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
3941 }
3942 }
3943
3944 /*
3945 * wm_reset:
3946 *
3947 * Reset the i82542 chip.
3948 */
3949 static void
3950 wm_reset(struct wm_softc *sc)
3951 {
3952 int phy_reset = 0;
3953 int i, error = 0;
3954 uint32_t reg;
3955
3956 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3957 device_xname(sc->sc_dev), __func__));
3958 KASSERT(sc->sc_type != 0);
3959
3960 /*
3961 * Allocate on-chip memory according to the MTU size.
3962 * The Packet Buffer Allocation register must be written
3963 * before the chip is reset.
3964 */
3965 switch (sc->sc_type) {
3966 case WM_T_82547:
3967 case WM_T_82547_2:
3968 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3969 PBA_22K : PBA_30K;
3970 for (i = 0; i < sc->sc_nqueues; i++) {
3971 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3972 txq->txq_fifo_head = 0;
3973 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3974 txq->txq_fifo_size =
3975 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3976 txq->txq_fifo_stall = 0;
3977 }
3978 break;
3979 case WM_T_82571:
3980 case WM_T_82572:
3981 case WM_T_82575: /* XXX need special handing for jumbo frames */
3982 case WM_T_80003:
3983 sc->sc_pba = PBA_32K;
3984 break;
3985 case WM_T_82573:
3986 sc->sc_pba = PBA_12K;
3987 break;
3988 case WM_T_82574:
3989 case WM_T_82583:
3990 sc->sc_pba = PBA_20K;
3991 break;
3992 case WM_T_82576:
3993 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3994 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3995 break;
3996 case WM_T_82580:
3997 case WM_T_I350:
3998 case WM_T_I354:
3999 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
4000 break;
4001 case WM_T_I210:
4002 case WM_T_I211:
4003 sc->sc_pba = PBA_34K;
4004 break;
4005 case WM_T_ICH8:
4006 /* Workaround for a bit corruption issue in FIFO memory */
4007 sc->sc_pba = PBA_8K;
4008 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4009 break;
4010 case WM_T_ICH9:
4011 case WM_T_ICH10:
4012 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
4013 PBA_14K : PBA_10K;
4014 break;
4015 case WM_T_PCH:
4016 case WM_T_PCH2:
4017 case WM_T_PCH_LPT:
4018 case WM_T_PCH_SPT:
4019 sc->sc_pba = PBA_26K;
4020 break;
4021 default:
4022 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4023 PBA_40K : PBA_48K;
4024 break;
4025 }
4026 /*
4027 * Only old or non-multiqueue devices have the PBA register
4028 * XXX Need special handling for 82575.
4029 */
4030 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4031 || (sc->sc_type == WM_T_82575))
4032 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4033
4034 /* Prevent the PCI-E bus from sticking */
4035 if (sc->sc_flags & WM_F_PCIE) {
4036 int timeout = 800;
4037
4038 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4039 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4040
4041 while (timeout--) {
4042 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4043 == 0)
4044 break;
4045 delay(100);
4046 }
4047 }
4048
4049 /* Set the completion timeout for interface */
4050 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4051 || (sc->sc_type == WM_T_82580)
4052 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4053 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
4054 wm_set_pcie_completion_timeout(sc);
4055
4056 /* Clear interrupt */
4057 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4058 if (wm_is_using_msix(sc)) {
4059 if (sc->sc_type != WM_T_82574) {
4060 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4061 CSR_WRITE(sc, WMREG_EIAC, 0);
4062 } else {
4063 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4064 }
4065 }
4066
4067 /* Stop the transmit and receive processes. */
4068 CSR_WRITE(sc, WMREG_RCTL, 0);
4069 sc->sc_rctl &= ~RCTL_EN;
4070 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4071 CSR_WRITE_FLUSH(sc);
4072
4073 /* XXX set_tbi_sbp_82543() */
4074
4075 delay(10*1000);
4076
4077 /* Must acquire the MDIO ownership before MAC reset */
4078 switch (sc->sc_type) {
4079 case WM_T_82573:
4080 case WM_T_82574:
4081 case WM_T_82583:
4082 error = wm_get_hw_semaphore_82573(sc);
4083 break;
4084 default:
4085 break;
4086 }
4087
4088 /*
4089 * 82541 Errata 29? & 82547 Errata 28?
4090 * See also the description about PHY_RST bit in CTRL register
4091 * in 8254x_GBe_SDM.pdf.
4092 */
4093 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4094 CSR_WRITE(sc, WMREG_CTRL,
4095 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4096 CSR_WRITE_FLUSH(sc);
4097 delay(5000);
4098 }
4099
4100 switch (sc->sc_type) {
4101 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4102 case WM_T_82541:
4103 case WM_T_82541_2:
4104 case WM_T_82547:
4105 case WM_T_82547_2:
4106 /*
4107 * On some chipsets, a reset through a memory-mapped write
4108 * cycle can cause the chip to reset before completing the
4109 * write cycle. This causes major headache that can be
4110 * avoided by issuing the reset via indirect register writes
4111 * through I/O space.
4112 *
4113 * So, if we successfully mapped the I/O BAR at attach time,
4114 * use that. Otherwise, try our luck with a memory-mapped
4115 * reset.
4116 */
4117 if (sc->sc_flags & WM_F_IOH_VALID)
4118 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4119 else
4120 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4121 break;
4122 case WM_T_82545_3:
4123 case WM_T_82546_3:
4124 /* Use the shadow control register on these chips. */
4125 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4126 break;
4127 case WM_T_80003:
4128 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4129 sc->phy.acquire(sc);
4130 CSR_WRITE(sc, WMREG_CTRL, reg);
4131 sc->phy.release(sc);
4132 break;
4133 case WM_T_ICH8:
4134 case WM_T_ICH9:
4135 case WM_T_ICH10:
4136 case WM_T_PCH:
4137 case WM_T_PCH2:
4138 case WM_T_PCH_LPT:
4139 case WM_T_PCH_SPT:
4140 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4141 if (wm_phy_resetisblocked(sc) == false) {
4142 /*
4143 * Gate automatic PHY configuration by hardware on
4144 * non-managed 82579
4145 */
4146 if ((sc->sc_type == WM_T_PCH2)
4147 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4148 == 0))
4149 wm_gate_hw_phy_config_ich8lan(sc, true);
4150
4151 reg |= CTRL_PHY_RESET;
4152 phy_reset = 1;
4153 } else
4154 printf("XXX reset is blocked!!!\n");
4155 sc->phy.acquire(sc);
4156 CSR_WRITE(sc, WMREG_CTRL, reg);
4157 /* Don't insert a completion barrier when reset */
4158 delay(20*1000);
4159 mutex_exit(sc->sc_ich_phymtx);
4160 break;
4161 case WM_T_82580:
4162 case WM_T_I350:
4163 case WM_T_I354:
4164 case WM_T_I210:
4165 case WM_T_I211:
4166 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4167 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
4168 CSR_WRITE_FLUSH(sc);
4169 delay(5000);
4170 break;
4171 case WM_T_82542_2_0:
4172 case WM_T_82542_2_1:
4173 case WM_T_82543:
4174 case WM_T_82540:
4175 case WM_T_82545:
4176 case WM_T_82546:
4177 case WM_T_82571:
4178 case WM_T_82572:
4179 case WM_T_82573:
4180 case WM_T_82574:
4181 case WM_T_82575:
4182 case WM_T_82576:
4183 case WM_T_82583:
4184 default:
4185 /* Everything else can safely use the documented method. */
4186 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4187 break;
4188 }
4189
4190 /* Must release the MDIO ownership after MAC reset */
4191 switch (sc->sc_type) {
4192 case WM_T_82573:
4193 case WM_T_82574:
4194 case WM_T_82583:
4195 if (error == 0)
4196 wm_put_hw_semaphore_82573(sc);
4197 break;
4198 default:
4199 break;
4200 }
4201
4202 if (phy_reset != 0)
4203 wm_get_cfg_done(sc);
4204
4205 /* reload EEPROM */
4206 switch (sc->sc_type) {
4207 case WM_T_82542_2_0:
4208 case WM_T_82542_2_1:
4209 case WM_T_82543:
4210 case WM_T_82544:
4211 delay(10);
4212 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4213 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4214 CSR_WRITE_FLUSH(sc);
4215 delay(2000);
4216 break;
4217 case WM_T_82540:
4218 case WM_T_82545:
4219 case WM_T_82545_3:
4220 case WM_T_82546:
4221 case WM_T_82546_3:
4222 delay(5*1000);
4223 /* XXX Disable HW ARPs on ASF enabled adapters */
4224 break;
4225 case WM_T_82541:
4226 case WM_T_82541_2:
4227 case WM_T_82547:
4228 case WM_T_82547_2:
4229 delay(20000);
4230 /* XXX Disable HW ARPs on ASF enabled adapters */
4231 break;
4232 case WM_T_82571:
4233 case WM_T_82572:
4234 case WM_T_82573:
4235 case WM_T_82574:
4236 case WM_T_82583:
4237 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4238 delay(10);
4239 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4240 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4241 CSR_WRITE_FLUSH(sc);
4242 }
4243 /* check EECD_EE_AUTORD */
4244 wm_get_auto_rd_done(sc);
4245 /*
4246 * Phy configuration from NVM just starts after EECD_AUTO_RD
4247 * is set.
4248 */
4249 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4250 || (sc->sc_type == WM_T_82583))
4251 delay(25*1000);
4252 break;
4253 case WM_T_82575:
4254 case WM_T_82576:
4255 case WM_T_82580:
4256 case WM_T_I350:
4257 case WM_T_I354:
4258 case WM_T_I210:
4259 case WM_T_I211:
4260 case WM_T_80003:
4261 /* check EECD_EE_AUTORD */
4262 wm_get_auto_rd_done(sc);
4263 break;
4264 case WM_T_ICH8:
4265 case WM_T_ICH9:
4266 case WM_T_ICH10:
4267 case WM_T_PCH:
4268 case WM_T_PCH2:
4269 case WM_T_PCH_LPT:
4270 case WM_T_PCH_SPT:
4271 break;
4272 default:
4273 panic("%s: unknown type\n", __func__);
4274 }
4275
4276 /* Check whether EEPROM is present or not */
4277 switch (sc->sc_type) {
4278 case WM_T_82575:
4279 case WM_T_82576:
4280 case WM_T_82580:
4281 case WM_T_I350:
4282 case WM_T_I354:
4283 case WM_T_ICH8:
4284 case WM_T_ICH9:
4285 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4286 /* Not found */
4287 sc->sc_flags |= WM_F_EEPROM_INVALID;
4288 if (sc->sc_type == WM_T_82575)
4289 wm_reset_init_script_82575(sc);
4290 }
4291 break;
4292 default:
4293 break;
4294 }
4295
4296 if ((sc->sc_type == WM_T_82580)
4297 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4298 /* clear global device reset status bit */
4299 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4300 }
4301
4302 /* Clear any pending interrupt events. */
4303 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4304 reg = CSR_READ(sc, WMREG_ICR);
4305 if (wm_is_using_msix(sc)) {
4306 if (sc->sc_type != WM_T_82574) {
4307 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4308 CSR_WRITE(sc, WMREG_EIAC, 0);
4309 } else
4310 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4311 }
4312
4313 /* reload sc_ctrl */
4314 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4315
4316 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4317 wm_set_eee_i350(sc);
4318
4319 /* Clear the host wakeup bit after lcd reset */
4320 if (sc->sc_type >= WM_T_PCH) {
4321 reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
4322 BM_PORT_GEN_CFG);
4323 reg &= ~BM_WUC_HOST_WU_BIT;
4324 wm_gmii_hv_writereg(sc->sc_dev, 2,
4325 BM_PORT_GEN_CFG, reg);
4326 }
4327
4328 /*
4329 * For PCH, this write will make sure that any noise will be detected
4330 * as a CRC error and be dropped rather than show up as a bad packet
4331 * to the DMA engine
4332 */
4333 if (sc->sc_type == WM_T_PCH)
4334 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4335
4336 if (sc->sc_type >= WM_T_82544)
4337 CSR_WRITE(sc, WMREG_WUC, 0);
4338
4339 wm_reset_mdicnfg_82580(sc);
4340
4341 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4342 wm_pll_workaround_i210(sc);
4343 }
4344
4345 /*
4346 * wm_add_rxbuf:
4347 *
4348 * Add a receive buffer to the indicated descriptor.
4349 */
4350 static int
4351 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4352 {
4353 struct wm_softc *sc = rxq->rxq_sc;
4354 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4355 struct mbuf *m;
4356 int error;
4357
4358 KASSERT(mutex_owned(rxq->rxq_lock));
4359
4360 MGETHDR(m, M_DONTWAIT, MT_DATA);
4361 if (m == NULL)
4362 return ENOBUFS;
4363
4364 MCLGET(m, M_DONTWAIT);
4365 if ((m->m_flags & M_EXT) == 0) {
4366 m_freem(m);
4367 return ENOBUFS;
4368 }
4369
4370 if (rxs->rxs_mbuf != NULL)
4371 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4372
4373 rxs->rxs_mbuf = m;
4374
4375 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4376 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4377 BUS_DMA_READ | BUS_DMA_NOWAIT);
4378 if (error) {
4379 /* XXX XXX XXX */
4380 aprint_error_dev(sc->sc_dev,
4381 "unable to load rx DMA map %d, error = %d\n",
4382 idx, error);
4383 panic("wm_add_rxbuf");
4384 }
4385
4386 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4387 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4388
4389 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4390 if ((sc->sc_rctl & RCTL_EN) != 0)
4391 wm_init_rxdesc(rxq, idx);
4392 } else
4393 wm_init_rxdesc(rxq, idx);
4394
4395 return 0;
4396 }
4397
4398 /*
4399 * wm_rxdrain:
4400 *
4401 * Drain the receive queue.
4402 */
4403 static void
4404 wm_rxdrain(struct wm_rxqueue *rxq)
4405 {
4406 struct wm_softc *sc = rxq->rxq_sc;
4407 struct wm_rxsoft *rxs;
4408 int i;
4409
4410 KASSERT(mutex_owned(rxq->rxq_lock));
4411
4412 for (i = 0; i < WM_NRXDESC; i++) {
4413 rxs = &rxq->rxq_soft[i];
4414 if (rxs->rxs_mbuf != NULL) {
4415 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4416 m_freem(rxs->rxs_mbuf);
4417 rxs->rxs_mbuf = NULL;
4418 }
4419 }
4420 }
4421
4422
4423 /*
4424 * XXX copy from FreeBSD's sys/net/rss_config.c
4425 */
4426 /*
4427 * RSS secret key, intended to prevent attacks on load-balancing. Its
4428 * effectiveness may be limited by algorithm choice and available entropy
4429 * during the boot.
4430 *
4431 * XXXRW: And that we don't randomize it yet!
4432 *
4433 * This is the default Microsoft RSS specification key which is also
4434 * the Chelsio T5 firmware default key.
4435 */
4436 #define RSS_KEYSIZE 40
4437 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4438 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4439 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4440 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4441 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4442 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4443 };
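/*
 * Note (derived from the code here, not from external documentation): the
 * 40 bytes above exactly fill the RSSRK_NUM_REGS (10 registers x 4 bytes)
 * RSS random key registers written in wm_init_rss() below; the CTASSERT()
 * there checks this.
 */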
4444
4445 /*
4446 * Caller must pass an array of size sizeof(wm_rss_key).
4447 *
4448 * XXX
4449 * As if_ixgbe may also use this function, it should not be an
4450 * if_wm specific function.
4451 */
4452 static void
4453 wm_rss_getkey(uint8_t *key)
4454 {
4455
4456 memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4457 }
4458
4459 /*
4460 * Set up registers for RSS.
4461 *
4462 * XXX no VMDq support yet
4463 */
4464 static void
4465 wm_init_rss(struct wm_softc *sc)
4466 {
4467 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4468 int i;
4469
4470 CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4471
4472 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4473 int qid, reta_ent;
4474
4475 qid = i % sc->sc_nqueues;
4476 switch(sc->sc_type) {
4477 case WM_T_82574:
4478 reta_ent = __SHIFTIN(qid,
4479 RETA_ENT_QINDEX_MASK_82574);
4480 break;
4481 case WM_T_82575:
4482 reta_ent = __SHIFTIN(qid,
4483 RETA_ENT_QINDEX1_MASK_82575);
4484 break;
4485 default:
4486 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4487 break;
4488 }
4489
4490 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4491 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4492 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4493 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4494 }
4495
4496 wm_rss_getkey((uint8_t *)rss_key);
4497 for (i = 0; i < RSSRK_NUM_REGS; i++)
4498 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4499
4500 if (sc->sc_type == WM_T_82574)
4501 mrqc = MRQC_ENABLE_RSS_MQ_82574;
4502 else
4503 mrqc = MRQC_ENABLE_RSS_MQ;
4504
4505 /*
4506 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
4507 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
4508 */
4509 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4510 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4511 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4512 mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4513
4514 CSR_WRITE(sc, WMREG_MRQC, mrqc);
4515 }
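/*
 * For example (illustrative, derived from the RETA loop above): with
 * sc_nqueues == 4, the RETA_NUM_ENTRIES redirection entries are programmed
 * 0, 1, 2, 3, 0, 1, 2, 3, ..., so incoming flows are spread round-robin
 * across the four queues according to bits of the computed RSS hash.
 */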
4516
4517 /*
4518 * Adjust the TX and RX queue numbers which the system actually uses.
4519 *
4520 * The numbers are affected by the parameters below:
4521 * - The number of hardware queues
4522 * - The number of MSI-X vectors (= "nvectors" argument)
4523 * - ncpu
4524 */
4525 static void
4526 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4527 {
4528 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4529
4530 if (nvectors < 2) {
4531 sc->sc_nqueues = 1;
4532 return;
4533 }
4534
4535 switch(sc->sc_type) {
4536 case WM_T_82572:
4537 hw_ntxqueues = 2;
4538 hw_nrxqueues = 2;
4539 break;
4540 case WM_T_82574:
4541 hw_ntxqueues = 2;
4542 hw_nrxqueues = 2;
4543 break;
4544 case WM_T_82575:
4545 hw_ntxqueues = 4;
4546 hw_nrxqueues = 4;
4547 break;
4548 case WM_T_82576:
4549 hw_ntxqueues = 16;
4550 hw_nrxqueues = 16;
4551 break;
4552 case WM_T_82580:
4553 case WM_T_I350:
4554 case WM_T_I354:
4555 hw_ntxqueues = 8;
4556 hw_nrxqueues = 8;
4557 break;
4558 case WM_T_I210:
4559 hw_ntxqueues = 4;
4560 hw_nrxqueues = 4;
4561 break;
4562 case WM_T_I211:
4563 hw_ntxqueues = 2;
4564 hw_nrxqueues = 2;
4565 break;
4566 /*
4567 * As the ethernet controllers below do not support MSI-X,
4568 * this driver does not use multiqueue on them:
4569 * - WM_T_80003
4570 * - WM_T_ICH8
4571 * - WM_T_ICH9
4572 * - WM_T_ICH10
4573 * - WM_T_PCH
4574 * - WM_T_PCH2
4575 * - WM_T_PCH_LPT
4576 */
4577 default:
4578 hw_ntxqueues = 1;
4579 hw_nrxqueues = 1;
4580 break;
4581 }
4582
4583 hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4584
4585 /*
4586 * As more queues than MSI-X vectors cannot improve scaling, we limit
4587 * the number of queues actually used.
4588 */
4589 if (nvectors < hw_nqueues + 1) {
4590 sc->sc_nqueues = nvectors - 1;
4591 } else {
4592 sc->sc_nqueues = hw_nqueues;
4593 }
4594
4595 /*
4596 * As more queues than CPUs cannot improve scaling, we limit
4597 * the number of queues actually used.
4598 */
4599 if (ncpu < sc->sc_nqueues)
4600 sc->sc_nqueues = ncpu;
4601 }
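/*
 * Worked example (illustrative): on an 82576 (16 hardware TX/RX queue
 * pairs) attached with 5 MSI-X vectors on an 8-CPU machine,
 * hw_nqueues == 16 and nvectors - 1 == 4, so sc_nqueues becomes 4;
 * the final ncpu cap (8 here) has no further effect.
 */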
4602
4603 static inline bool
4604 wm_is_using_msix(struct wm_softc *sc)
4605 {
4606
4607 return (sc->sc_nintrs > 1);
4608 }
4609
4610 static inline bool
4611 wm_is_using_multiqueue(struct wm_softc *sc)
4612 {
4613
4614 return (sc->sc_nqueues > 1);
4615 }
4616
4617 static int
4618 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
4619 {
4620 struct wm_queue *wmq = &sc->sc_queue[qidx];
4621 wmq->wmq_id = qidx;
4622 wmq->wmq_intr_idx = intr_idx;
4623 wmq->wmq_si = softint_establish(SOFTINT_NET
4624 #ifdef WM_MPSAFE
4625 | SOFTINT_MPSAFE
4626 #endif
4627 , wm_handle_queue, wmq);
4628 if (wmq->wmq_si != NULL)
4629 return 0;
4630
4631 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
4632 wmq->wmq_id);
4633
4634 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
4635 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4636 return ENOMEM;
4637 }
4638
4639 /*
4640 * Both single interrupt MSI and INTx can use this function.
4641 */
4642 static int
4643 wm_setup_legacy(struct wm_softc *sc)
4644 {
4645 pci_chipset_tag_t pc = sc->sc_pc;
4646 const char *intrstr = NULL;
4647 char intrbuf[PCI_INTRSTR_LEN];
4648 int error;
4649
4650 error = wm_alloc_txrx_queues(sc);
4651 if (error) {
4652 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4653 error);
4654 return ENOMEM;
4655 }
4656 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4657 sizeof(intrbuf));
4658 #ifdef WM_MPSAFE
4659 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4660 #endif
4661 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4662 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4663 if (sc->sc_ihs[0] == NULL) {
4664 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4665 (pci_intr_type(pc, sc->sc_intrs[0])
4666 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4667 return ENOMEM;
4668 }
4669
4670 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4671 sc->sc_nintrs = 1;
4672
4673 return wm_softint_establish(sc, 0, 0);
4674 }
4675
4676 static int
4677 wm_setup_msix(struct wm_softc *sc)
4678 {
4679 void *vih;
4680 kcpuset_t *affinity;
4681 int qidx, error, intr_idx, txrx_established;
4682 pci_chipset_tag_t pc = sc->sc_pc;
4683 const char *intrstr = NULL;
4684 char intrbuf[PCI_INTRSTR_LEN];
4685 char intr_xname[INTRDEVNAMEBUF];
4686
4687 if (sc->sc_nqueues < ncpu) {
4688 /*
4689 * To avoid other devices' interrupts, the affinity of Tx/Rx
4690 * interrupts starts from CPU#1.
4691 */
4692 sc->sc_affinity_offset = 1;
4693 } else {
4694 /*
4695 * In this case, this device uses all CPUs. So, we unify the
4696 * affinitized cpu_index with the MSI-X vector number for readability.
4697 */
4698 sc->sc_affinity_offset = 0;
4699 }
4700
4701 error = wm_alloc_txrx_queues(sc);
4702 if (error) {
4703 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4704 error);
4705 return ENOMEM;
4706 }
4707
4708 kcpuset_create(&affinity, false);
4709 intr_idx = 0;
4710
4711 /*
4712 * TX and RX
4713 */
4714 txrx_established = 0;
4715 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4716 struct wm_queue *wmq = &sc->sc_queue[qidx];
4717 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4718
4719 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4720 sizeof(intrbuf));
4721 #ifdef WM_MPSAFE
4722 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4723 PCI_INTR_MPSAFE, true);
4724 #endif
4725 memset(intr_xname, 0, sizeof(intr_xname));
4726 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4727 device_xname(sc->sc_dev), qidx);
4728 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4729 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4730 if (vih == NULL) {
4731 aprint_error_dev(sc->sc_dev,
4732 "unable to establish MSI-X(for TX and RX)%s%s\n",
4733 intrstr ? " at " : "",
4734 intrstr ? intrstr : "");
4735
4736 goto fail;
4737 }
4738 kcpuset_zero(affinity);
4739 /* Round-robin affinity */
4740 kcpuset_set(affinity, affinity_to);
4741 error = interrupt_distribute(vih, affinity, NULL);
4742 if (error == 0) {
4743 aprint_normal_dev(sc->sc_dev,
4744 "for TX and RX interrupting at %s affinity to %u\n",
4745 intrstr, affinity_to);
4746 } else {
4747 aprint_normal_dev(sc->sc_dev,
4748 "for TX and RX interrupting at %s\n", intrstr);
4749 }
4750 sc->sc_ihs[intr_idx] = vih;
4751 if (wm_softint_establish(sc, qidx, intr_idx) != 0)
4752 goto fail;
4753 txrx_established++;
4754 intr_idx++;
4755 }
4756
4757 /*
4758 * LINK
4759 */
4760 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4761 sizeof(intrbuf));
4762 #ifdef WM_MPSAFE
4763 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4764 #endif
4765 memset(intr_xname, 0, sizeof(intr_xname));
4766 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4767 device_xname(sc->sc_dev));
4768 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4769 IPL_NET, wm_linkintr_msix, sc, intr_xname);
4770 if (vih == NULL) {
4771 aprint_error_dev(sc->sc_dev,
4772 "unable to establish MSI-X(for LINK)%s%s\n",
4773 intrstr ? " at " : "",
4774 intrstr ? intrstr : "");
4775
4776 goto fail;
4777 }
4778 /* keep default affinity to LINK interrupt */
4779 aprint_normal_dev(sc->sc_dev,
4780 "for LINK interrupting at %s\n", intrstr);
4781 sc->sc_ihs[intr_idx] = vih;
4782 sc->sc_link_intr_idx = intr_idx;
4783
4784 sc->sc_nintrs = sc->sc_nqueues + 1;
4785 kcpuset_destroy(affinity);
4786 return 0;
4787
4788 fail:
4789 for (qidx = 0; qidx < txrx_established; qidx++) {
4790 struct wm_queue *wmq = &sc->sc_queue[qidx];
4791 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
4792 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4793 }
4794
4795 kcpuset_destroy(affinity);
4796 return ENOMEM;
4797 }
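/*
 * Resulting vector layout (illustrative, from the loop above): with
 * sc_nqueues == 4, vectors 0..3 handle the per-queue TX/RX interrupts and
 * vector 4 handles the LINK interrupt, so sc_nintrs == sc_nqueues + 1 == 5.
 */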
4798
4799 static void
4800 wm_turnon(struct wm_softc *sc)
4801 {
4802 int i;
4803
4804 KASSERT(WM_CORE_LOCKED(sc));
4805
4806 /*
4807 * must unset stopping flags in ascending order.
4808 */
4809 for(i = 0; i < sc->sc_nqueues; i++) {
4810 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4811 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4812
4813 mutex_enter(txq->txq_lock);
4814 txq->txq_stopping = false;
4815 mutex_exit(txq->txq_lock);
4816
4817 mutex_enter(rxq->rxq_lock);
4818 rxq->rxq_stopping = false;
4819 mutex_exit(rxq->rxq_lock);
4820 }
4821
4822 sc->sc_core_stopping = false;
4823 }
4824
4825 static void
4826 wm_turnoff(struct wm_softc *sc)
4827 {
4828 int i;
4829
4830 KASSERT(WM_CORE_LOCKED(sc));
4831
4832 sc->sc_core_stopping = true;
4833
4834 /*
4835 * must set stopping flags in ascending order.
4836 */
4837 for(i = 0; i < sc->sc_nqueues; i++) {
4838 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4839 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4840
4841 mutex_enter(rxq->rxq_lock);
4842 rxq->rxq_stopping = true;
4843 mutex_exit(rxq->rxq_lock);
4844
4845 mutex_enter(txq->txq_lock);
4846 txq->txq_stopping = true;
4847 mutex_exit(txq->txq_lock);
4848 }
4849 }
4850
4851 /*
4852 * write interrupt interval value to ITR or EITR
4853 */
4854 static void
4855 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
4856 {
4857
4858 if (!wmq->wmq_set_itr)
4859 return;
4860
4861 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4862 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
4863
4864 /*
4865 * 82575 doesn't have the CNT_INGR field,
4866 * so overwrite the counter field in software.
4867 */
4868 if (sc->sc_type == WM_T_82575)
4869 eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
4870 else
4871 eitr |= EITR_CNT_INGR;
4872
4873 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
4874 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
4875 /*
4876 * 82574 has both ITR and EITR. Set EITR when we use
4877 * the multiqueue function with MSI-X.
4878 */
4879 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
4880 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
4881 } else {
4882 KASSERT(wmq->wmq_id == 0);
4883 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
4884 }
4885
4886 wmq->wmq_set_itr = false;
4887 }
4888
4889 /*
4890 * TODO
4891 * The dynamic ITR calculation below is almost the same as Linux igb's,
4892 * but it does not fit wm(4). So AIM stays disabled until we find an
4893 * appropriate ITR calculation.
4894 */
4895 /*
4896 * Calculate the interrupt interval value to be written to the register
4897 * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
4898 */
4899 static void
4900 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
4901 {
4902 #ifdef NOTYET
4903 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
4904 struct wm_txqueue *txq = &wmq->wmq_txq;
4905 uint32_t avg_size = 0;
4906 uint32_t new_itr;
4907
4908 if (rxq->rxq_packets)
4909 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
4910 if (txq->txq_packets)
4911 avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
4912
4913 if (avg_size == 0) {
4914 new_itr = 450; /* restore default value */
4915 goto out;
4916 }
4917
4918 /* Add 24 bytes to size to account for CRC, preamble, and gap */
4919 avg_size += 24;
4920
4921 /* Don't starve jumbo frames */
4922 avg_size = min(avg_size, 3000);
4923
4924 /* Give a little boost to mid-size frames */
4925 if ((avg_size > 300) && (avg_size < 1200))
4926 new_itr = avg_size / 3;
4927 else
4928 new_itr = avg_size / 2;
4929
4930 out:
4931 /*
4932 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
4933 * controllers. See sc->sc_itr_init setting in wm_init_locked().
4934 */
4935 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
4936 new_itr *= 4;
4937
4938 if (new_itr != wmq->wmq_itr) {
4939 wmq->wmq_itr = new_itr;
4940 wmq->wmq_set_itr = true;
4941 } else
4942 wmq->wmq_set_itr = false;
4943
4944 rxq->rxq_packets = 0;
4945 rxq->rxq_bytes = 0;
4946 txq->txq_packets = 0;
4947 txq->txq_bytes = 0;
4948 #endif
4949 }
4950
4951 /*
4952 * wm_init: [ifnet interface function]
4953 *
4954 * Initialize the interface.
4955 */
4956 static int
4957 wm_init(struct ifnet *ifp)
4958 {
4959 struct wm_softc *sc = ifp->if_softc;
4960 int ret;
4961
4962 WM_CORE_LOCK(sc);
4963 ret = wm_init_locked(ifp);
4964 WM_CORE_UNLOCK(sc);
4965
4966 return ret;
4967 }
4968
4969 static int
4970 wm_init_locked(struct ifnet *ifp)
4971 {
4972 struct wm_softc *sc = ifp->if_softc;
4973 int i, j, trynum, error = 0;
4974 uint32_t reg;
4975
4976 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4977 device_xname(sc->sc_dev), __func__));
4978 KASSERT(WM_CORE_LOCKED(sc));
4979
4980 /*
4981 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4982 * There is a small but measurable benefit to avoiding the adjustment
4983 * of the descriptor so that the headers are aligned, for normal mtu,
4984 * on such platforms. One possibility is that the DMA itself is
4985 * slightly more efficient if the front of the entire packet (instead
4986 * of the front of the headers) is aligned.
4987 *
4988 * Note we must always set align_tweak to 0 if we are using
4989 * jumbo frames.
4990 */
4991 #ifdef __NO_STRICT_ALIGNMENT
4992 sc->sc_align_tweak = 0;
4993 #else
4994 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4995 sc->sc_align_tweak = 0;
4996 else
4997 sc->sc_align_tweak = 2;
4998 #endif /* __NO_STRICT_ALIGNMENT */
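/*
 * (Illustrative note, assuming the usual receive-alignment rationale: the
 * 2-byte tweak offsets the 14-byte Ethernet header so that the IP header
 * that follows lands on a 4-byte boundary on strict-alignment platforms.)
 */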
4999
5000 /* Cancel any pending I/O. */
5001 wm_stop_locked(ifp, 0);
5002
5003 /* update statistics before reset */
5004 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
5005 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
5006
5007 /* PCH_SPT hardware workaround */
5008 if (sc->sc_type == WM_T_PCH_SPT)
5009 wm_flush_desc_rings(sc);
5010
5011 /* Reset the chip to a known state. */
5012 wm_reset(sc);
5013
5014 /* AMT based hardware can now take control from firmware */
5015 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
5016 wm_get_hw_control(sc);
5017
5018 /* Init hardware bits */
5019 wm_initialize_hardware_bits(sc);
5020
5021 /* Reset the PHY. */
5022 if (sc->sc_flags & WM_F_HAS_MII)
5023 wm_gmii_reset(sc);
5024
5025 /* Calculate (E)ITR value */
5026 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
5027 /*
5028 * For NEWQUEUE's EITR (except for 82575).
5029 * 82575's EITR should be set to the same throttling value as other
5030 * old controllers' ITR because the interrupt/sec calculation
5031 * is the same, that is, 1,000,000,000 / (N * 256).
5032 *
5033 * 82574's EITR should be set to the same throttling value as ITR.
5034 *
5035 * For N interrupts/sec, set this value to:
5036 * 1,000,000 / N in contrast to the ITR throttling value.
5037 */
5038 sc->sc_itr_init = 450;
5039 } else if (sc->sc_type >= WM_T_82543) {
5040 /*
5041 * Set up the interrupt throttling register (units of 256ns)
5042 * Note that a footnote in Intel's documentation says this
5043 * ticker runs at 1/4 the rate when the chip is in 100Mbit
5044 * or 10Mbit mode. Empirically, it appears to be the case
5045 * that that is also true for the 1024ns units of the other
5046 * interrupt-related timer registers -- so, really, we ought
5047 * to divide this value by 4 when the link speed is low.
5048 *
5049 * XXX implement this division at link speed change!
5050 */
5051
5052 /*
5053 * For N interrupts/sec, set this value to:
5054 * 1,000,000,000 / (N * 256). Note that we set the
5055 * absolute and packet timer values to this value
5056 * divided by 4 to get "simple timer" behavior.
5057 */
5058 sc->sc_itr_init = 1500; /* 2604 ints/sec */
5059 }
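/*
 * Worked arithmetic for the defaults above (illustrative): with the legacy
 * ITR formula 1,000,000,000 / (N * 256), the value 1500 corresponds to
 * N ~= 2604 interrupts/sec, matching the comment; with the NEWQUEUE EITR
 * formula 1,000,000 / N, the value 450 corresponds to N ~= 2222
 * interrupts/sec.
 */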
5060
5061 error = wm_init_txrx_queues(sc);
5062 if (error)
5063 goto out;
5064
5065 /*
5066 * Clear out the VLAN table -- we don't use it (yet).
5067 */
5068 CSR_WRITE(sc, WMREG_VET, 0);
5069 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5070 trynum = 10; /* Due to hw errata */
5071 else
5072 trynum = 1;
5073 for (i = 0; i < WM_VLAN_TABSIZE; i++)
5074 for (j = 0; j < trynum; j++)
5075 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
5076
5077 /*
5078 * Set up flow-control parameters.
5079 *
5080 * XXX Values could probably stand some tuning.
5081 */
5082 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
5083 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
5084 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
5085 && (sc->sc_type != WM_T_PCH_SPT)) {
5086 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
5087 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
5088 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
5089 }
5090
5091 sc->sc_fcrtl = FCRTL_DFLT;
5092 if (sc->sc_type < WM_T_82543) {
5093 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
5094 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
5095 } else {
5096 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
5097 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
5098 }
5099
5100 if (sc->sc_type == WM_T_80003)
5101 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
5102 else
5103 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
5104
5105 /* Writes the control register. */
5106 wm_set_vlan(sc);
5107
5108 if (sc->sc_flags & WM_F_HAS_MII) {
5109 int val;
5110
5111 switch (sc->sc_type) {
5112 case WM_T_80003:
5113 case WM_T_ICH8:
5114 case WM_T_ICH9:
5115 case WM_T_ICH10:
5116 case WM_T_PCH:
5117 case WM_T_PCH2:
5118 case WM_T_PCH_LPT:
5119 case WM_T_PCH_SPT:
5120 /*
5121 * Set the mac to wait the maximum time between each
5122 * iteration and increase the max iterations when
5123 * polling the phy; this fixes erroneous timeouts at
5124 * 10Mbps.
5125 */
5126 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
5127 0xFFFF);
5128 val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
5129 val |= 0x3F;
5130 wm_kmrn_writereg(sc,
5131 KUMCTRLSTA_OFFSET_INB_PARAM, val);
5132 break;
5133 default:
5134 break;
5135 }
5136
5137 if (sc->sc_type == WM_T_80003) {
5138 val = CSR_READ(sc, WMREG_CTRL_EXT);
5139 val &= ~CTRL_EXT_LINK_MODE_MASK;
5140 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
5141
5142 /* Bypass RX and TX FIFO's */
5143 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
5144 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
5145 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
5146 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
5147 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
5148 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
5149 }
5150 }
5151 #if 0
5152 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
5153 #endif
5154
5155 /* Set up checksum offload parameters. */
5156 reg = CSR_READ(sc, WMREG_RXCSUM);
5157 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
5158 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
5159 reg |= RXCSUM_IPOFL;
5160 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
5161 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
5162 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
5163 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
5164 CSR_WRITE(sc, WMREG_RXCSUM, reg);
5165
5166 /* Set registers about MSI-X */
5167 if (wm_is_using_msix(sc)) {
5168 uint32_t ivar;
5169 struct wm_queue *wmq;
5170 int qid, qintr_idx;
5171
5172 if (sc->sc_type == WM_T_82575) {
5173 /* Interrupt control */
5174 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5175 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
5176 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5177
5178 /* TX and RX */
5179 for (i = 0; i < sc->sc_nqueues; i++) {
5180 wmq = &sc->sc_queue[i];
5181 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
5182 EITR_TX_QUEUE(wmq->wmq_id)
5183 | EITR_RX_QUEUE(wmq->wmq_id));
5184 }
5185 /* Link status */
5186 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
5187 EITR_OTHER);
5188 } else if (sc->sc_type == WM_T_82574) {
5189 /* Interrupt control */
5190 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5191 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
5192 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5193
5194 /*
5195 * Work around an issue with spurious interrupts
5196 * in MSI-X mode.
5197 * At wm_initialize_hardware_bits(), sc_nintrs has not
5198 * been initialized yet, so re-initialize WMREG_RFCTL here.
5199 */
5200 reg = CSR_READ(sc, WMREG_RFCTL);
5201 reg |= WMREG_RFCTL_ACKDIS;
5202 CSR_WRITE(sc, WMREG_RFCTL, reg);
5203
5204 ivar = 0;
5205 /* TX and RX */
5206 for (i = 0; i < sc->sc_nqueues; i++) {
5207 wmq = &sc->sc_queue[i];
5208 qid = wmq->wmq_id;
5209 qintr_idx = wmq->wmq_intr_idx;
5210
5211 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5212 IVAR_TX_MASK_Q_82574(qid));
5213 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5214 IVAR_RX_MASK_Q_82574(qid));
5215 }
5216 /* Link status */
5217 ivar |= __SHIFTIN((IVAR_VALID_82574
5218 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
5219 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
5220 } else {
5221 /* Interrupt control */
5222 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
5223 | GPIE_EIAME | GPIE_PBA);
5224
5225 switch (sc->sc_type) {
5226 case WM_T_82580:
5227 case WM_T_I350:
5228 case WM_T_I354:
5229 case WM_T_I210:
5230 case WM_T_I211:
5231 /* TX and RX */
5232 for (i = 0; i < sc->sc_nqueues; i++) {
5233 wmq = &sc->sc_queue[i];
5234 qid = wmq->wmq_id;
5235 qintr_idx = wmq->wmq_intr_idx;
5236
5237 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
5238 ivar &= ~IVAR_TX_MASK_Q(qid);
5239 ivar |= __SHIFTIN((qintr_idx
5240 | IVAR_VALID),
5241 IVAR_TX_MASK_Q(qid));
5242 ivar &= ~IVAR_RX_MASK_Q(qid);
5243 ivar |= __SHIFTIN((qintr_idx
5244 | IVAR_VALID),
5245 IVAR_RX_MASK_Q(qid));
5246 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
5247 }
5248 break;
5249 case WM_T_82576:
5250 /* TX and RX */
5251 for (i = 0; i < sc->sc_nqueues; i++) {
5252 wmq = &sc->sc_queue[i];
5253 qid = wmq->wmq_id;
5254 qintr_idx = wmq->wmq_intr_idx;
5255
5256 ivar = CSR_READ(sc,
5257 WMREG_IVAR_Q_82576(qid));
5258 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
5259 ivar |= __SHIFTIN((qintr_idx
5260 | IVAR_VALID),
5261 IVAR_TX_MASK_Q_82576(qid));
5262 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
5263 ivar |= __SHIFTIN((qintr_idx
5264 | IVAR_VALID),
5265 IVAR_RX_MASK_Q_82576(qid));
5266 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
5267 ivar);
5268 }
5269 break;
5270 default:
5271 break;
5272 }
5273
5274 /* Link status */
5275 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
5276 IVAR_MISC_OTHER);
5277 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
5278 }
5279
5280 if (wm_is_using_multiqueue(sc)) {
5281 wm_init_rss(sc);
5282
5283 /*
5284 ** NOTE: Receive Full-Packet Checksum Offload
5285 ** is mutually exclusive with Multiqueue. However,
5286 ** this is not the same as TCP/IP checksums, which
5287 ** still work.
5288 */
5289 reg = CSR_READ(sc, WMREG_RXCSUM);
5290 reg |= RXCSUM_PCSD;
5291 CSR_WRITE(sc, WMREG_RXCSUM, reg);
5292 }
5293 }
5294
5295 /* Set up the interrupt registers. */
5296 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5297 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
5298 ICR_RXO | ICR_RXT0;
5299 if (wm_is_using_msix(sc)) {
5300 uint32_t mask;
5301 struct wm_queue *wmq;
5302
5303 switch (sc->sc_type) {
5304 case WM_T_82574:
5305 mask = 0;
5306 for (i = 0; i < sc->sc_nqueues; i++) {
5307 wmq = &sc->sc_queue[i];
5308 mask |= ICR_TXQ(wmq->wmq_id);
5309 mask |= ICR_RXQ(wmq->wmq_id);
5310 }
5311 mask |= ICR_OTHER;
5312 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
5313 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
5314 break;
5315 default:
5316 if (sc->sc_type == WM_T_82575) {
5317 mask = 0;
5318 for (i = 0; i < sc->sc_nqueues; i++) {
5319 wmq = &sc->sc_queue[i];
5320 mask |= EITR_TX_QUEUE(wmq->wmq_id);
5321 mask |= EITR_RX_QUEUE(wmq->wmq_id);
5322 }
5323 mask |= EITR_OTHER;
5324 } else {
5325 mask = 0;
5326 for (i = 0; i < sc->sc_nqueues; i++) {
5327 wmq = &sc->sc_queue[i];
5328 mask |= 1 << wmq->wmq_intr_idx;
5329 }
5330 mask |= 1 << sc->sc_link_intr_idx;
5331 }
5332 CSR_WRITE(sc, WMREG_EIAC, mask);
5333 CSR_WRITE(sc, WMREG_EIAM, mask);
5334 CSR_WRITE(sc, WMREG_EIMS, mask);
5335 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
5336 break;
5337 }
5338 } else
5339 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5340
5341 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5342 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5343 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5344 || (sc->sc_type == WM_T_PCH_SPT)) {
5345 reg = CSR_READ(sc, WMREG_KABGTXD);
5346 reg |= KABGTXD_BGSQLBIAS;
5347 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5348 }
5349
5350 /* Set up the inter-packet gap. */
5351 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5352
5353 if (sc->sc_type >= WM_T_82543) {
5354 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5355 struct wm_queue *wmq = &sc->sc_queue[qidx];
5356 wm_itrs_writereg(sc, wmq);
5357 }
5358 /*
5359 * Link interrupts occur much less often than TX
5360 * and RX interrupts. So, we don't tune the
5361 * EITR(WM_MSIX_LINKINTR_IDX) value like
5362 * FreeBSD's if_igb.
5363 */
5364 }
5365
5366 /* Set the VLAN ethernetype. */
5367 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
5368
5369 /*
5370 * Set up the transmit control register; we start out with
5371 * a collision distance suitable for FDX, but update it when
5372 * we resolve the media type.
5373 */
5374 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
5375 | TCTL_CT(TX_COLLISION_THRESHOLD)
5376 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5377 if (sc->sc_type >= WM_T_82571)
5378 sc->sc_tctl |= TCTL_MULR;
5379 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5380
5381 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5382 /* Write TDT after TCTL.EN is set. See the document. */
5383 CSR_WRITE(sc, WMREG_TDT(0), 0);
5384 }
5385
5386 if (sc->sc_type == WM_T_80003) {
5387 reg = CSR_READ(sc, WMREG_TCTL_EXT);
5388 reg &= ~TCTL_EXT_GCEX_MASK;
5389 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5390 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5391 }
5392
5393 /* Set the media. */
5394 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5395 goto out;
5396
5397 /* Configure for OS presence */
5398 wm_init_manageability(sc);
5399
5400 /*
5401 * Set up the receive control register; we actually program
5402 * the register when we set the receive filter. Use multicast
5403 * address offset type 0.
5404 *
5405 * Only the i82544 has the ability to strip the incoming
5406 * CRC, so we don't enable that feature.
5407 */
5408 sc->sc_mchash_type = 0;
5409 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5410 | RCTL_MO(sc->sc_mchash_type);
5411
5412 /*
5413 	 * The 82574 uses the one-buffer extended Rx descriptor format.
5414 */
5415 if (sc->sc_type == WM_T_82574)
5416 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
5417
5418 /*
5419 * The I350 has a bug where it always strips the CRC whether
5420 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
5421 */
5422 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5423 || (sc->sc_type == WM_T_I210))
5424 sc->sc_rctl |= RCTL_SECRC;
5425
5426 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5427 && (ifp->if_mtu > ETHERMTU)) {
5428 sc->sc_rctl |= RCTL_LPE;
5429 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5430 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5431 }
5432
5433 if (MCLBYTES == 2048) {
5434 sc->sc_rctl |= RCTL_2k;
5435 } else {
5436 if (sc->sc_type >= WM_T_82543) {
5437 switch (MCLBYTES) {
5438 case 4096:
5439 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5440 break;
5441 case 8192:
5442 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5443 break;
5444 case 16384:
5445 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5446 break;
5447 default:
5448 panic("wm_init: MCLBYTES %d unsupported",
5449 MCLBYTES);
5450 break;
5451 }
5452 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
5453 }
5454
5455 /* Set the receive filter. */
5456 wm_set_filter(sc);
5457
5458 /* Enable ECC */
5459 switch (sc->sc_type) {
5460 case WM_T_82571:
5461 reg = CSR_READ(sc, WMREG_PBA_ECC);
5462 reg |= PBA_ECC_CORR_EN;
5463 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5464 break;
5465 case WM_T_PCH_LPT:
5466 case WM_T_PCH_SPT:
5467 reg = CSR_READ(sc, WMREG_PBECCSTS);
5468 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5469 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5470
5471 sc->sc_ctrl |= CTRL_MEHE;
5472 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5473 break;
5474 default:
5475 break;
5476 }
5477
5478 	/* On 82575 and later, set RDT only after RX has been enabled */
5479 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5480 int qidx;
5481 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5482 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5483 for (i = 0; i < WM_NRXDESC; i++) {
5484 mutex_enter(rxq->rxq_lock);
5485 wm_init_rxdesc(rxq, i);
5486 mutex_exit(rxq->rxq_lock);
5488 }
5489 }
5490 }
5491
5492 wm_turnon(sc);
5493
5494 /* Start the one second link check clock. */
5495 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5496
5497 /* ...all done! */
5498 ifp->if_flags |= IFF_RUNNING;
5499 ifp->if_flags &= ~IFF_OACTIVE;
5500
5501 out:
5502 sc->sc_if_flags = ifp->if_flags;
5503 if (error)
5504 log(LOG_ERR, "%s: interface not running\n",
5505 device_xname(sc->sc_dev));
5506 return error;
5507 }
5508
5509 /*
5510 * wm_stop: [ifnet interface function]
5511 *
5512 * Stop transmission on the interface.
5513 */
5514 static void
5515 wm_stop(struct ifnet *ifp, int disable)
5516 {
5517 struct wm_softc *sc = ifp->if_softc;
5518
5519 WM_CORE_LOCK(sc);
5520 wm_stop_locked(ifp, disable);
5521 WM_CORE_UNLOCK(sc);
5522 }
5523
5524 static void
5525 wm_stop_locked(struct ifnet *ifp, int disable)
5526 {
5527 struct wm_softc *sc = ifp->if_softc;
5528 struct wm_txsoft *txs;
5529 int i, qidx;
5530
5531 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5532 device_xname(sc->sc_dev), __func__));
5533 KASSERT(WM_CORE_LOCKED(sc));
5534
5535 wm_turnoff(sc);
5536
5537 /* Stop the one second clock. */
5538 callout_stop(&sc->sc_tick_ch);
5539
5540 /* Stop the 82547 Tx FIFO stall check timer. */
5541 if (sc->sc_type == WM_T_82547)
5542 callout_stop(&sc->sc_txfifo_ch);
5543
5544 if (sc->sc_flags & WM_F_HAS_MII) {
5545 /* Down the MII. */
5546 mii_down(&sc->sc_mii);
5547 } else {
5548 #if 0
5549 /* Should we clear PHY's status properly? */
5550 wm_reset(sc);
5551 #endif
5552 }
5553
5554 /* Stop the transmit and receive processes. */
5555 CSR_WRITE(sc, WMREG_TCTL, 0);
5556 CSR_WRITE(sc, WMREG_RCTL, 0);
5557 sc->sc_rctl &= ~RCTL_EN;
5558
5559 /*
5560 * Clear the interrupt mask to ensure the device cannot assert its
5561 * interrupt line.
5562 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5563 * service any currently pending or shared interrupt.
5564 */
5565 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5566 sc->sc_icr = 0;
5567 if (wm_is_using_msix(sc)) {
5568 if (sc->sc_type != WM_T_82574) {
5569 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5570 CSR_WRITE(sc, WMREG_EIAC, 0);
5571 } else
5572 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5573 }
5574
5575 /* Release any queued transmit buffers. */
5576 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5577 struct wm_queue *wmq = &sc->sc_queue[qidx];
5578 struct wm_txqueue *txq = &wmq->wmq_txq;
5579 mutex_enter(txq->txq_lock);
5580 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5581 txs = &txq->txq_soft[i];
5582 if (txs->txs_mbuf != NULL) {
5583 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5584 m_freem(txs->txs_mbuf);
5585 txs->txs_mbuf = NULL;
5586 }
5587 }
5588 mutex_exit(txq->txq_lock);
5589 }
5590
5591 /* Mark the interface as down and cancel the watchdog timer. */
5592 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5593 ifp->if_timer = 0;
5594
5595 if (disable) {
5596 for (i = 0; i < sc->sc_nqueues; i++) {
5597 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5598 mutex_enter(rxq->rxq_lock);
5599 wm_rxdrain(rxq);
5600 mutex_exit(rxq->rxq_lock);
5601 }
5602 }
5603
5604 #if 0 /* notyet */
5605 if (sc->sc_type >= WM_T_82544)
5606 CSR_WRITE(sc, WMREG_WUC, 0);
5607 #endif
5608 }
5609
5610 static void
5611 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5612 {
5613 struct mbuf *m;
5614 int i;
5615
5616 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5617 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5618 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5619 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5620 m->m_data, m->m_len, m->m_flags);
5621 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5622 i, i == 1 ? "" : "s");
5623 }
5624
5625 /*
5626 * wm_82547_txfifo_stall:
5627 *
5628 * Callout used to wait for the 82547 Tx FIFO to drain,
5629 * reset the FIFO pointers, and restart packet transmission.
5630 */
5631 static void
5632 wm_82547_txfifo_stall(void *arg)
5633 {
5634 struct wm_softc *sc = arg;
5635 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5636
5637 mutex_enter(txq->txq_lock);
5638
5639 if (txq->txq_stopping)
5640 goto out;
5641
5642 if (txq->txq_fifo_stall) {
5643 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5644 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5645 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5646 /*
5647 * Packets have drained. Stop transmitter, reset
5648 * FIFO pointers, restart transmitter, and kick
5649 * the packet queue.
5650 */
5651 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5652 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5653 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5654 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5655 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5656 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5657 CSR_WRITE(sc, WMREG_TCTL, tctl);
5658 CSR_WRITE_FLUSH(sc);
5659
5660 txq->txq_fifo_head = 0;
5661 txq->txq_fifo_stall = 0;
5662 wm_start_locked(&sc->sc_ethercom.ec_if);
5663 } else {
5664 /*
5665 * Still waiting for packets to drain; try again in
5666 * another tick.
5667 */
5668 callout_schedule(&sc->sc_txfifo_ch, 1);
5669 }
5670 }
5671
5672 out:
5673 mutex_exit(txq->txq_lock);
5674 }
5675
5676 /*
5677 * wm_82547_txfifo_bugchk:
5678 *
5679 * Check for bug condition in the 82547 Tx FIFO. We need to
5680 * prevent enqueueing a packet that would wrap around the end
5681  * of the Tx FIFO ring buffer, otherwise the chip will croak.
5682 *
5683 * We do this by checking the amount of space before the end
5684 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5685 * the Tx FIFO, wait for all remaining packets to drain, reset
5686 * the internal FIFO pointers to the beginning, and restart
5687 * transmission on the interface.
5688 */
5689 #define WM_FIFO_HDR 0x10
5690 #define WM_82547_PAD_LEN 0x3e0
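/*
 * WM_FIFO_HDR is the per-packet header size the chip uses inside the
 * Tx FIFO (packet lengths are rounded up to this granularity below);
 * WM_82547_PAD_LEN is the wraparound threshold used by the stall check,
 * presumably matching the values in Intel's shared e1000 code.
 */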
5691 static int
5692 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5693 {
5694 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5695 int space = txq->txq_fifo_size - txq->txq_fifo_head;
5696 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5697
5698 /* Just return if already stalled. */
5699 if (txq->txq_fifo_stall)
5700 return 1;
5701
5702 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5703 /* Stall only occurs in half-duplex mode. */
5704 goto send_packet;
5705 }
5706
5707 if (len >= WM_82547_PAD_LEN + space) {
5708 txq->txq_fifo_stall = 1;
5709 callout_schedule(&sc->sc_txfifo_ch, 1);
5710 return 1;
5711 }
5712
5713 send_packet:
5714 txq->txq_fifo_head += len;
5715 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5716 txq->txq_fifo_head -= txq->txq_fifo_size;
5717
5718 return 0;
5719 }
5720
5721 static int
5722 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5723 {
5724 int error;
5725
5726 /*
5727 * Allocate the control data structures, and create and load the
5728 * DMA map for it.
5729 *
5730 * NOTE: All Tx descriptors must be in the same 4G segment of
5731 * memory. So must Rx descriptors. We simplify by allocating
5732 * both sets within the same 4G segment.
5733 */
5734 if (sc->sc_type < WM_T_82544)
5735 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5736 else
5737 WM_NTXDESC(txq) = WM_NTXDESC_82544;
5738 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5739 txq->txq_descsize = sizeof(nq_txdesc_t);
5740 else
5741 txq->txq_descsize = sizeof(wiseman_txdesc_t);
5742
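	/*
	 * bus_dmamem_alloc() is called with PAGE_SIZE alignment and a
	 * 4GB (0x100000000) boundary so the descriptor ring cannot cross
	 * a 4G segment, as required by the NOTE above.
	 */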
5743 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5744 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5745 1, &txq->txq_desc_rseg, 0)) != 0) {
5746 aprint_error_dev(sc->sc_dev,
5747 "unable to allocate TX control data, error = %d\n",
5748 error);
5749 goto fail_0;
5750 }
5751
5752 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5753 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5754 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5755 aprint_error_dev(sc->sc_dev,
5756 "unable to map TX control data, error = %d\n", error);
5757 goto fail_1;
5758 }
5759
5760 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5761 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5762 aprint_error_dev(sc->sc_dev,
5763 "unable to create TX control data DMA map, error = %d\n",
5764 error);
5765 goto fail_2;
5766 }
5767
5768 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5769 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5770 aprint_error_dev(sc->sc_dev,
5771 "unable to load TX control data DMA map, error = %d\n",
5772 error);
5773 goto fail_3;
5774 }
5775
5776 return 0;
5777
5778 fail_3:
5779 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5780 fail_2:
5781 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5782 WM_TXDESCS_SIZE(txq));
5783 fail_1:
5784 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5785 fail_0:
5786 return error;
5787 }
5788
5789 static void
5790 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5791 {
5792
5793 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5794 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5795 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5796 WM_TXDESCS_SIZE(txq));
5797 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5798 }
5799
5800 static int
5801 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5802 {
5803 int error;
5804 size_t rxq_descs_size;
5805
5806 /*
5807 * Allocate the control data structures, and create and load the
5808 * DMA map for it.
5809 *
5810 * NOTE: All Tx descriptors must be in the same 4G segment of
5811 * memory. So must Rx descriptors. We simplify by allocating
5812 * both sets within the same 4G segment.
5813 */
5814 rxq->rxq_ndesc = WM_NRXDESC;
5815 if (sc->sc_type == WM_T_82574)
5816 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
5817 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5818 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
5819 else
5820 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
5821 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
5822
5823 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
5824 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5825 1, &rxq->rxq_desc_rseg, 0)) != 0) {
5826 aprint_error_dev(sc->sc_dev,
5827 "unable to allocate RX control data, error = %d\n",
5828 error);
5829 goto fail_0;
5830 }
5831
5832 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5833 rxq->rxq_desc_rseg, rxq_descs_size,
5834 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
5835 aprint_error_dev(sc->sc_dev,
5836 "unable to map RX control data, error = %d\n", error);
5837 goto fail_1;
5838 }
5839
5840 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
5841 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5842 aprint_error_dev(sc->sc_dev,
5843 "unable to create RX control data DMA map, error = %d\n",
5844 error);
5845 goto fail_2;
5846 }
5847
5848 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5849 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
5850 aprint_error_dev(sc->sc_dev,
5851 "unable to load RX control data DMA map, error = %d\n",
5852 error);
5853 goto fail_3;
5854 }
5855
5856 return 0;
5857
5858 fail_3:
5859 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5860 fail_2:
5861 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
5862 rxq_descs_size);
5863 fail_1:
5864 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5865 fail_0:
5866 return error;
5867 }
5868
5869 static void
5870 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5871 {
5872
5873 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5874 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5875 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
5876 rxq->rxq_descsize * rxq->rxq_ndesc);
5877 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5878 }
5879
5880
5881 static int
5882 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5883 {
5884 int i, error;
5885
5886 /* Create the transmit buffer DMA maps. */
5887 WM_TXQUEUELEN(txq) =
5888 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5889 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5890 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5891 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5892 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5893 &txq->txq_soft[i].txs_dmamap)) != 0) {
5894 aprint_error_dev(sc->sc_dev,
5895 "unable to create Tx DMA map %d, error = %d\n",
5896 i, error);
5897 goto fail;
5898 }
5899 }
5900
5901 return 0;
5902
5903 fail:
5904 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5905 if (txq->txq_soft[i].txs_dmamap != NULL)
5906 bus_dmamap_destroy(sc->sc_dmat,
5907 txq->txq_soft[i].txs_dmamap);
5908 }
5909 return error;
5910 }
5911
5912 static void
5913 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5914 {
5915 int i;
5916
5917 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5918 if (txq->txq_soft[i].txs_dmamap != NULL)
5919 bus_dmamap_destroy(sc->sc_dmat,
5920 txq->txq_soft[i].txs_dmamap);
5921 }
5922 }
5923
5924 static int
5925 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5926 {
5927 int i, error;
5928
5929 /* Create the receive buffer DMA maps. */
5930 for (i = 0; i < rxq->rxq_ndesc; i++) {
5931 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5932 MCLBYTES, 0, 0,
5933 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5934 aprint_error_dev(sc->sc_dev,
5935 "unable to create Rx DMA map %d error = %d\n",
5936 i, error);
5937 goto fail;
5938 }
5939 rxq->rxq_soft[i].rxs_mbuf = NULL;
5940 }
5941
5942 return 0;
5943
5944 fail:
5945 for (i = 0; i < rxq->rxq_ndesc; i++) {
5946 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5947 bus_dmamap_destroy(sc->sc_dmat,
5948 rxq->rxq_soft[i].rxs_dmamap);
5949 }
5950 return error;
5951 }
5952
5953 static void
5954 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5955 {
5956 int i;
5957
5958 for (i = 0; i < rxq->rxq_ndesc; i++) {
5959 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5960 bus_dmamap_destroy(sc->sc_dmat,
5961 rxq->rxq_soft[i].rxs_dmamap);
5962 }
5963 }
5964
5965 /*
5966  * wm_alloc_txrx_queues:
5967  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
5968 */
5969 static int
5970 wm_alloc_txrx_queues(struct wm_softc *sc)
5971 {
5972 int i, error, tx_done, rx_done;
5973
5974 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5975 KM_SLEEP);
5976 if (sc->sc_queue == NULL) {
5977 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
5978 error = ENOMEM;
5979 goto fail_0;
5980 }
5981
5982 /*
5983 * For transmission
5984 */
5985 error = 0;
5986 tx_done = 0;
5987 for (i = 0; i < sc->sc_nqueues; i++) {
5988 #ifdef WM_EVENT_COUNTERS
5989 int j;
5990 const char *xname;
5991 #endif
5992 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5993 txq->txq_sc = sc;
5994 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5995
5996 error = wm_alloc_tx_descs(sc, txq);
5997 if (error)
5998 break;
5999 error = wm_alloc_tx_buffer(sc, txq);
6000 if (error) {
6001 wm_free_tx_descs(sc, txq);
6002 break;
6003 }
6004 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
6005 if (txq->txq_interq == NULL) {
6006 wm_free_tx_descs(sc, txq);
6007 wm_free_tx_buffer(sc, txq);
6008 error = ENOMEM;
6009 break;
6010 }
6011
6012 #ifdef WM_EVENT_COUNTERS
6013 xname = device_xname(sc->sc_dev);
6014
6015 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
6016 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
6017 WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
6018 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
6019 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
6020
6021 WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
6022 WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
6023 WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
6024 WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
6025 WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
6026 WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
6027
6028 for (j = 0; j < WM_NTXSEGS; j++) {
6029 snprintf(txq->txq_txseg_evcnt_names[j],
6030 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
6031 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
6032 NULL, xname, txq->txq_txseg_evcnt_names[j]);
6033 }
6034
6035 WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
6036
6037 WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
6038 #endif /* WM_EVENT_COUNTERS */
6039
6040 tx_done++;
6041 }
6042 if (error)
6043 goto fail_1;
6044
6045 /*
6046 	 * For receive
6047 */
6048 error = 0;
6049 rx_done = 0;
6050 for (i = 0; i < sc->sc_nqueues; i++) {
6051 #ifdef WM_EVENT_COUNTERS
6052 const char *xname;
6053 #endif
6054 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6055 rxq->rxq_sc = sc;
6056 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6057
6058 error = wm_alloc_rx_descs(sc, rxq);
6059 if (error)
6060 break;
6061
6062 error = wm_alloc_rx_buffer(sc, rxq);
6063 if (error) {
6064 wm_free_rx_descs(sc, rxq);
6065 break;
6066 }
6067
6068 #ifdef WM_EVENT_COUNTERS
6069 xname = device_xname(sc->sc_dev);
6070
6071 WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
6072
6073 WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
6074 WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
6075 #endif /* WM_EVENT_COUNTERS */
6076
6077 rx_done++;
6078 }
6079 if (error)
6080 goto fail_2;
6081
6082 return 0;
6083
6084 fail_2:
6085 for (i = 0; i < rx_done; i++) {
6086 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6087 wm_free_rx_buffer(sc, rxq);
6088 wm_free_rx_descs(sc, rxq);
6089 if (rxq->rxq_lock)
6090 mutex_obj_free(rxq->rxq_lock);
6091 }
6092 fail_1:
6093 for (i = 0; i < tx_done; i++) {
6094 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6095 pcq_destroy(txq->txq_interq);
6096 wm_free_tx_buffer(sc, txq);
6097 wm_free_tx_descs(sc, txq);
6098 if (txq->txq_lock)
6099 mutex_obj_free(txq->txq_lock);
6100 }
6101
6102 kmem_free(sc->sc_queue,
6103 sizeof(struct wm_queue) * sc->sc_nqueues);
6104 fail_0:
6105 return error;
6106 }
6107
6108 /*
6109  * wm_free_txrx_queues:
6110  *	Free {tx,rx} descriptors and {tx,rx} buffers.
6111 */
6112 static void
6113 wm_free_txrx_queues(struct wm_softc *sc)
6114 {
6115 int i;
6116
6117 for (i = 0; i < sc->sc_nqueues; i++) {
6118 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6119
6120 #ifdef WM_EVENT_COUNTERS
6121 WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
6122 WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
6123 WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
6124 #endif /* WM_EVENT_COUNTERS */
6125
6126 wm_free_rx_buffer(sc, rxq);
6127 wm_free_rx_descs(sc, rxq);
6128 if (rxq->rxq_lock)
6129 mutex_obj_free(rxq->rxq_lock);
6130 }
6131
6132 for (i = 0; i < sc->sc_nqueues; i++) {
6133 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6134 struct mbuf *m;
6135 #ifdef WM_EVENT_COUNTERS
6136 int j;
6137
6138 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
6139 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
6140 WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
6141 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
6142 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
6143 WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
6144 WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
6145 WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
6146 WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
6147 WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
6148 WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
6149
6150 for (j = 0; j < WM_NTXSEGS; j++)
6151 evcnt_detach(&txq->txq_ev_txseg[j]);
6152
6153 WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
6154 WM_Q_EVCNT_DETACH(txq, tu, txq, i);
6155 #endif /* WM_EVENT_COUNTERS */
6156
6157 /* drain txq_interq */
6158 while ((m = pcq_get(txq->txq_interq)) != NULL)
6159 m_freem(m);
6160 pcq_destroy(txq->txq_interq);
6161
6162 wm_free_tx_buffer(sc, txq);
6163 wm_free_tx_descs(sc, txq);
6164 if (txq->txq_lock)
6165 mutex_obj_free(txq->txq_lock);
6166 }
6167
6168 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
6169 }
6170
6171 static void
6172 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6173 {
6174
6175 KASSERT(mutex_owned(txq->txq_lock));
6176
6177 /* Initialize the transmit descriptor ring. */
6178 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
6179 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
6180 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6181 txq->txq_free = WM_NTXDESC(txq);
6182 txq->txq_next = 0;
6183 }
6184
6185 static void
6186 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6187 struct wm_txqueue *txq)
6188 {
6189
6190 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6191 device_xname(sc->sc_dev), __func__));
6192 KASSERT(mutex_owned(txq->txq_lock));
6193
6194 if (sc->sc_type < WM_T_82543) {
6195 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
6196 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
6197 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
6198 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
6199 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
6200 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
6201 } else {
6202 int qid = wmq->wmq_id;
6203
6204 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
6205 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
6206 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
6207 CSR_WRITE(sc, WMREG_TDH(qid), 0);
6208
6209 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6210 /*
6211 * Don't write TDT before TCTL.EN is set.
6212 * See the document.
6213 */
6214 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
6215 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
6216 | TXDCTL_WTHRESH(0));
6217 else {
6218 /* XXX should update with AIM? */
6219 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
6220 if (sc->sc_type >= WM_T_82540) {
6221 			/* TADV should hold the same value as TIDV */
6222 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
6223 }
6224
6225 CSR_WRITE(sc, WMREG_TDT(qid), 0);
6226 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
6227 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
6228 }
6229 }
6230 }
6231
6232 static void
6233 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6234 {
6235 int i;
6236
6237 KASSERT(mutex_owned(txq->txq_lock));
6238
6239 /* Initialize the transmit job descriptors. */
6240 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
6241 txq->txq_soft[i].txs_mbuf = NULL;
6242 txq->txq_sfree = WM_TXQUEUELEN(txq);
6243 txq->txq_snext = 0;
6244 txq->txq_sdirty = 0;
6245 }
6246
6247 static void
6248 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6249 struct wm_txqueue *txq)
6250 {
6251
6252 KASSERT(mutex_owned(txq->txq_lock));
6253
6254 /*
6255 * Set up some register offsets that are different between
6256 * the i82542 and the i82543 and later chips.
6257 */
6258 if (sc->sc_type < WM_T_82543)
6259 txq->txq_tdt_reg = WMREG_OLD_TDT;
6260 else
6261 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
6262
6263 wm_init_tx_descs(sc, txq);
6264 wm_init_tx_regs(sc, wmq, txq);
6265 wm_init_tx_buffer(sc, txq);
6266 }
6267
6268 static void
6269 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6270 struct wm_rxqueue *rxq)
6271 {
6272
6273 KASSERT(mutex_owned(rxq->rxq_lock));
6274
6275 /*
6276 * Initialize the receive descriptor and receive job
6277 * descriptor rings.
6278 */
6279 if (sc->sc_type < WM_T_82543) {
6280 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
6281 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
6282 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
6283 rxq->rxq_descsize * rxq->rxq_ndesc);
6284 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
6285 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
6286 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
6287
6288 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
6289 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
6290 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
6291 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
6292 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
6293 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
6294 } else {
6295 int qid = wmq->wmq_id;
6296
6297 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
6298 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
6299 CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
6300
6301 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6302 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
6303 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
6304
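			/*
			 * SRRCTL.BSIZEPKT is programmed in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
			 * divisibility check on MCLBYTES above.
			 */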
6305 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
6306 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
6307 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
6308 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
6309 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
6310 | RXDCTL_WTHRESH(1));
6311 CSR_WRITE(sc, WMREG_RDH(qid), 0);
6312 CSR_WRITE(sc, WMREG_RDT(qid), 0);
6313 } else {
6314 CSR_WRITE(sc, WMREG_RDH(qid), 0);
6315 CSR_WRITE(sc, WMREG_RDT(qid), 0);
6316 /* XXX should update with AIM? */
6317 CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
6318 			/* RADV MUST hold the same value as RDTR */
6319 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
6320 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
6321 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
6322 }
6323 }
6324 }
6325
6326 static int
6327 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6328 {
6329 struct wm_rxsoft *rxs;
6330 int error, i;
6331
6332 KASSERT(mutex_owned(rxq->rxq_lock));
6333
6334 for (i = 0; i < rxq->rxq_ndesc; i++) {
6335 rxs = &rxq->rxq_soft[i];
6336 if (rxs->rxs_mbuf == NULL) {
6337 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
6338 log(LOG_ERR, "%s: unable to allocate or map "
6339 "rx buffer %d, error = %d\n",
6340 device_xname(sc->sc_dev), i, error);
6341 /*
6342 * XXX Should attempt to run with fewer receive
6343 * XXX buffers instead of just failing.
6344 */
6345 wm_rxdrain(rxq);
6346 return ENOMEM;
6347 }
6348 } else {
6349 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6350 wm_init_rxdesc(rxq, i);
6351 /*
6352 			 * For 82575 and newer devices, the RX descriptors
6353 			 * must be initialized after RCTL.EN is set in
6354 			 * wm_set_filter().
6355 */
6356 }
6357 }
6358 rxq->rxq_ptr = 0;
6359 rxq->rxq_discard = 0;
6360 WM_RXCHAIN_RESET(rxq);
6361
6362 return 0;
6363 }
6364
6365 static int
6366 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6367 struct wm_rxqueue *rxq)
6368 {
6369
6370 KASSERT(mutex_owned(rxq->rxq_lock));
6371
6372 /*
6373 * Set up some register offsets that are different between
6374 * the i82542 and the i82543 and later chips.
6375 */
6376 if (sc->sc_type < WM_T_82543)
6377 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6378 else
6379 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6380
6381 wm_init_rx_regs(sc, wmq, rxq);
6382 return wm_init_rx_buffer(sc, rxq);
6383 }
6384
6385 /*
6386  * wm_init_txrx_queues:
6387  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
6388 */
6389 static int
6390 wm_init_txrx_queues(struct wm_softc *sc)
6391 {
6392 int i, error = 0;
6393
6394 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6395 device_xname(sc->sc_dev), __func__));
6396
6397 for (i = 0; i < sc->sc_nqueues; i++) {
6398 struct wm_queue *wmq = &sc->sc_queue[i];
6399 struct wm_txqueue *txq = &wmq->wmq_txq;
6400 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6401
6402 /*
6403 * TODO
6404 		 * Currently, use a constant value instead of AIM.
6405 		 * Furthermore, the interrupt interval for multiqueue, which
6406 		 * uses polling mode, is smaller than the default value.
6407 		 * More tuning and AIM support are required.
6408 */
6409 if (wm_is_using_multiqueue(sc))
6410 wmq->wmq_itr = 50;
6411 else
6412 wmq->wmq_itr = sc->sc_itr_init;
6413 wmq->wmq_set_itr = true;
6414
6415 mutex_enter(txq->txq_lock);
6416 wm_init_tx_queue(sc, wmq, txq);
6417 mutex_exit(txq->txq_lock);
6418
6419 mutex_enter(rxq->rxq_lock);
6420 error = wm_init_rx_queue(sc, wmq, rxq);
6421 mutex_exit(rxq->rxq_lock);
6422 if (error)
6423 break;
6424 }
6425
6426 return error;
6427 }
6428
6429 /*
6430 * wm_tx_offload:
6431 *
6432 * Set up TCP/IP checksumming parameters for the
6433 * specified packet.
6434 */
6435 static int
6436 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6437 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
6438 {
6439 struct mbuf *m0 = txs->txs_mbuf;
6440 struct livengood_tcpip_ctxdesc *t;
6441 uint32_t ipcs, tucs, cmd, cmdlen, seg;
6442 uint32_t ipcse;
6443 struct ether_header *eh;
6444 int offset, iphl;
6445 uint8_t fields;
6446
6447 /*
6448 * XXX It would be nice if the mbuf pkthdr had offset
6449 * fields for the protocol headers.
6450 */
6451
6452 eh = mtod(m0, struct ether_header *);
6453 switch (htons(eh->ether_type)) {
6454 case ETHERTYPE_IP:
6455 case ETHERTYPE_IPV6:
6456 offset = ETHER_HDR_LEN;
6457 break;
6458
6459 case ETHERTYPE_VLAN:
6460 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6461 break;
6462
6463 default:
6464 /*
6465 * Don't support this protocol or encapsulation.
6466 */
6467 *fieldsp = 0;
6468 *cmdp = 0;
6469 return 0;
6470 }
6471
6472 if ((m0->m_pkthdr.csum_flags &
6473 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6474 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6475 } else {
6476 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6477 }
6478 ipcse = offset + iphl - 1;
6479
6480 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6481 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6482 seg = 0;
6483 fields = 0;
6484
6485 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6486 int hlen = offset + iphl;
6487 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6488
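		/*
		 * For TSO, zero the IP total length / payload length field
		 * and pre-seed the TCP checksum with the pseudo-header
		 * checksum computed over a zero length; the chip fills in
		 * the per-segment lengths and checksums itself.
		 */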
6489 if (__predict_false(m0->m_len <
6490 (hlen + sizeof(struct tcphdr)))) {
6491 /*
6492 * TCP/IP headers are not in the first mbuf; we need
6493 * to do this the slow and painful way. Let's just
6494 * hope this doesn't happen very often.
6495 */
6496 struct tcphdr th;
6497
6498 WM_Q_EVCNT_INCR(txq, txtsopain);
6499
6500 m_copydata(m0, hlen, sizeof(th), &th);
6501 if (v4) {
6502 struct ip ip;
6503
6504 m_copydata(m0, offset, sizeof(ip), &ip);
6505 ip.ip_len = 0;
6506 m_copyback(m0,
6507 offset + offsetof(struct ip, ip_len),
6508 sizeof(ip.ip_len), &ip.ip_len);
6509 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6510 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6511 } else {
6512 struct ip6_hdr ip6;
6513
6514 m_copydata(m0, offset, sizeof(ip6), &ip6);
6515 ip6.ip6_plen = 0;
6516 m_copyback(m0,
6517 offset + offsetof(struct ip6_hdr, ip6_plen),
6518 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6519 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6520 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6521 }
6522 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6523 sizeof(th.th_sum), &th.th_sum);
6524
6525 hlen += th.th_off << 2;
6526 } else {
6527 /*
6528 * TCP/IP headers are in the first mbuf; we can do
6529 * this the easy way.
6530 */
6531 struct tcphdr *th;
6532
6533 if (v4) {
6534 struct ip *ip =
6535 (void *)(mtod(m0, char *) + offset);
6536 th = (void *)(mtod(m0, char *) + hlen);
6537
6538 ip->ip_len = 0;
6539 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6540 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6541 } else {
6542 struct ip6_hdr *ip6 =
6543 (void *)(mtod(m0, char *) + offset);
6544 th = (void *)(mtod(m0, char *) + hlen);
6545
6546 ip6->ip6_plen = 0;
6547 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6548 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6549 }
6550 hlen += th->th_off << 2;
6551 }
6552
6553 if (v4) {
6554 WM_Q_EVCNT_INCR(txq, txtso);
6555 cmdlen |= WTX_TCPIP_CMD_IP;
6556 } else {
6557 WM_Q_EVCNT_INCR(txq, txtso6);
6558 ipcse = 0;
6559 }
6560 cmd |= WTX_TCPIP_CMD_TSE;
6561 cmdlen |= WTX_TCPIP_CMD_TSE |
6562 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6563 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6564 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6565 }
6566
6567 /*
6568 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6569 * offload feature, if we load the context descriptor, we
6570 * MUST provide valid values for IPCSS and TUCSS fields.
6571 */
6572
6573 ipcs = WTX_TCPIP_IPCSS(offset) |
6574 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6575 WTX_TCPIP_IPCSE(ipcse);
6576 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6577 WM_Q_EVCNT_INCR(txq, txipsum);
6578 fields |= WTX_IXSM;
6579 }
6580
6581 offset += iphl;
6582
6583 if (m0->m_pkthdr.csum_flags &
6584 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6585 WM_Q_EVCNT_INCR(txq, txtusum);
6586 fields |= WTX_TXSM;
6587 tucs = WTX_TCPIP_TUCSS(offset) |
6588 WTX_TCPIP_TUCSO(offset +
6589 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6590 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6591 } else if ((m0->m_pkthdr.csum_flags &
6592 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6593 WM_Q_EVCNT_INCR(txq, txtusum6);
6594 fields |= WTX_TXSM;
6595 tucs = WTX_TCPIP_TUCSS(offset) |
6596 WTX_TCPIP_TUCSO(offset +
6597 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6598 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6599 } else {
6600 /* Just initialize it to a valid TCP context. */
6601 tucs = WTX_TCPIP_TUCSS(offset) |
6602 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6603 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6604 }
6605
6606 /*
6607 	 * We don't have to write a context descriptor for every packet,
6608 	 * except on the 82574: there we must write a context descriptor
6609 	 * for every packet when two descriptor queues are used.
6610 	 * Writing a context descriptor for every packet adds overhead,
6611 	 * but it does not cause problems.
6612 */
6613 /* Fill in the context descriptor. */
6614 t = (struct livengood_tcpip_ctxdesc *)
6615 &txq->txq_descs[txq->txq_next];
6616 t->tcpip_ipcs = htole32(ipcs);
6617 t->tcpip_tucs = htole32(tucs);
6618 t->tcpip_cmdlen = htole32(cmdlen);
6619 t->tcpip_seg = htole32(seg);
6620 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6621
6622 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6623 txs->txs_ndesc++;
6624
6625 *cmdp = cmd;
6626 *fieldsp = fields;
6627
6628 return 0;
6629 }
6630
6631 static inline int
6632 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6633 {
6634 struct wm_softc *sc = ifp->if_softc;
6635 u_int cpuid = cpu_index(curcpu());
6636
6637 /*
6638 	 * Currently, a simple distribution strategy.
6639 	 * TODO:
6640 	 * distribute by flowid (RSS hash value).
6641 */
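	/*
	 * Adding ncpu before subtracting sc_affinity_offset keeps the
	 * dividend non-negative (assuming sc_affinity_offset is at most
	 * ncpu), so the result is always a valid queue index in
	 * [0, sc_nqueues).
	 */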
6642 return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
6643 }
6644
6645 /*
6646 * wm_start: [ifnet interface function]
6647 *
6648 * Start packet transmission on the interface.
6649 */
6650 static void
6651 wm_start(struct ifnet *ifp)
6652 {
6653 struct wm_softc *sc = ifp->if_softc;
6654 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6655
6656 #ifdef WM_MPSAFE
6657 KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6658 #endif
6659 /*
6660 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
6661 */
6662
6663 mutex_enter(txq->txq_lock);
6664 if (!txq->txq_stopping)
6665 wm_start_locked(ifp);
6666 mutex_exit(txq->txq_lock);
6667 }
6668
6669 static void
6670 wm_start_locked(struct ifnet *ifp)
6671 {
6672 struct wm_softc *sc = ifp->if_softc;
6673 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6674
6675 wm_send_common_locked(ifp, txq, false);
6676 }
6677
6678 static int
6679 wm_transmit(struct ifnet *ifp, struct mbuf *m)
6680 {
6681 int qid;
6682 struct wm_softc *sc = ifp->if_softc;
6683 struct wm_txqueue *txq;
6684
6685 qid = wm_select_txqueue(ifp, m);
6686 txq = &sc->sc_queue[qid].wmq_txq;
6687
6688 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6689 m_freem(m);
6690 WM_Q_EVCNT_INCR(txq, txdrop);
6691 return ENOBUFS;
6692 }
6693
6694 /*
6695 * XXXX NOMPSAFE: ifp->if_data should be percpu.
6696 */
6697 ifp->if_obytes += m->m_pkthdr.len;
6698 if (m->m_flags & M_MCAST)
6699 ifp->if_omcasts++;
6700
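	/*
	 * A failed mutex_tryenter() here is harmless for the same reason
	 * described in wm_nq_transmit(): any packet left on txq_interq is
	 * picked up later by the deferred start path.
	 */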
6701 if (mutex_tryenter(txq->txq_lock)) {
6702 if (!txq->txq_stopping)
6703 wm_transmit_locked(ifp, txq);
6704 mutex_exit(txq->txq_lock);
6705 }
6706
6707 return 0;
6708 }
6709
6710 static void
6711 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6712 {
6713
6714 wm_send_common_locked(ifp, txq, true);
6715 }
6716
6717 static void
6718 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6719 bool is_transmit)
6720 {
6721 struct wm_softc *sc = ifp->if_softc;
6722 struct mbuf *m0;
6723 struct m_tag *mtag;
6724 struct wm_txsoft *txs;
6725 bus_dmamap_t dmamap;
6726 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6727 bus_addr_t curaddr;
6728 bus_size_t seglen, curlen;
6729 uint32_t cksumcmd;
6730 uint8_t cksumfields;
6731
6732 KASSERT(mutex_owned(txq->txq_lock));
6733
6734 if ((ifp->if_flags & IFF_RUNNING) == 0)
6735 return;
6736 if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
6737 return;
6738 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6739 return;
6740
6741 /* Remember the previous number of free descriptors. */
6742 ofree = txq->txq_free;
6743
6744 /*
6745 * Loop through the send queue, setting up transmit descriptors
6746 * until we drain the queue, or use up all available transmit
6747 * descriptors.
6748 */
6749 for (;;) {
6750 m0 = NULL;
6751
6752 /* Get a work queue entry. */
6753 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6754 wm_txeof(sc, txq);
6755 if (txq->txq_sfree == 0) {
6756 DPRINTF(WM_DEBUG_TX,
6757 ("%s: TX: no free job descriptors\n",
6758 device_xname(sc->sc_dev)));
6759 WM_Q_EVCNT_INCR(txq, txsstall);
6760 break;
6761 }
6762 }
6763
6764 /* Grab a packet off the queue. */
6765 if (is_transmit)
6766 m0 = pcq_get(txq->txq_interq);
6767 else
6768 IFQ_DEQUEUE(&ifp->if_snd, m0);
6769 if (m0 == NULL)
6770 break;
6771
6772 DPRINTF(WM_DEBUG_TX,
6773 ("%s: TX: have packet to transmit: %p\n",
6774 device_xname(sc->sc_dev), m0));
6775
6776 txs = &txq->txq_soft[txq->txq_snext];
6777 dmamap = txs->txs_dmamap;
6778
6779 use_tso = (m0->m_pkthdr.csum_flags &
6780 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6781
6782 /*
6783 * So says the Linux driver:
6784 * The controller does a simple calculation to make sure
6785 * there is enough room in the FIFO before initiating the
6786 * DMA for each buffer. The calc is:
6787 * 4 = ceil(buffer len / MSS)
6788 * To make sure we don't overrun the FIFO, adjust the max
6789 * buffer len if the MSS drops.
6790 */
6791 dmamap->dm_maxsegsz =
6792 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6793 ? m0->m_pkthdr.segsz << 2
6794 : WTX_MAX_LEN;
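		/*
		 * For example, with a typical 1460-byte MSS each DMA
		 * segment is capped at 4 * 1460 = 5840 bytes.
		 */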
6795
6796 /*
6797 * Load the DMA map. If this fails, the packet either
6798 * didn't fit in the allotted number of segments, or we
6799 * were short on resources. For the too-many-segments
6800 * case, we simply report an error and drop the packet,
6801 * since we can't sanely copy a jumbo packet to a single
6802 * buffer.
6803 */
6804 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6805 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6806 if (error) {
6807 if (error == EFBIG) {
6808 WM_Q_EVCNT_INCR(txq, txdrop);
6809 log(LOG_ERR, "%s: Tx packet consumes too many "
6810 "DMA segments, dropping...\n",
6811 device_xname(sc->sc_dev));
6812 wm_dump_mbuf_chain(sc, m0);
6813 m_freem(m0);
6814 continue;
6815 }
6816 /* Short on resources, just stop for now. */
6817 DPRINTF(WM_DEBUG_TX,
6818 ("%s: TX: dmamap load failed: %d\n",
6819 device_xname(sc->sc_dev), error));
6820 break;
6821 }
6822
6823 segs_needed = dmamap->dm_nsegs;
6824 if (use_tso) {
6825 /* For sentinel descriptor; see below. */
6826 segs_needed++;
6827 }
6828
6829 /*
6830 * Ensure we have enough descriptors free to describe
6831 * the packet. Note, we always reserve one descriptor
6832 * at the end of the ring due to the semantics of the
6833 * TDT register, plus one more in the event we need
6834 * to load offload context.
6835 */
6836 if (segs_needed > txq->txq_free - 2) {
6837 /*
6838 * Not enough free descriptors to transmit this
6839 * packet. We haven't committed anything yet,
6840 * so just unload the DMA map, put the packet
6841 			 * back on the queue, and punt. Notify the upper
6842 * layer that there are no more slots left.
6843 */
6844 DPRINTF(WM_DEBUG_TX,
6845 ("%s: TX: need %d (%d) descriptors, have %d\n",
6846 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6847 segs_needed, txq->txq_free - 1));
6848 if (!is_transmit)
6849 ifp->if_flags |= IFF_OACTIVE;
6850 txq->txq_flags |= WM_TXQ_NO_SPACE;
6851 bus_dmamap_unload(sc->sc_dmat, dmamap);
6852 WM_Q_EVCNT_INCR(txq, txdstall);
6853 break;
6854 }
6855
6856 /*
6857 * Check for 82547 Tx FIFO bug. We need to do this
6858 * once we know we can transmit the packet, since we
6859 * do some internal FIFO space accounting here.
6860 */
6861 if (sc->sc_type == WM_T_82547 &&
6862 wm_82547_txfifo_bugchk(sc, m0)) {
6863 DPRINTF(WM_DEBUG_TX,
6864 ("%s: TX: 82547 Tx FIFO bug detected\n",
6865 device_xname(sc->sc_dev)));
6866 if (!is_transmit)
6867 ifp->if_flags |= IFF_OACTIVE;
6868 txq->txq_flags |= WM_TXQ_NO_SPACE;
6869 bus_dmamap_unload(sc->sc_dmat, dmamap);
6870 WM_Q_EVCNT_INCR(txq, txfifo_stall);
6871 break;
6872 }
6873
6874 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6875
6876 DPRINTF(WM_DEBUG_TX,
6877 ("%s: TX: packet has %d (%d) DMA segments\n",
6878 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6879
6880 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6881
6882 /*
6883 * Store a pointer to the packet so that we can free it
6884 * later.
6885 *
6886 		 * Initially, we consider the number of descriptors the
6887 		 * packet uses to be the number of DMA segments. This may be
6888 * incremented by 1 if we do checksum offload (a descriptor
6889 * is used to set the checksum context).
6890 */
6891 txs->txs_mbuf = m0;
6892 txs->txs_firstdesc = txq->txq_next;
6893 txs->txs_ndesc = segs_needed;
6894
6895 /* Set up offload parameters for this packet. */
6896 if (m0->m_pkthdr.csum_flags &
6897 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6898 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6899 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6900 if (wm_tx_offload(sc, txq, txs, &cksumcmd,
6901 &cksumfields) != 0) {
6902 /* Error message already displayed. */
6903 bus_dmamap_unload(sc->sc_dmat, dmamap);
6904 continue;
6905 }
6906 } else {
6907 cksumcmd = 0;
6908 cksumfields = 0;
6909 }
6910
6911 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6912
6913 /* Sync the DMA map. */
6914 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6915 BUS_DMASYNC_PREWRITE);
6916
6917 /* Initialize the transmit descriptor. */
6918 for (nexttx = txq->txq_next, seg = 0;
6919 seg < dmamap->dm_nsegs; seg++) {
6920 for (seglen = dmamap->dm_segs[seg].ds_len,
6921 curaddr = dmamap->dm_segs[seg].ds_addr;
6922 seglen != 0;
6923 curaddr += curlen, seglen -= curlen,
6924 nexttx = WM_NEXTTX(txq, nexttx)) {
6925 curlen = seglen;
6926
6927 /*
6928 * So says the Linux driver:
6929 * Work around for premature descriptor
6930 * write-backs in TSO mode. Append a
6931 * 4-byte sentinel descriptor.
6932 */
6933 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6934 curlen > 8)
6935 curlen -= 4;
6936
6937 wm_set_dma_addr(
6938 &txq->txq_descs[nexttx].wtx_addr, curaddr);
6939 txq->txq_descs[nexttx].wtx_cmdlen
6940 = htole32(cksumcmd | curlen);
6941 txq->txq_descs[nexttx].wtx_fields.wtxu_status
6942 = 0;
6943 txq->txq_descs[nexttx].wtx_fields.wtxu_options
6944 = cksumfields;
6945 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6946 lasttx = nexttx;
6947
6948 DPRINTF(WM_DEBUG_TX,
6949 ("%s: TX: desc %d: low %#" PRIx64 ", "
6950 "len %#04zx\n",
6951 device_xname(sc->sc_dev), nexttx,
6952 (uint64_t)curaddr, curlen));
6953 }
6954 }
6955
6956 KASSERT(lasttx != -1);
6957
6958 /*
6959 * Set up the command byte on the last descriptor of
6960 * the packet. If we're in the interrupt delay window,
6961 * delay the interrupt.
6962 */
6963 txq->txq_descs[lasttx].wtx_cmdlen |=
6964 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6965
6966 /*
6967 * If VLANs are enabled and the packet has a VLAN tag, set
6968 * up the descriptor to encapsulate the packet for us.
6969 *
6970 * This is only valid on the last descriptor of the packet.
6971 */
6972 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6973 txq->txq_descs[lasttx].wtx_cmdlen |=
6974 htole32(WTX_CMD_VLE);
6975 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6976 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6977 }
6978
6979 txs->txs_lastdesc = lasttx;
6980
6981 DPRINTF(WM_DEBUG_TX,
6982 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6983 device_xname(sc->sc_dev),
6984 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6985
6986 /* Sync the descriptors we're using. */
6987 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6988 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6989
6990 /* Give the packet to the chip. */
6991 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6992
6993 DPRINTF(WM_DEBUG_TX,
6994 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6995
6996 DPRINTF(WM_DEBUG_TX,
6997 ("%s: TX: finished transmitting packet, job %d\n",
6998 device_xname(sc->sc_dev), txq->txq_snext));
6999
7000 /* Advance the tx pointer. */
7001 txq->txq_free -= txs->txs_ndesc;
7002 txq->txq_next = nexttx;
7003
7004 txq->txq_sfree--;
7005 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7006
7007 /* Pass the packet to any BPF listeners. */
7008 bpf_mtap(ifp, m0);
7009 }
7010
7011 if (m0 != NULL) {
7012 if (!is_transmit)
7013 ifp->if_flags |= IFF_OACTIVE;
7014 txq->txq_flags |= WM_TXQ_NO_SPACE;
7015 WM_Q_EVCNT_INCR(txq, txdrop);
7016 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7017 __func__));
7018 m_freem(m0);
7019 }
7020
7021 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7022 /* No more slots; notify upper layer. */
7023 if (!is_transmit)
7024 ifp->if_flags |= IFF_OACTIVE;
7025 txq->txq_flags |= WM_TXQ_NO_SPACE;
7026 }
7027
7028 if (txq->txq_free != ofree) {
7029 /* Set a watchdog timer in case the chip flakes out. */
7030 ifp->if_timer = 5;
7031 }
7032 }
7033
7034 /*
7035 * wm_nq_tx_offload:
7036 *
7037 * Set up TCP/IP checksumming parameters for the
7038 * specified packet, for NEWQUEUE devices
7039 */
7040 static int
7041 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7042 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
7043 {
7044 struct mbuf *m0 = txs->txs_mbuf;
7045 struct m_tag *mtag;
7046 uint32_t vl_len, mssidx, cmdc;
7047 struct ether_header *eh;
7048 int offset, iphl;
7049
7050 /*
7051 * XXX It would be nice if the mbuf pkthdr had offset
7052 * fields for the protocol headers.
7053 */
7054 *cmdlenp = 0;
7055 *fieldsp = 0;
7056
7057 eh = mtod(m0, struct ether_header *);
7058 switch (htons(eh->ether_type)) {
7059 case ETHERTYPE_IP:
7060 case ETHERTYPE_IPV6:
7061 offset = ETHER_HDR_LEN;
7062 break;
7063
7064 case ETHERTYPE_VLAN:
7065 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7066 break;
7067
7068 default:
7069 /* Don't support this protocol or encapsulation. */
7070 *do_csum = false;
7071 return 0;
7072 }
7073 *do_csum = true;
7074 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
7075 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
7076
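	/*
	 * vl_len packs the MAC header length, the IP header length and the
	 * 802.1Q VLAN tag into the context descriptor's VLAN/LEN field.
	 */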
7077 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
7078 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
7079
7080 if ((m0->m_pkthdr.csum_flags &
7081 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7082 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7083 } else {
7084 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
7085 }
7086 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
7087 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
7088
7089 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
7090 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
7091 << NQTXC_VLLEN_VLAN_SHIFT);
7092 *cmdlenp |= NQTX_CMD_VLE;
7093 }
7094
7095 mssidx = 0;
7096
7097 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7098 int hlen = offset + iphl;
7099 int tcp_hlen;
7100 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7101
7102 if (__predict_false(m0->m_len <
7103 (hlen + sizeof(struct tcphdr)))) {
7104 /*
7105 * TCP/IP headers are not in the first mbuf; we need
7106 * to do this the slow and painful way. Let's just
7107 * hope this doesn't happen very often.
7108 */
7109 struct tcphdr th;
7110
7111 WM_Q_EVCNT_INCR(txq, txtsopain);
7112
7113 m_copydata(m0, hlen, sizeof(th), &th);
7114 if (v4) {
7115 struct ip ip;
7116
7117 m_copydata(m0, offset, sizeof(ip), &ip);
7118 ip.ip_len = 0;
7119 m_copyback(m0,
7120 offset + offsetof(struct ip, ip_len),
7121 sizeof(ip.ip_len), &ip.ip_len);
7122 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7123 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7124 } else {
7125 struct ip6_hdr ip6;
7126
7127 m_copydata(m0, offset, sizeof(ip6), &ip6);
7128 ip6.ip6_plen = 0;
7129 m_copyback(m0,
7130 offset + offsetof(struct ip6_hdr, ip6_plen),
7131 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7132 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7133 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7134 }
7135 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7136 sizeof(th.th_sum), &th.th_sum);
7137
7138 tcp_hlen = th.th_off << 2;
7139 } else {
7140 /*
7141 * TCP/IP headers are in the first mbuf; we can do
7142 * this the easy way.
7143 */
7144 struct tcphdr *th;
7145
7146 if (v4) {
7147 struct ip *ip =
7148 (void *)(mtod(m0, char *) + offset);
7149 th = (void *)(mtod(m0, char *) + hlen);
7150
7151 ip->ip_len = 0;
7152 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7153 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7154 } else {
7155 struct ip6_hdr *ip6 =
7156 (void *)(mtod(m0, char *) + offset);
7157 th = (void *)(mtod(m0, char *) + hlen);
7158
7159 ip6->ip6_plen = 0;
7160 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7161 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7162 }
7163 tcp_hlen = th->th_off << 2;
7164 }
7165 hlen += tcp_hlen;
7166 *cmdlenp |= NQTX_CMD_TSE;
7167
7168 if (v4) {
7169 WM_Q_EVCNT_INCR(txq, txtso);
7170 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
7171 } else {
7172 WM_Q_EVCNT_INCR(txq, txtso6);
7173 *fieldsp |= NQTXD_FIELDS_TUXSM;
7174 }
7175 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
7176 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7177 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
7178 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
7179 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
7180 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
7181 } else {
7182 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
7183 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7184 }
7185
7186 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
7187 *fieldsp |= NQTXD_FIELDS_IXSM;
7188 cmdc |= NQTXC_CMD_IP4;
7189 }
7190
7191 if (m0->m_pkthdr.csum_flags &
7192 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
7193 WM_Q_EVCNT_INCR(txq, txtusum);
7194 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
7195 cmdc |= NQTXC_CMD_TCP;
7196 } else {
7197 cmdc |= NQTXC_CMD_UDP;
7198 }
7199 cmdc |= NQTXC_CMD_IP4;
7200 *fieldsp |= NQTXD_FIELDS_TUXSM;
7201 }
7202 if (m0->m_pkthdr.csum_flags &
7203 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
7204 WM_Q_EVCNT_INCR(txq, txtusum6);
7205 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
7206 cmdc |= NQTXC_CMD_TCP;
7207 } else {
7208 cmdc |= NQTXC_CMD_UDP;
7209 }
7210 cmdc |= NQTXC_CMD_IP6;
7211 *fieldsp |= NQTXD_FIELDS_TUXSM;
7212 }
7213
7214 /*
7215 	 * We don't have to write a context descriptor for every packet on
7216 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
7217 	 * I210 and I211. Writing one per Tx queue is enough for these
7218 	 * controllers.
7219 	 * Writing a context descriptor for every packet adds overhead,
7220 	 * but it does not cause problems.
7221 */
7222 /* Fill in the context descriptor. */
7223 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
7224 htole32(vl_len);
7225 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
7226 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
7227 htole32(cmdc);
7228 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
7229 htole32(mssidx);
7230 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7231 DPRINTF(WM_DEBUG_TX,
7232 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
7233 txq->txq_next, 0, vl_len));
7234 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
7235 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7236 txs->txs_ndesc++;
7237 return 0;
7238 }
7239
7240 /*
7241 * wm_nq_start: [ifnet interface function]
7242 *
7243 * Start packet transmission on the interface for NEWQUEUE devices
7244 */
7245 static void
7246 wm_nq_start(struct ifnet *ifp)
7247 {
7248 struct wm_softc *sc = ifp->if_softc;
7249 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7250
7251 #ifdef WM_MPSAFE
7252 KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
7253 #endif
7254 /*
7255 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7256 */
7257
7258 mutex_enter(txq->txq_lock);
7259 if (!txq->txq_stopping)
7260 wm_nq_start_locked(ifp);
7261 mutex_exit(txq->txq_lock);
7262 }
7263
7264 static void
7265 wm_nq_start_locked(struct ifnet *ifp)
7266 {
7267 struct wm_softc *sc = ifp->if_softc;
7268 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7269
7270 wm_nq_send_common_locked(ifp, txq, false);
7271 }
7272
7273 static int
7274 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
7275 {
7276 int qid;
7277 struct wm_softc *sc = ifp->if_softc;
7278 struct wm_txqueue *txq;
7279
7280 qid = wm_select_txqueue(ifp, m);
7281 txq = &sc->sc_queue[qid].wmq_txq;
7282
7283 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7284 m_freem(m);
7285 WM_Q_EVCNT_INCR(txq, txdrop);
7286 return ENOBUFS;
7287 }
7288
7289 /*
7290 * XXXX NOMPSAFE: ifp->if_data should be percpu.
7291 */
7292 ifp->if_obytes += m->m_pkthdr.len;
7293 if (m->m_flags & M_MCAST)
7294 ifp->if_omcasts++;
7295
7296 /*
7297 	 * There are two situations in which this mutex_tryenter() can fail
7298 	 * at run time:
7299 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
7300 	 *     (2) contention with the deferred if_start softint
7301 	 *         (wm_handle_queue())
7302 	 * In either case, the last packet enqueued to txq->txq_interq is
7303 	 * eventually dequeued by wm_deferred_start_locked(), so it does not
7304 	 * get stuck.
7305 */
7306 if (mutex_tryenter(txq->txq_lock)) {
7307 if (!txq->txq_stopping)
7308 wm_nq_transmit_locked(ifp, txq);
7309 mutex_exit(txq->txq_lock);
7310 }
7311
7312 return 0;
7313 }
7314
7315 static void
7316 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7317 {
7318
7319 wm_nq_send_common_locked(ifp, txq, true);
7320 }
7321
7322 static void
7323 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7324 bool is_transmit)
7325 {
7326 struct wm_softc *sc = ifp->if_softc;
7327 struct mbuf *m0;
7328 struct m_tag *mtag;
7329 struct wm_txsoft *txs;
7330 bus_dmamap_t dmamap;
7331 int error, nexttx, lasttx = -1, seg, segs_needed;
7332 bool do_csum, sent;
7333
7334 KASSERT(mutex_owned(txq->txq_lock));
7335
7336 if ((ifp->if_flags & IFF_RUNNING) == 0)
7337 return;
7338 if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
7339 return;
7340 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7341 return;
7342
7343 sent = false;
7344
7345 /*
7346 * Loop through the send queue, setting up transmit descriptors
7347 * until we drain the queue, or use up all available transmit
7348 * descriptors.
7349 */
7350 for (;;) {
7351 m0 = NULL;
7352
7353 /* Get a work queue entry. */
7354 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7355 wm_txeof(sc, txq);
7356 if (txq->txq_sfree == 0) {
7357 DPRINTF(WM_DEBUG_TX,
7358 ("%s: TX: no free job descriptors\n",
7359 device_xname(sc->sc_dev)));
7360 WM_Q_EVCNT_INCR(txq, txsstall);
7361 break;
7362 }
7363 }
7364
7365 /* Grab a packet off the queue. */
7366 if (is_transmit)
7367 m0 = pcq_get(txq->txq_interq);
7368 else
7369 IFQ_DEQUEUE(&ifp->if_snd, m0);
7370 if (m0 == NULL)
7371 break;
7372
7373 DPRINTF(WM_DEBUG_TX,
7374 ("%s: TX: have packet to transmit: %p\n",
7375 device_xname(sc->sc_dev), m0));
7376
7377 txs = &txq->txq_soft[txq->txq_snext];
7378 dmamap = txs->txs_dmamap;
7379
7380 /*
7381 * Load the DMA map. If this fails, the packet either
7382 * didn't fit in the allotted number of segments, or we
7383 * were short on resources. For the too-many-segments
7384 * case, we simply report an error and drop the packet,
7385 * since we can't sanely copy a jumbo packet to a single
7386 * buffer.
7387 */
7388 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7389 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7390 if (error) {
7391 if (error == EFBIG) {
7392 WM_Q_EVCNT_INCR(txq, txdrop);
7393 log(LOG_ERR, "%s: Tx packet consumes too many "
7394 "DMA segments, dropping...\n",
7395 device_xname(sc->sc_dev));
7396 wm_dump_mbuf_chain(sc, m0);
7397 m_freem(m0);
7398 continue;
7399 }
7400 /* Short on resources, just stop for now. */
7401 DPRINTF(WM_DEBUG_TX,
7402 ("%s: TX: dmamap load failed: %d\n",
7403 device_xname(sc->sc_dev), error));
7404 break;
7405 }
7406
7407 segs_needed = dmamap->dm_nsegs;
7408
7409 /*
7410 * Ensure we have enough descriptors free to describe
7411 * the packet. Note, we always reserve one descriptor
7412 * at the end of the ring due to the semantics of the
7413 * TDT register, plus one more in the event we need
7414 * to load offload context.
7415 */
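		/*
		 * Illustrative example (not from the manual): with
		 * txq_free == 10, a packet that maps to 9 DMA segments is
		 * rejected here because 9 > 10 - 2, while 8 segments would
		 * fit, leaving one slot for a possible context descriptor
		 * and one so that TDT never catches up with the head of
		 * the ring.
		 */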
7416 if (segs_needed > txq->txq_free - 2) {
7417 /*
7418 * Not enough free descriptors to transmit this
7419 * packet. We haven't committed anything yet,
7420 * so just unload the DMA map, put the packet
7421 			 * back on the queue, and punt. Notify the upper
7422 * layer that there are no more slots left.
7423 */
7424 DPRINTF(WM_DEBUG_TX,
7425 ("%s: TX: need %d (%d) descriptors, have %d\n",
7426 device_xname(sc->sc_dev), dmamap->dm_nsegs,
7427 segs_needed, txq->txq_free - 1));
7428 if (!is_transmit)
7429 ifp->if_flags |= IFF_OACTIVE;
7430 txq->txq_flags |= WM_TXQ_NO_SPACE;
7431 bus_dmamap_unload(sc->sc_dmat, dmamap);
7432 WM_Q_EVCNT_INCR(txq, txdstall);
7433 break;
7434 }
7435
7436 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7437
7438 DPRINTF(WM_DEBUG_TX,
7439 ("%s: TX: packet has %d (%d) DMA segments\n",
7440 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7441
7442 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7443
7444 /*
7445 * Store a pointer to the packet so that we can free it
7446 * later.
7447 *
7448 		 * Initially, we take the number of descriptors the packet
7449 		 * uses to be the number of DMA segments. This may be
7450 * incremented by 1 if we do checksum offload (a descriptor
7451 * is used to set the checksum context).
7452 */
7453 txs->txs_mbuf = m0;
7454 txs->txs_firstdesc = txq->txq_next;
7455 txs->txs_ndesc = segs_needed;
7456
7457 /* Set up offload parameters for this packet. */
7458 uint32_t cmdlen, fields, dcmdlen;
7459 if (m0->m_pkthdr.csum_flags &
7460 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7461 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7462 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7463 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7464 &do_csum) != 0) {
7465 /* Error message already displayed. */
7466 bus_dmamap_unload(sc->sc_dmat, dmamap);
7467 continue;
7468 }
7469 } else {
7470 do_csum = false;
7471 cmdlen = 0;
7472 fields = 0;
7473 }
7474
7475 /* Sync the DMA map. */
7476 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7477 BUS_DMASYNC_PREWRITE);
7478
7479 /* Initialize the first transmit descriptor. */
7480 nexttx = txq->txq_next;
7481 if (!do_csum) {
7482 /* setup a legacy descriptor */
7483 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7484 dmamap->dm_segs[0].ds_addr);
7485 txq->txq_descs[nexttx].wtx_cmdlen =
7486 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7487 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7488 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7489 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
7490 NULL) {
7491 txq->txq_descs[nexttx].wtx_cmdlen |=
7492 htole32(WTX_CMD_VLE);
7493 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7494 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
7495 } else {
7496 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
7497 }
7498 dcmdlen = 0;
7499 } else {
7500 /* setup an advanced data descriptor */
7501 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7502 htole64(dmamap->dm_segs[0].ds_addr);
7503 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7504 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7505 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
7506 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7507 htole32(fields);
7508 DPRINTF(WM_DEBUG_TX,
7509 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7510 device_xname(sc->sc_dev), nexttx,
7511 (uint64_t)dmamap->dm_segs[0].ds_addr));
7512 DPRINTF(WM_DEBUG_TX,
7513 ("\t 0x%08x%08x\n", fields,
7514 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7515 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7516 }
7517
7518 lasttx = nexttx;
7519 nexttx = WM_NEXTTX(txq, nexttx);
7520 		/*
7521 		 * Fill in the next descriptors. The legacy and advanced
7522 		 * formats are the same here.
7523 		 */
7524 for (seg = 1; seg < dmamap->dm_nsegs;
7525 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7526 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7527 htole64(dmamap->dm_segs[seg].ds_addr);
7528 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7529 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7530 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7531 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7532 lasttx = nexttx;
7533
7534 DPRINTF(WM_DEBUG_TX,
7535 ("%s: TX: desc %d: %#" PRIx64 ", "
7536 "len %#04zx\n",
7537 device_xname(sc->sc_dev), nexttx,
7538 (uint64_t)dmamap->dm_segs[seg].ds_addr,
7539 dmamap->dm_segs[seg].ds_len));
7540 }
7541
7542 KASSERT(lasttx != -1);
7543
7544 		/*
7545 		 * Set up the command byte on the last descriptor of
7546 		 * the packet: mark it as the end of the packet and
7547 		 * request a report of its completion status.
7548 		 */
7549 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7550 (NQTX_CMD_EOP | NQTX_CMD_RS));
7551 txq->txq_descs[lasttx].wtx_cmdlen |=
7552 htole32(WTX_CMD_EOP | WTX_CMD_RS);
7553
7554 txs->txs_lastdesc = lasttx;
7555
7556 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7557 device_xname(sc->sc_dev),
7558 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7559
7560 /* Sync the descriptors we're using. */
7561 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7562 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7563
7564 /* Give the packet to the chip. */
7565 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7566 sent = true;
7567
7568 DPRINTF(WM_DEBUG_TX,
7569 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7570
7571 DPRINTF(WM_DEBUG_TX,
7572 ("%s: TX: finished transmitting packet, job %d\n",
7573 device_xname(sc->sc_dev), txq->txq_snext));
7574
7575 /* Advance the tx pointer. */
7576 txq->txq_free -= txs->txs_ndesc;
7577 txq->txq_next = nexttx;
7578
7579 txq->txq_sfree--;
7580 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7581
7582 /* Pass the packet to any BPF listeners. */
7583 bpf_mtap(ifp, m0);
7584 }
7585
7586 if (m0 != NULL) {
7587 if (!is_transmit)
7588 ifp->if_flags |= IFF_OACTIVE;
7589 txq->txq_flags |= WM_TXQ_NO_SPACE;
7590 WM_Q_EVCNT_INCR(txq, txdrop);
7591 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7592 __func__));
7593 m_freem(m0);
7594 }
7595
7596 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7597 /* No more slots; notify upper layer. */
7598 if (!is_transmit)
7599 ifp->if_flags |= IFF_OACTIVE;
7600 txq->txq_flags |= WM_TXQ_NO_SPACE;
7601 }
7602
7603 if (sent) {
7604 /* Set a watchdog timer in case the chip flakes out. */
7605 ifp->if_timer = 5;
7606 }
7607 }
7608
7609 static void
7610 wm_deferred_start_locked(struct wm_txqueue *txq)
7611 {
7612 struct wm_softc *sc = txq->txq_sc;
7613 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7614 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
7615 int qid = wmq->wmq_id;
7616
7617 KASSERT(mutex_owned(txq->txq_lock));
7618
7619 if (txq->txq_stopping) {
7620 mutex_exit(txq->txq_lock);
7621 return;
7622 }
7623
7624 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7625 		/* XXX needed for ALTQ or single-CPU systems */
7626 if (qid == 0)
7627 wm_nq_start_locked(ifp);
7628 wm_nq_transmit_locked(ifp, txq);
7629 } else {
7630 		/* XXX needed for ALTQ or single-CPU systems */
7631 if (qid == 0)
7632 wm_start_locked(ifp);
7633 wm_transmit_locked(ifp, txq);
7634 }
7635 }
7636
7637 /* Interrupt */
7638
7639 /*
7640 * wm_txeof:
7641 *
7642 * Helper; handle transmit interrupts.
7643 */
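/*
 * A brief outline of the loop below (descriptive only): starting at
 * txq_sdirty, walk the software Tx jobs, sync their descriptors and
 * stop at the first job whose last descriptor does not yet have the
 * DD (descriptor done) bit set; for every completed job, update the
 * statistics, unload the DMA map and free the mbuf.
 */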
7644 static int
7645 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
7646 {
7647 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7648 struct wm_txsoft *txs;
7649 bool processed = false;
7650 int count = 0;
7651 int i;
7652 uint8_t status;
7653 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
7654
7655 KASSERT(mutex_owned(txq->txq_lock));
7656
7657 if (txq->txq_stopping)
7658 return 0;
7659
7660 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7661 	/* for ALTQ and legacy (non-multiqueue) ethernet controllers */
7662 if (wmq->wmq_id == 0)
7663 ifp->if_flags &= ~IFF_OACTIVE;
7664
7665 /*
7666 * Go through the Tx list and free mbufs for those
7667 * frames which have been transmitted.
7668 */
7669 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7670 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7671 txs = &txq->txq_soft[i];
7672
7673 DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7674 device_xname(sc->sc_dev), i));
7675
7676 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7677 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7678
7679 status =
7680 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7681 if ((status & WTX_ST_DD) == 0) {
7682 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7683 BUS_DMASYNC_PREREAD);
7684 break;
7685 }
7686
7687 processed = true;
7688 count++;
7689 DPRINTF(WM_DEBUG_TX,
7690 ("%s: TX: job %d done: descs %d..%d\n",
7691 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7692 txs->txs_lastdesc));
7693
7694 /*
7695 * XXX We should probably be using the statistics
7696 * XXX registers, but I don't know if they exist
7697 * XXX on chips before the i82544.
7698 */
7699
7700 #ifdef WM_EVENT_COUNTERS
7701 if (status & WTX_ST_TU)
7702 WM_Q_EVCNT_INCR(txq, tu);
7703 #endif /* WM_EVENT_COUNTERS */
7704
7705 if (status & (WTX_ST_EC | WTX_ST_LC)) {
7706 ifp->if_oerrors++;
7707 if (status & WTX_ST_LC)
7708 log(LOG_WARNING, "%s: late collision\n",
7709 device_xname(sc->sc_dev));
7710 else if (status & WTX_ST_EC) {
7711 ifp->if_collisions += 16;
7712 log(LOG_WARNING, "%s: excessive collisions\n",
7713 device_xname(sc->sc_dev));
7714 }
7715 } else
7716 ifp->if_opackets++;
7717
7718 txq->txq_packets++;
7719 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
7720
7721 txq->txq_free += txs->txs_ndesc;
7722 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7723 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7724 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7725 m_freem(txs->txs_mbuf);
7726 txs->txs_mbuf = NULL;
7727 }
7728
7729 /* Update the dirty transmit buffer pointer. */
7730 txq->txq_sdirty = i;
7731 DPRINTF(WM_DEBUG_TX,
7732 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7733
7734 if (count != 0)
7735 rnd_add_uint32(&sc->rnd_source, count);
7736
7737 /*
7738 * If there are no more pending transmissions, cancel the watchdog
7739 * timer.
7740 */
7741 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7742 ifp->if_timer = 0;
7743
7744 return processed;
7745 }
7746
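/*
 * The wm_rxdesc_get_* and wm_rxdesc_is_* helpers below hide the fact
 * that three Rx descriptor layouts are in use: the 82574 uses extended
 * descriptors (erx_ctx), NEWQUEUE devices (82575 and newer) use
 * advanced descriptors (nqrx_ctx), and everything else uses the legacy
 * layout (wrx_*).  Callers such as wm_rxeof() can therefore stay
 * format-agnostic.
 */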
7747 static inline uint32_t
7748 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
7749 {
7750 struct wm_softc *sc = rxq->rxq_sc;
7751
7752 if (sc->sc_type == WM_T_82574)
7753 return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
7754 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7755 return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
7756 else
7757 return rxq->rxq_descs[idx].wrx_status;
7758 }
7759
7760 static inline uint32_t
7761 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
7762 {
7763 struct wm_softc *sc = rxq->rxq_sc;
7764
7765 if (sc->sc_type == WM_T_82574)
7766 return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
7767 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7768 return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
7769 else
7770 return rxq->rxq_descs[idx].wrx_errors;
7771 }
7772
7773 static inline uint16_t
7774 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
7775 {
7776 struct wm_softc *sc = rxq->rxq_sc;
7777
7778 if (sc->sc_type == WM_T_82574)
7779 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
7780 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7781 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
7782 else
7783 return rxq->rxq_descs[idx].wrx_special;
7784 }
7785
7786 static inline int
7787 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
7788 {
7789 struct wm_softc *sc = rxq->rxq_sc;
7790
7791 if (sc->sc_type == WM_T_82574)
7792 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
7793 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7794 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
7795 else
7796 return rxq->rxq_descs[idx].wrx_len;
7797 }
7798
7799 #ifdef WM_DEBUG
7800 static inline uint32_t
7801 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
7802 {
7803 struct wm_softc *sc = rxq->rxq_sc;
7804
7805 if (sc->sc_type == WM_T_82574)
7806 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
7807 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7808 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
7809 else
7810 return 0;
7811 }
7812
7813 static inline uint8_t
7814 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
7815 {
7816 struct wm_softc *sc = rxq->rxq_sc;
7817
7818 if (sc->sc_type == WM_T_82574)
7819 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
7820 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7821 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
7822 else
7823 return 0;
7824 }
7825 #endif /* WM_DEBUG */
7826
7827 static inline bool
7828 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
7829 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
7830 {
7831
7832 if (sc->sc_type == WM_T_82574)
7833 return (status & ext_bit) != 0;
7834 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7835 return (status & nq_bit) != 0;
7836 else
7837 return (status & legacy_bit) != 0;
7838 }
7839
7840 static inline bool
7841 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
7842 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
7843 {
7844
7845 if (sc->sc_type == WM_T_82574)
7846 return (error & ext_bit) != 0;
7847 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7848 return (error & nq_bit) != 0;
7849 else
7850 return (error & legacy_bit) != 0;
7851 }
7852
7853 static inline bool
7854 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
7855 {
7856
7857 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
7858 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
7859 return true;
7860 else
7861 return false;
7862 }
7863
7864 static inline bool
7865 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
7866 {
7867 struct wm_softc *sc = rxq->rxq_sc;
7868
7869 /* XXXX missing error bit for newqueue? */
7870 if (wm_rxdesc_is_set_error(sc, errors,
7871 WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
7872 EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
7873 NQRXC_ERROR_RXE)) {
7874 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
7875 log(LOG_WARNING, "%s: symbol error\n",
7876 device_xname(sc->sc_dev));
7877 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
7878 log(LOG_WARNING, "%s: receive sequence error\n",
7879 device_xname(sc->sc_dev));
7880 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
7881 log(LOG_WARNING, "%s: CRC error\n",
7882 device_xname(sc->sc_dev));
7883 return true;
7884 }
7885
7886 return false;
7887 }
7888
7889 static inline bool
7890 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
7891 {
7892 struct wm_softc *sc = rxq->rxq_sc;
7893
7894 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
7895 NQRXC_STATUS_DD)) {
7896 /* We have processed all of the receive descriptors. */
7897 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
7898 return false;
7899 }
7900
7901 return true;
7902 }
7903
7904 static inline bool
7905 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
7906 struct mbuf *m)
7907 {
7908 struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
7909
7910 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
7911 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
7912 VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
7913 }
7914
7915 return true;
7916 }
7917
7918 static inline void
7919 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
7920 uint32_t errors, struct mbuf *m)
7921 {
7922 struct wm_softc *sc = rxq->rxq_sc;
7923
7924 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
7925 if (wm_rxdesc_is_set_status(sc, status,
7926 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
7927 WM_Q_EVCNT_INCR(rxq, rxipsum);
7928 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7929 if (wm_rxdesc_is_set_error(sc, errors,
7930 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
7931 m->m_pkthdr.csum_flags |=
7932 M_CSUM_IPv4_BAD;
7933 }
7934 if (wm_rxdesc_is_set_status(sc, status,
7935 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
7936 /*
7937 * Note: we don't know if this was TCP or UDP,
7938 * so we just set both bits, and expect the
7939 * upper layers to deal.
7940 */
7941 WM_Q_EVCNT_INCR(rxq, rxtusum);
7942 m->m_pkthdr.csum_flags |=
7943 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7944 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7945 if (wm_rxdesc_is_set_error(sc, errors,
7946 WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
7947 m->m_pkthdr.csum_flags |=
7948 M_CSUM_TCP_UDP_BAD;
7949 }
7950 }
7951 }
7952
7953 /*
7954 * wm_rxeof:
7955 *
7956 * Helper; handle receive interrupts.
7957 */
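/*
 * Rough flow of the loop below (descriptive only): for each descriptor
 * starting at rxq_ptr, stop once the DD bit is clear or the caller's
 * limit is reached; chain the received mbufs until EOP, trim the FCS
 * where the chip does not strip it, attach VLAN and checksum
 * information, and hand the completed packet to if_percpuq_enqueue()
 * with rxq_lock dropped around the call.
 */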
7958 static void
7959 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
7960 {
7961 struct wm_softc *sc = rxq->rxq_sc;
7962 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7963 struct wm_rxsoft *rxs;
7964 struct mbuf *m;
7965 int i, len;
7966 int count = 0;
7967 uint32_t status, errors;
7968 uint16_t vlantag;
7969
7970 KASSERT(mutex_owned(rxq->rxq_lock));
7971
7972 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7973 if (limit-- == 0) {
7974 rxq->rxq_ptr = i;
7975 break;
7976 }
7977
7978 rxs = &rxq->rxq_soft[i];
7979
7980 DPRINTF(WM_DEBUG_RX,
7981 ("%s: RX: checking descriptor %d\n",
7982 device_xname(sc->sc_dev), i));
7983 wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
7984
7985 status = wm_rxdesc_get_status(rxq, i);
7986 errors = wm_rxdesc_get_errors(rxq, i);
7987 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
7988 vlantag = wm_rxdesc_get_vlantag(rxq, i);
7989 #ifdef WM_DEBUG
7990 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
7991 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
7992 #endif
7993
7994 if (!wm_rxdesc_dd(rxq, i, status)) {
7995 			/*
7996 			 * Update the receive pointer while holding rxq_lock,
7997 			 * keeping it consistent with the counter updates.
7998 			 */
7999 rxq->rxq_ptr = i;
8000 break;
8001 }
8002
8003 count++;
8004 if (__predict_false(rxq->rxq_discard)) {
8005 DPRINTF(WM_DEBUG_RX,
8006 ("%s: RX: discarding contents of descriptor %d\n",
8007 device_xname(sc->sc_dev), i));
8008 wm_init_rxdesc(rxq, i);
8009 if (wm_rxdesc_is_eop(rxq, status)) {
8010 /* Reset our state. */
8011 DPRINTF(WM_DEBUG_RX,
8012 ("%s: RX: resetting rxdiscard -> 0\n",
8013 device_xname(sc->sc_dev)));
8014 rxq->rxq_discard = 0;
8015 }
8016 continue;
8017 }
8018
8019 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8020 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
8021
8022 m = rxs->rxs_mbuf;
8023
8024 /*
8025 * Add a new receive buffer to the ring, unless of
8026 * course the length is zero. Treat the latter as a
8027 * failed mapping.
8028 */
8029 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
8030 /*
8031 * Failed, throw away what we've done so
8032 * far, and discard the rest of the packet.
8033 */
8034 ifp->if_ierrors++;
8035 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8036 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
8037 wm_init_rxdesc(rxq, i);
8038 if (!wm_rxdesc_is_eop(rxq, status))
8039 rxq->rxq_discard = 1;
8040 if (rxq->rxq_head != NULL)
8041 m_freem(rxq->rxq_head);
8042 WM_RXCHAIN_RESET(rxq);
8043 DPRINTF(WM_DEBUG_RX,
8044 ("%s: RX: Rx buffer allocation failed, "
8045 "dropping packet%s\n", device_xname(sc->sc_dev),
8046 rxq->rxq_discard ? " (discard)" : ""));
8047 continue;
8048 }
8049
8050 m->m_len = len;
8051 rxq->rxq_len += len;
8052 DPRINTF(WM_DEBUG_RX,
8053 ("%s: RX: buffer at %p len %d\n",
8054 device_xname(sc->sc_dev), m->m_data, len));
8055
8056 /* If this is not the end of the packet, keep looking. */
8057 if (!wm_rxdesc_is_eop(rxq, status)) {
8058 WM_RXCHAIN_LINK(rxq, m);
8059 DPRINTF(WM_DEBUG_RX,
8060 ("%s: RX: not yet EOP, rxlen -> %d\n",
8061 device_xname(sc->sc_dev), rxq->rxq_len));
8062 continue;
8063 }
8064
8065 		/*
8066 		 * Okay, we have the entire packet now. The chip is
8067 		 * configured to include the FCS except on I350, I354 and
8068 		 * I21[01] (not all chips can be configured to strip it),
8069 		 * so we need to trim it here.  We may need to adjust the
8070 		 * length of the previous mbuf in the chain if the current
8071 		 * mbuf is too short.
8072 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
8073 		 * register is always set on I350, so we don't trim there.
8074 		 */
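		/*
		 * Worked example (illustrative): with ETHER_CRC_LEN == 4,
		 * if the final mbuf holds only 2 bytes, those 2 bytes are
		 * all FCS, so its length is set to 0 and the remaining 2
		 * FCS bytes are shaved off the previous mbuf in the chain.
		 */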
8075 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
8076 && (sc->sc_type != WM_T_I210)
8077 && (sc->sc_type != WM_T_I211)) {
8078 if (m->m_len < ETHER_CRC_LEN) {
8079 rxq->rxq_tail->m_len
8080 -= (ETHER_CRC_LEN - m->m_len);
8081 m->m_len = 0;
8082 } else
8083 m->m_len -= ETHER_CRC_LEN;
8084 len = rxq->rxq_len - ETHER_CRC_LEN;
8085 } else
8086 len = rxq->rxq_len;
8087
8088 WM_RXCHAIN_LINK(rxq, m);
8089
8090 *rxq->rxq_tailp = NULL;
8091 m = rxq->rxq_head;
8092
8093 WM_RXCHAIN_RESET(rxq);
8094
8095 DPRINTF(WM_DEBUG_RX,
8096 ("%s: RX: have entire packet, len -> %d\n",
8097 device_xname(sc->sc_dev), len));
8098
8099 /* If an error occurred, update stats and drop the packet. */
8100 if (wm_rxdesc_has_errors(rxq, errors)) {
8101 m_freem(m);
8102 continue;
8103 }
8104
8105 /* No errors. Receive the packet. */
8106 m_set_rcvif(m, ifp);
8107 m->m_pkthdr.len = len;
8108 		/*
8109 		 * TODO
8110 		 * We should save the rsshash and rsstype in this mbuf.
8111 		 */
8112 DPRINTF(WM_DEBUG_RX,
8113 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
8114 device_xname(sc->sc_dev), rsstype, rsshash));
8115
8116 /*
8117 * If VLANs are enabled, VLAN packets have been unwrapped
8118 * for us. Associate the tag with the packet.
8119 */
8120 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
8121 continue;
8122
8123 /* Set up checksum info for this packet. */
8124 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
8125 		/*
8126 		 * Update the receive pointer while holding rxq_lock,
8127 		 * keeping it consistent with the counter updates.
8128 		 */
8129 rxq->rxq_ptr = i;
8130 rxq->rxq_packets++;
8131 rxq->rxq_bytes += len;
8132 mutex_exit(rxq->rxq_lock);
8133
8134 /* Pass it on. */
8135 if_percpuq_enqueue(sc->sc_ipq, m);
8136
8137 mutex_enter(rxq->rxq_lock);
8138
8139 if (rxq->rxq_stopping)
8140 break;
8141 }
8142
8143 if (count != 0)
8144 rnd_add_uint32(&sc->rnd_source, count);
8145
8146 DPRINTF(WM_DEBUG_RX,
8147 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
8148 }
8149
8150 /*
8151 * wm_linkintr_gmii:
8152 *
8153 * Helper; handle link interrupts for GMII.
8154 */
8155 static void
8156 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
8157 {
8158
8159 KASSERT(WM_CORE_LOCKED(sc));
8160
8161 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8162 __func__));
8163
8164 if (icr & ICR_LSC) {
8165 uint32_t reg;
8166 uint32_t status = CSR_READ(sc, WMREG_STATUS);
8167
8168 if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
8169 wm_gig_downshift_workaround_ich8lan(sc);
8170
8171 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
8172 device_xname(sc->sc_dev)));
8173 mii_pollstat(&sc->sc_mii);
8174 if (sc->sc_type == WM_T_82543) {
8175 int miistatus, active;
8176
8177 /*
8178 * With 82543, we need to force speed and
8179 * duplex on the MAC equal to what the PHY
8180 * speed and duplex configuration is.
8181 */
8182 miistatus = sc->sc_mii.mii_media_status;
8183
8184 if (miistatus & IFM_ACTIVE) {
8185 active = sc->sc_mii.mii_media_active;
8186 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8187 switch (IFM_SUBTYPE(active)) {
8188 case IFM_10_T:
8189 sc->sc_ctrl |= CTRL_SPEED_10;
8190 break;
8191 case IFM_100_TX:
8192 sc->sc_ctrl |= CTRL_SPEED_100;
8193 break;
8194 case IFM_1000_T:
8195 sc->sc_ctrl |= CTRL_SPEED_1000;
8196 break;
8197 default:
8198 /*
8199 * fiber?
8200 					 * Should not get here.
8201 */
8202 printf("unknown media (%x)\n", active);
8203 break;
8204 }
8205 if (active & IFM_FDX)
8206 sc->sc_ctrl |= CTRL_FD;
8207 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8208 }
8209 } else if ((sc->sc_type == WM_T_ICH8)
8210 && (sc->sc_phytype == WMPHY_IGP_3)) {
8211 wm_kmrn_lock_loss_workaround_ich8lan(sc);
8212 } else if (sc->sc_type == WM_T_PCH) {
8213 wm_k1_gig_workaround_hv(sc,
8214 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
8215 }
8216
8217 if ((sc->sc_phytype == WMPHY_82578)
8218 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
8219 == IFM_1000_T)) {
8220
8221 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
8222 delay(200*1000); /* XXX too big */
8223
8224 /* Link stall fix for link up */
8225 wm_gmii_hv_writereg(sc->sc_dev, 1,
8226 HV_MUX_DATA_CTRL,
8227 HV_MUX_DATA_CTRL_GEN_TO_MAC
8228 | HV_MUX_DATA_CTRL_FORCE_SPEED);
8229 wm_gmii_hv_writereg(sc->sc_dev, 1,
8230 HV_MUX_DATA_CTRL,
8231 HV_MUX_DATA_CTRL_GEN_TO_MAC);
8232 }
8233 }
8234 /*
8235 * I217 Packet Loss issue:
8236 * ensure that FEXTNVM4 Beacon Duration is set correctly
8237 * on power up.
8238 * Set the Beacon Duration for I217 to 8 usec
8239 */
8240 if ((sc->sc_type == WM_T_PCH_LPT)
8241 || (sc->sc_type == WM_T_PCH_SPT)) {
8242 reg = CSR_READ(sc, WMREG_FEXTNVM4);
8243 reg &= ~FEXTNVM4_BEACON_DURATION;
8244 reg |= FEXTNVM4_BEACON_DURATION_8US;
8245 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
8246 }
8247
8248 /* XXX Work-around I218 hang issue */
8249 /* e1000_k1_workaround_lpt_lp() */
8250
8251 if ((sc->sc_type == WM_T_PCH_LPT)
8252 || (sc->sc_type == WM_T_PCH_SPT)) {
8253 /*
8254 * Set platform power management values for Latency
8255 * Tolerance Reporting (LTR)
8256 */
8257 wm_platform_pm_pch_lpt(sc,
8258 ((sc->sc_mii.mii_media_status & IFM_ACTIVE)
8259 != 0));
8260 }
8261
8262 /* FEXTNVM6 K1-off workaround */
8263 if (sc->sc_type == WM_T_PCH_SPT) {
8264 reg = CSR_READ(sc, WMREG_FEXTNVM6);
8265 if (CSR_READ(sc, WMREG_PCIEANACFG)
8266 & FEXTNVM6_K1_OFF_ENABLE)
8267 reg |= FEXTNVM6_K1_OFF_ENABLE;
8268 else
8269 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
8270 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
8271 }
8272 } else if (icr & ICR_RXSEQ) {
8273 DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
8274 device_xname(sc->sc_dev)));
8275 }
8276 }
8277
8278 /*
8279 * wm_linkintr_tbi:
8280 *
8281 * Helper; handle link interrupts for TBI mode.
8282 */
8283 static void
8284 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
8285 {
8286 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8287 uint32_t status;
8288
8289 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8290 __func__));
8291
8292 status = CSR_READ(sc, WMREG_STATUS);
8293 if (icr & ICR_LSC) {
8294 if (status & STATUS_LU) {
8295 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
8296 device_xname(sc->sc_dev),
8297 (status & STATUS_FD) ? "FDX" : "HDX"));
8298 /*
8299 * NOTE: CTRL will update TFCE and RFCE automatically,
8300 * so we should update sc->sc_ctrl
8301 */
8302
8303 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8304 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8305 sc->sc_fcrtl &= ~FCRTL_XONE;
8306 if (status & STATUS_FD)
8307 sc->sc_tctl |=
8308 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8309 else
8310 sc->sc_tctl |=
8311 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8312 if (sc->sc_ctrl & CTRL_TFCE)
8313 sc->sc_fcrtl |= FCRTL_XONE;
8314 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8315 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8316 WMREG_OLD_FCRTL : WMREG_FCRTL,
8317 sc->sc_fcrtl);
8318 sc->sc_tbi_linkup = 1;
8319 if_link_state_change(ifp, LINK_STATE_UP);
8320 } else {
8321 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8322 device_xname(sc->sc_dev)));
8323 sc->sc_tbi_linkup = 0;
8324 if_link_state_change(ifp, LINK_STATE_DOWN);
8325 }
8326 /* Update LED */
8327 wm_tbi_serdes_set_linkled(sc);
8328 } else if (icr & ICR_RXSEQ) {
8329 DPRINTF(WM_DEBUG_LINK,
8330 ("%s: LINK: Receive sequence error\n",
8331 device_xname(sc->sc_dev)));
8332 }
8333 }
8334
8335 /*
8336 * wm_linkintr_serdes:
8337 *
8338  * Helper; handle link interrupts for SERDES mode.
8339 */
8340 static void
8341 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
8342 {
8343 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8344 struct mii_data *mii = &sc->sc_mii;
8345 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8346 uint32_t pcs_adv, pcs_lpab, reg;
8347
8348 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8349 __func__));
8350
8351 if (icr & ICR_LSC) {
8352 /* Check PCS */
8353 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8354 if ((reg & PCS_LSTS_LINKOK) != 0) {
8355 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
8356 device_xname(sc->sc_dev)));
8357 mii->mii_media_status |= IFM_ACTIVE;
8358 sc->sc_tbi_linkup = 1;
8359 if_link_state_change(ifp, LINK_STATE_UP);
8360 } else {
8361 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8362 device_xname(sc->sc_dev)));
8363 mii->mii_media_status |= IFM_NONE;
8364 sc->sc_tbi_linkup = 0;
8365 if_link_state_change(ifp, LINK_STATE_DOWN);
8366 wm_tbi_serdes_set_linkled(sc);
8367 return;
8368 }
8369 mii->mii_media_active |= IFM_1000_SX;
8370 if ((reg & PCS_LSTS_FDX) != 0)
8371 mii->mii_media_active |= IFM_FDX;
8372 else
8373 mii->mii_media_active |= IFM_HDX;
8374 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8375 /* Check flow */
8376 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8377 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8378 DPRINTF(WM_DEBUG_LINK,
8379 ("XXX LINKOK but not ACOMP\n"));
8380 return;
8381 }
8382 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8383 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8384 DPRINTF(WM_DEBUG_LINK,
8385 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
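			/*
			 * Resolve flow control from the advertised and
			 * link-partner pause bits (this mirrors the usual
			 * 802.3 pause resolution): symmetric pause when both
			 * sides advertise SYM_PAUSE, Tx-only or Rx-only
			 * pause when one side advertises ASYM_PAUSE and the
			 * bits match as checked below, otherwise no pause.
			 */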
8386 if ((pcs_adv & TXCW_SYM_PAUSE)
8387 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8388 mii->mii_media_active |= IFM_FLOW
8389 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8390 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8391 && (pcs_adv & TXCW_ASYM_PAUSE)
8392 && (pcs_lpab & TXCW_SYM_PAUSE)
8393 && (pcs_lpab & TXCW_ASYM_PAUSE))
8394 mii->mii_media_active |= IFM_FLOW
8395 | IFM_ETH_TXPAUSE;
8396 else if ((pcs_adv & TXCW_SYM_PAUSE)
8397 && (pcs_adv & TXCW_ASYM_PAUSE)
8398 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8399 && (pcs_lpab & TXCW_ASYM_PAUSE))
8400 mii->mii_media_active |= IFM_FLOW
8401 | IFM_ETH_RXPAUSE;
8402 }
8403 /* Update LED */
8404 wm_tbi_serdes_set_linkled(sc);
8405 } else {
8406 DPRINTF(WM_DEBUG_LINK,
8407 ("%s: LINK: Receive sequence error\n",
8408 device_xname(sc->sc_dev)));
8409 }
8410 }
8411
8412 /*
8413 * wm_linkintr:
8414 *
8415 * Helper; handle link interrupts.
8416 */
8417 static void
8418 wm_linkintr(struct wm_softc *sc, uint32_t icr)
8419 {
8420
8421 KASSERT(WM_CORE_LOCKED(sc));
8422
8423 if (sc->sc_flags & WM_F_HAS_MII)
8424 wm_linkintr_gmii(sc, icr);
8425 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8426 && (sc->sc_type >= WM_T_82575))
8427 wm_linkintr_serdes(sc, icr);
8428 else
8429 wm_linkintr_tbi(sc, icr);
8430 }
8431
8432 /*
8433 * wm_intr_legacy:
8434 *
8435 * Interrupt service routine for INTx and MSI.
8436 */
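/*
 * The handler below re-reads ICR until none of the bits we care about
 * (sc_icr) remain set, services queue 0's Rx and Tx rings, handles link
 * events under the core lock, and finally schedules the per-queue
 * softint so any deferred transmission can proceed.
 */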
8437 static int
8438 wm_intr_legacy(void *arg)
8439 {
8440 struct wm_softc *sc = arg;
8441 struct wm_queue *wmq = &sc->sc_queue[0];
8442 struct wm_txqueue *txq = &wmq->wmq_txq;
8443 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8444 uint32_t icr, rndval = 0;
8445 int handled = 0;
8446
8447 DPRINTF(WM_DEBUG_TX,
8448 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
8449 while (1 /* CONSTCOND */) {
8450 icr = CSR_READ(sc, WMREG_ICR);
8451 if ((icr & sc->sc_icr) == 0)
8452 break;
8453 if (rndval == 0)
8454 rndval = icr;
8455
8456 mutex_enter(rxq->rxq_lock);
8457
8458 if (rxq->rxq_stopping) {
8459 mutex_exit(rxq->rxq_lock);
8460 break;
8461 }
8462
8463 handled = 1;
8464
8465 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
8466 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
8467 DPRINTF(WM_DEBUG_RX,
8468 ("%s: RX: got Rx intr 0x%08x\n",
8469 device_xname(sc->sc_dev),
8470 icr & (ICR_RXDMT0 | ICR_RXT0)));
8471 WM_Q_EVCNT_INCR(rxq, rxintr);
8472 }
8473 #endif
8474 wm_rxeof(rxq, UINT_MAX);
8475
8476 mutex_exit(rxq->rxq_lock);
8477 mutex_enter(txq->txq_lock);
8478
8479 if (txq->txq_stopping) {
8480 mutex_exit(txq->txq_lock);
8481 break;
8482 }
8483
8484 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
8485 if (icr & ICR_TXDW) {
8486 DPRINTF(WM_DEBUG_TX,
8487 ("%s: TX: got TXDW interrupt\n",
8488 device_xname(sc->sc_dev)));
8489 WM_Q_EVCNT_INCR(txq, txdw);
8490 }
8491 #endif
8492 wm_txeof(sc, txq);
8493
8494 mutex_exit(txq->txq_lock);
8495 WM_CORE_LOCK(sc);
8496
8497 if (sc->sc_core_stopping) {
8498 WM_CORE_UNLOCK(sc);
8499 break;
8500 }
8501
8502 if (icr & (ICR_LSC | ICR_RXSEQ)) {
8503 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
8504 wm_linkintr(sc, icr);
8505 }
8506
8507 WM_CORE_UNLOCK(sc);
8508
8509 if (icr & ICR_RXO) {
8510 #if defined(WM_DEBUG)
8511 log(LOG_WARNING, "%s: Receive overrun\n",
8512 device_xname(sc->sc_dev));
8513 #endif /* defined(WM_DEBUG) */
8514 }
8515 }
8516
8517 rnd_add_uint32(&sc->rnd_source, rndval);
8518
8519 if (handled) {
8520 /* Try to get more packets going. */
8521 softint_schedule(wmq->wmq_si);
8522 }
8523
8524 return handled;
8525 }
8526
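/*
 * Per-queue interrupt masking differs by family (this just summarizes
 * the branches below): the 82574 masks its Tx/Rx queue bits through
 * IMC/IMS, the 82575 uses EIMC/EIMS with EITR_TX_QUEUE/EITR_RX_QUEUE
 * bits, and later devices use EIMC/EIMS with one bit per MSI-X vector
 * (wmq_intr_idx).
 */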
8527 static inline void
8528 wm_txrxintr_disable(struct wm_queue *wmq)
8529 {
8530 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
8531
8532 if (sc->sc_type == WM_T_82574)
8533 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
8534 else if (sc->sc_type == WM_T_82575)
8535 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8536 else
8537 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
8538 }
8539
8540 static inline void
8541 wm_txrxintr_enable(struct wm_queue *wmq)
8542 {
8543 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
8544
8545 wm_itrs_calculate(sc, wmq);
8546
8547 if (sc->sc_type == WM_T_82574)
8548 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
8549 else if (sc->sc_type == WM_T_82575)
8550 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8551 else
8552 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
8553 }
8554
8555 static int
8556 wm_txrxintr_msix(void *arg)
8557 {
8558 struct wm_queue *wmq = arg;
8559 struct wm_txqueue *txq = &wmq->wmq_txq;
8560 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8561 struct wm_softc *sc = txq->txq_sc;
8562 u_int limit = sc->sc_rx_intr_process_limit;
8563
8564 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
8565
8566 DPRINTF(WM_DEBUG_TX,
8567 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
8568
8569 wm_txrxintr_disable(wmq);
8570
8571 mutex_enter(txq->txq_lock);
8572
8573 if (txq->txq_stopping) {
8574 mutex_exit(txq->txq_lock);
8575 return 0;
8576 }
8577
8578 WM_Q_EVCNT_INCR(txq, txdw);
8579 wm_txeof(sc, txq);
8580 /* wm_deferred start() is done in wm_handle_queue(). */
8581 mutex_exit(txq->txq_lock);
8582
8583 DPRINTF(WM_DEBUG_RX,
8584 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
8585 mutex_enter(rxq->rxq_lock);
8586
8587 if (rxq->rxq_stopping) {
8588 mutex_exit(rxq->rxq_lock);
8589 return 0;
8590 }
8591
8592 WM_Q_EVCNT_INCR(rxq, rxintr);
8593 wm_rxeof(rxq, limit);
8594 mutex_exit(rxq->rxq_lock);
8595
8596 wm_itrs_writereg(sc, wmq);
8597
8598 softint_schedule(wmq->wmq_si);
8599
8600 return 1;
8601 }
8602
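/*
 * wm_handle_queue is the softint scheduled from wm_txrxintr_msix() and
 * wm_intr_legacy(): it finishes Tx completion, runs the deferred start,
 * processes more Rx packets up to sc_rx_process_limit, and then calls
 * wm_txrxintr_enable() to unmask the per-queue interrupt again.
 */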
8603 static void
8604 wm_handle_queue(void *arg)
8605 {
8606 struct wm_queue *wmq = arg;
8607 struct wm_txqueue *txq = &wmq->wmq_txq;
8608 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8609 struct wm_softc *sc = txq->txq_sc;
8610 u_int limit = sc->sc_rx_process_limit;
8611
8612 mutex_enter(txq->txq_lock);
8613 if (txq->txq_stopping) {
8614 mutex_exit(txq->txq_lock);
8615 return;
8616 }
8617 wm_txeof(sc, txq);
8618 wm_deferred_start_locked(txq);
8619 mutex_exit(txq->txq_lock);
8620
8621 mutex_enter(rxq->rxq_lock);
8622 if (rxq->rxq_stopping) {
8623 mutex_exit(rxq->rxq_lock);
8624 return;
8625 }
8626 WM_Q_EVCNT_INCR(rxq, rxintr);
8627 wm_rxeof(rxq, limit);
8628 mutex_exit(rxq->rxq_lock);
8629
8630 wm_txrxintr_enable(wmq);
8631 }
8632
8633 /*
8634 * wm_linkintr_msix:
8635 *
8636 * Interrupt service routine for link status change for MSI-X.
8637 */
8638 static int
8639 wm_linkintr_msix(void *arg)
8640 {
8641 struct wm_softc *sc = arg;
8642 uint32_t reg;
8643
8644 DPRINTF(WM_DEBUG_LINK,
8645 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
8646
8647 reg = CSR_READ(sc, WMREG_ICR);
8648 WM_CORE_LOCK(sc);
8649 if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
8650 goto out;
8651
8652 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
8653 wm_linkintr(sc, ICR_LSC);
8654
8655 out:
8656 WM_CORE_UNLOCK(sc);
8657
8658 if (sc->sc_type == WM_T_82574)
8659 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
8660 else if (sc->sc_type == WM_T_82575)
8661 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
8662 else
8663 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
8664
8665 return 1;
8666 }
8667
8668 /*
8669 * Media related.
8670 * GMII, SGMII, TBI (and SERDES)
8671 */
8672
8673 /* Common */
8674
8675 /*
8676 * wm_tbi_serdes_set_linkled:
8677 *
8678 * Update the link LED on TBI and SERDES devices.
8679 */
8680 static void
8681 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
8682 {
8683
8684 if (sc->sc_tbi_linkup)
8685 sc->sc_ctrl |= CTRL_SWDPIN(0);
8686 else
8687 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
8688
8689 /* 82540 or newer devices are active low */
8690 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
8691
8692 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8693 }
8694
8695 /* GMII related */
8696
8697 /*
8698 * wm_gmii_reset:
8699 *
8700 * Reset the PHY.
8701 */
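/*
 * The overall sequence (descriptive only): acquire the PHY semaphore,
 * pulse the reset in a chip-specific way, release the semaphore, wait
 * for the configuration cycle via wm_get_cfg_done(), and then apply
 * chip-specific post-reset workarounds.
 */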
8702 static void
8703 wm_gmii_reset(struct wm_softc *sc)
8704 {
8705 uint32_t reg;
8706 int rv;
8707
8708 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
8709 device_xname(sc->sc_dev), __func__));
8710
8711 rv = sc->phy.acquire(sc);
8712 if (rv != 0) {
8713 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8714 __func__);
8715 return;
8716 }
8717
8718 switch (sc->sc_type) {
8719 case WM_T_82542_2_0:
8720 case WM_T_82542_2_1:
8721 /* null */
8722 break;
8723 case WM_T_82543:
8724 /*
8725 * With 82543, we need to force speed and duplex on the MAC
8726 * equal to what the PHY speed and duplex configuration is.
8727 * In addition, we need to perform a hardware reset on the PHY
8728 * to take it out of reset.
8729 */
8730 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8731 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8732
8733 /* The PHY reset pin is active-low. */
8734 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8735 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
8736 CTRL_EXT_SWDPIN(4));
8737 reg |= CTRL_EXT_SWDPIO(4);
8738
8739 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8740 CSR_WRITE_FLUSH(sc);
8741 delay(10*1000);
8742
8743 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
8744 CSR_WRITE_FLUSH(sc);
8745 delay(150);
8746 #if 0
8747 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
8748 #endif
8749 delay(20*1000); /* XXX extra delay to get PHY ID? */
8750 break;
8751 case WM_T_82544: /* reset 10000us */
8752 case WM_T_82540:
8753 case WM_T_82545:
8754 case WM_T_82545_3:
8755 case WM_T_82546:
8756 case WM_T_82546_3:
8757 case WM_T_82541:
8758 case WM_T_82541_2:
8759 case WM_T_82547:
8760 case WM_T_82547_2:
8761 case WM_T_82571: /* reset 100us */
8762 case WM_T_82572:
8763 case WM_T_82573:
8764 case WM_T_82574:
8765 case WM_T_82575:
8766 case WM_T_82576:
8767 case WM_T_82580:
8768 case WM_T_I350:
8769 case WM_T_I354:
8770 case WM_T_I210:
8771 case WM_T_I211:
8772 case WM_T_82583:
8773 case WM_T_80003:
8774 /* generic reset */
8775 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8776 CSR_WRITE_FLUSH(sc);
8777 delay(20000);
8778 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8779 CSR_WRITE_FLUSH(sc);
8780 delay(20000);
8781
8782 if ((sc->sc_type == WM_T_82541)
8783 || (sc->sc_type == WM_T_82541_2)
8784 || (sc->sc_type == WM_T_82547)
8785 || (sc->sc_type == WM_T_82547_2)) {
8786 			/* workarounds for IGP are done in igp_reset() */
8787 /* XXX add code to set LED after phy reset */
8788 }
8789 break;
8790 case WM_T_ICH8:
8791 case WM_T_ICH9:
8792 case WM_T_ICH10:
8793 case WM_T_PCH:
8794 case WM_T_PCH2:
8795 case WM_T_PCH_LPT:
8796 case WM_T_PCH_SPT:
8797 /* generic reset */
8798 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8799 CSR_WRITE_FLUSH(sc);
8800 delay(100);
8801 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8802 CSR_WRITE_FLUSH(sc);
8803 delay(150);
8804 break;
8805 default:
8806 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
8807 __func__);
8808 break;
8809 }
8810
8811 sc->phy.release(sc);
8812
8813 /* get_cfg_done */
8814 wm_get_cfg_done(sc);
8815
8816 /* extra setup */
8817 switch (sc->sc_type) {
8818 case WM_T_82542_2_0:
8819 case WM_T_82542_2_1:
8820 case WM_T_82543:
8821 case WM_T_82544:
8822 case WM_T_82540:
8823 case WM_T_82545:
8824 case WM_T_82545_3:
8825 case WM_T_82546:
8826 case WM_T_82546_3:
8827 case WM_T_82541_2:
8828 case WM_T_82547_2:
8829 case WM_T_82571:
8830 case WM_T_82572:
8831 case WM_T_82573:
8832 case WM_T_82575:
8833 case WM_T_82576:
8834 case WM_T_82580:
8835 case WM_T_I350:
8836 case WM_T_I354:
8837 case WM_T_I210:
8838 case WM_T_I211:
8839 case WM_T_80003:
8840 /* null */
8841 break;
8842 case WM_T_82574:
8843 case WM_T_82583:
8844 wm_lplu_d0_disable(sc);
8845 break;
8846 case WM_T_82541:
8847 case WM_T_82547:
8848 		/* XXX Configure the activity LED after PHY reset */
8849 break;
8850 case WM_T_ICH8:
8851 case WM_T_ICH9:
8852 case WM_T_ICH10:
8853 case WM_T_PCH:
8854 case WM_T_PCH2:
8855 case WM_T_PCH_LPT:
8856 case WM_T_PCH_SPT:
8857 		/* Allow time for h/w to get to a quiescent state after reset */
8858 delay(10*1000);
8859
8860 if (sc->sc_type == WM_T_PCH)
8861 wm_hv_phy_workaround_ich8lan(sc);
8862
8863 if (sc->sc_type == WM_T_PCH2)
8864 wm_lv_phy_workaround_ich8lan(sc);
8865
8866 /* Clear the host wakeup bit after lcd reset */
8867 if (sc->sc_type >= WM_T_PCH) {
8868 reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
8869 BM_PORT_GEN_CFG);
8870 reg &= ~BM_WUC_HOST_WU_BIT;
8871 wm_gmii_hv_writereg(sc->sc_dev, 2,
8872 BM_PORT_GEN_CFG, reg);
8873 }
8874
8875 /*
8876 		 * XXX Configure the LCD with the extended configuration region
8877 * in NVM
8878 */
8879
8880 /* Disable D0 LPLU. */
8881 if (sc->sc_type >= WM_T_PCH) /* PCH* */
8882 wm_lplu_d0_disable_pch(sc);
8883 else
8884 wm_lplu_d0_disable(sc); /* ICH* */
8885 break;
8886 default:
8887 panic("%s: unknown type\n", __func__);
8888 break;
8889 }
8890 }
8891
8892 /*
8893  * Set up sc_phytype and mii_{read|write}reg.
8894  *
8895  *  To identify the PHY type, the correct read/write functions must be
8896  * selected first, and to select those functions we can only rely on the
8897  * PCI ID or the MAC type, without accessing any PHY registers.
8898  *
8899  *  On the first call of this function, the PHY ID is not known yet, so
8900  * check the PCI ID or the MAC type. The list of PCI IDs may not be
8901  * complete, so the result might be incorrect.
8902  *
8903  *  On the second call, the PHY OUI and model are used to identify the
8904  * PHY type. The table of known PHYs may still be incomplete, but the
8905  * result should be better than on the first call.
8906  *
8907  *  If the newly detected result differs from the previous assumption,
8908  * a diagnostic message is printed.
8909  */
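/*
 * A sketch of how this is used by wm_gmii_mediainit() below (see that
 * function for the actual calls); first a provisional guess before any
 * PHY register access, then a refinement once a PHY child has attached:
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);
 *	... mii_attach() probes the bus ...
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui, child->mii_mpd_model);
 */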
8910 static void
8911 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
8912 uint16_t phy_model)
8913 {
8914 device_t dev = sc->sc_dev;
8915 struct mii_data *mii = &sc->sc_mii;
8916 uint16_t new_phytype = WMPHY_UNKNOWN;
8917 uint16_t doubt_phytype = WMPHY_UNKNOWN;
8918 mii_readreg_t new_readreg;
8919 mii_writereg_t new_writereg;
8920
8921 if (mii->mii_readreg == NULL) {
8922 /*
8923 * This is the first call of this function. For ICH and PCH
8924 * variants, it's difficult to determine the PHY access method
8925 * by sc_type, so use the PCI product ID for some devices.
8926 */
8927
8928 switch (sc->sc_pcidevid) {
8929 case PCI_PRODUCT_INTEL_PCH_M_LM:
8930 case PCI_PRODUCT_INTEL_PCH_M_LC:
8931 /* 82577 */
8932 new_phytype = WMPHY_82577;
8933 break;
8934 case PCI_PRODUCT_INTEL_PCH_D_DM:
8935 case PCI_PRODUCT_INTEL_PCH_D_DC:
8936 /* 82578 */
8937 new_phytype = WMPHY_82578;
8938 break;
8939 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8940 case PCI_PRODUCT_INTEL_PCH2_LV_V:
8941 /* 82579 */
8942 new_phytype = WMPHY_82579;
8943 break;
8944 case PCI_PRODUCT_INTEL_82801H_82567V_3:
8945 case PCI_PRODUCT_INTEL_82801I_BM:
8946 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
8947 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8948 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8949 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8950 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8951 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8952 /* ICH8, 9, 10 with 82567 */
8953 new_phytype = WMPHY_BM;
8954 break;
8955 default:
8956 break;
8957 }
8958 } else {
8959 /* It's not the first call. Use PHY OUI and model */
8960 switch (phy_oui) {
8961 case MII_OUI_ATHEROS: /* XXX ??? */
8962 switch (phy_model) {
8963 case 0x0004: /* XXX */
8964 new_phytype = WMPHY_82578;
8965 break;
8966 default:
8967 break;
8968 }
8969 break;
8970 case MII_OUI_xxMARVELL:
8971 switch (phy_model) {
8972 case MII_MODEL_xxMARVELL_I210:
8973 new_phytype = WMPHY_I210;
8974 break;
8975 case MII_MODEL_xxMARVELL_E1011:
8976 case MII_MODEL_xxMARVELL_E1000_3:
8977 case MII_MODEL_xxMARVELL_E1000_5:
8978 case MII_MODEL_xxMARVELL_E1112:
8979 new_phytype = WMPHY_M88;
8980 break;
8981 case MII_MODEL_xxMARVELL_E1149:
8982 new_phytype = WMPHY_BM;
8983 break;
8984 case MII_MODEL_xxMARVELL_E1111:
8985 case MII_MODEL_xxMARVELL_I347:
8986 case MII_MODEL_xxMARVELL_E1512:
8987 case MII_MODEL_xxMARVELL_E1340M:
8988 case MII_MODEL_xxMARVELL_E1543:
8989 new_phytype = WMPHY_M88;
8990 break;
8991 case MII_MODEL_xxMARVELL_I82563:
8992 new_phytype = WMPHY_GG82563;
8993 break;
8994 default:
8995 break;
8996 }
8997 break;
8998 case MII_OUI_INTEL:
8999 switch (phy_model) {
9000 case MII_MODEL_INTEL_I82577:
9001 new_phytype = WMPHY_82577;
9002 break;
9003 case MII_MODEL_INTEL_I82579:
9004 new_phytype = WMPHY_82579;
9005 break;
9006 case MII_MODEL_INTEL_I217:
9007 new_phytype = WMPHY_I217;
9008 break;
9009 case MII_MODEL_INTEL_I82580:
9010 case MII_MODEL_INTEL_I350:
9011 new_phytype = WMPHY_82580;
9012 break;
9013 default:
9014 break;
9015 }
9016 break;
9017 case MII_OUI_yyINTEL:
9018 switch (phy_model) {
9019 case MII_MODEL_yyINTEL_I82562G:
9020 case MII_MODEL_yyINTEL_I82562EM:
9021 case MII_MODEL_yyINTEL_I82562ET:
9022 new_phytype = WMPHY_IFE;
9023 break;
9024 case MII_MODEL_yyINTEL_IGP01E1000:
9025 new_phytype = WMPHY_IGP;
9026 break;
9027 case MII_MODEL_yyINTEL_I82566:
9028 new_phytype = WMPHY_IGP_3;
9029 break;
9030 default:
9031 break;
9032 }
9033 break;
9034 default:
9035 break;
9036 }
9037 if (new_phytype == WMPHY_UNKNOWN)
9038 aprint_verbose_dev(dev, "%s: unknown PHY model\n",
9039 __func__);
9040
9041 if ((sc->sc_phytype != WMPHY_UNKNOWN)
9042 && (sc->sc_phytype != new_phytype )) {
9043 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
9044 "was incorrect. PHY type from PHY ID = %u\n",
9045 sc->sc_phytype, new_phytype);
9046 }
9047 }
9048
9049 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
9050 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
9051 /* SGMII */
9052 new_readreg = wm_sgmii_readreg;
9053 new_writereg = wm_sgmii_writereg;
9054 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
9055 /* BM2 (phyaddr == 1) */
9056 if ((sc->sc_phytype != WMPHY_UNKNOWN)
9057 && (new_phytype != WMPHY_BM)
9058 && (new_phytype != WMPHY_UNKNOWN))
9059 doubt_phytype = new_phytype;
9060 new_phytype = WMPHY_BM;
9061 new_readreg = wm_gmii_bm_readreg;
9062 new_writereg = wm_gmii_bm_writereg;
9063 } else if (sc->sc_type >= WM_T_PCH) {
9064 /* All PCH* use _hv_ */
9065 new_readreg = wm_gmii_hv_readreg;
9066 new_writereg = wm_gmii_hv_writereg;
9067 } else if (sc->sc_type >= WM_T_ICH8) {
9068 /* non-82567 ICH8, 9 and 10 */
9069 new_readreg = wm_gmii_i82544_readreg;
9070 new_writereg = wm_gmii_i82544_writereg;
9071 } else if (sc->sc_type >= WM_T_80003) {
9072 /* 80003 */
9073 if ((sc->sc_phytype != WMPHY_UNKNOWN)
9074 && (new_phytype != WMPHY_GG82563)
9075 && (new_phytype != WMPHY_UNKNOWN))
9076 doubt_phytype = new_phytype;
9077 new_phytype = WMPHY_GG82563;
9078 new_readreg = wm_gmii_i80003_readreg;
9079 new_writereg = wm_gmii_i80003_writereg;
9080 } else if (sc->sc_type >= WM_T_I210) {
9081 /* I210 and I211 */
9082 if ((sc->sc_phytype != WMPHY_UNKNOWN)
9083 && (new_phytype != WMPHY_I210)
9084 && (new_phytype != WMPHY_UNKNOWN))
9085 doubt_phytype = new_phytype;
9086 new_phytype = WMPHY_I210;
9087 new_readreg = wm_gmii_gs40g_readreg;
9088 new_writereg = wm_gmii_gs40g_writereg;
9089 } else if (sc->sc_type >= WM_T_82580) {
9090 /* 82580, I350 and I354 */
9091 new_readreg = wm_gmii_82580_readreg;
9092 new_writereg = wm_gmii_82580_writereg;
9093 } else if (sc->sc_type >= WM_T_82544) {
9094 /* 82544, 0, [56], [17], 8257[1234] and 82583 */
9095 new_readreg = wm_gmii_i82544_readreg;
9096 new_writereg = wm_gmii_i82544_writereg;
9097 } else {
9098 new_readreg = wm_gmii_i82543_readreg;
9099 new_writereg = wm_gmii_i82543_writereg;
9100 }
9101
9102 if (new_phytype == WMPHY_BM) {
9103 /* All BM use _bm_ */
9104 new_readreg = wm_gmii_bm_readreg;
9105 new_writereg = wm_gmii_bm_writereg;
9106 }
9107 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
9108 /* All PCH* use _hv_ */
9109 new_readreg = wm_gmii_hv_readreg;
9110 new_writereg = wm_gmii_hv_writereg;
9111 }
9112
9113 /* Diag output */
9114 if (doubt_phytype != WMPHY_UNKNOWN)
9115 aprint_error_dev(dev, "Assumed new PHY type was "
9116 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
9117 new_phytype);
9118 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
9119 && (sc->sc_phytype != new_phytype ))
9120 		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
9121 "was incorrect. New PHY type = %u\n",
9122 sc->sc_phytype, new_phytype);
9123
9124 if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
9125 aprint_error_dev(dev, "PHY type is still unknown.\n");
9126
9127 if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
9128 aprint_error_dev(dev, "Previously assumed PHY read/write "
9129 "function was incorrect.\n");
9130
9131 /* Update now */
9132 sc->sc_phytype = new_phytype;
9133 mii->mii_readreg = new_readreg;
9134 mii->mii_writereg = new_writereg;
9135 }
9136
9137 /*
9138 * wm_get_phy_id_82575:
9139 *
9140 * Return PHY ID. Return -1 if it failed.
9141 */
9142 static int
9143 wm_get_phy_id_82575(struct wm_softc *sc)
9144 {
9145 uint32_t reg;
9146 int phyid = -1;
9147
9148 /* XXX */
9149 if ((sc->sc_flags & WM_F_SGMII) == 0)
9150 return -1;
9151
9152 if (wm_sgmii_uses_mdio(sc)) {
9153 switch (sc->sc_type) {
9154 case WM_T_82575:
9155 case WM_T_82576:
9156 reg = CSR_READ(sc, WMREG_MDIC);
9157 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
9158 break;
9159 case WM_T_82580:
9160 case WM_T_I350:
9161 case WM_T_I354:
9162 case WM_T_I210:
9163 case WM_T_I211:
9164 reg = CSR_READ(sc, WMREG_MDICNFG);
9165 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
9166 break;
9167 default:
9168 return -1;
9169 }
9170 }
9171
9172 return phyid;
9173 }
9174
9175
9176 /*
9177 * wm_gmii_mediainit:
9178 *
9179 * Initialize media for use on 1000BASE-T devices.
9180 */
9181 static void
9182 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
9183 {
9184 device_t dev = sc->sc_dev;
9185 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9186 struct mii_data *mii = &sc->sc_mii;
9187 uint32_t reg;
9188
9189 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9190 device_xname(sc->sc_dev), __func__));
9191
9192 /* We have GMII. */
9193 sc->sc_flags |= WM_F_HAS_MII;
9194
9195 if (sc->sc_type == WM_T_80003)
9196 sc->sc_tipg = TIPG_1000T_80003_DFLT;
9197 else
9198 sc->sc_tipg = TIPG_1000T_DFLT;
9199
9200 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
9201 if ((sc->sc_type == WM_T_82580)
9202 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
9203 || (sc->sc_type == WM_T_I211)) {
9204 reg = CSR_READ(sc, WMREG_PHPM);
9205 reg &= ~PHPM_GO_LINK_D;
9206 CSR_WRITE(sc, WMREG_PHPM, reg);
9207 }
9208
9209 /*
9210 * Let the chip set speed/duplex on its own based on
9211 * signals from the PHY.
9212 * XXXbouyer - I'm not sure this is right for the 80003,
9213 * the em driver only sets CTRL_SLU here - but it seems to work.
9214 */
9215 sc->sc_ctrl |= CTRL_SLU;
9216 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9217
9218 /* Initialize our media structures and probe the GMII. */
9219 mii->mii_ifp = ifp;
9220
9221 /*
9222 	 * The first call of wm_gmii_setup_phytype. The result might be
9223 * incorrect.
9224 */
9225 wm_gmii_setup_phytype(sc, 0, 0);
9226
9227 mii->mii_statchg = wm_gmii_statchg;
9228
9229 /* get PHY control from SMBus to PCIe */
9230 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
9231 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
9232 wm_smbustopci(sc);
9233
9234 wm_gmii_reset(sc);
9235
9236 sc->sc_ethercom.ec_mii = &sc->sc_mii;
9237 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
9238 wm_gmii_mediastatus);
9239
9240 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
9241 || (sc->sc_type == WM_T_82580)
9242 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
9243 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
9244 if ((sc->sc_flags & WM_F_SGMII) == 0) {
9245 /* Attach only one port */
9246 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
9247 MII_OFFSET_ANY, MIIF_DOPAUSE);
9248 } else {
9249 int i, id;
9250 uint32_t ctrl_ext;
9251
9252 id = wm_get_phy_id_82575(sc);
9253 if (id != -1) {
9254 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
9255 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
9256 }
9257 if ((id == -1)
9258 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
9259 /* Power on sgmii phy if it is disabled */
9260 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9261 CSR_WRITE(sc, WMREG_CTRL_EXT,
9262 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
9263 CSR_WRITE_FLUSH(sc);
9264 delay(300*1000); /* XXX too long */
9265
9266 /* from 1 to 8 */
9267 for (i = 1; i < 8; i++)
9268 mii_attach(sc->sc_dev, &sc->sc_mii,
9269 0xffffffff, i, MII_OFFSET_ANY,
9270 MIIF_DOPAUSE);
9271
9272 /* restore previous sfp cage power state */
9273 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9274 }
9275 }
9276 } else {
9277 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9278 MII_OFFSET_ANY, MIIF_DOPAUSE);
9279 }
9280
9281 /*
9282 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
9283 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
9284 */
9285 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
9286 (LIST_FIRST(&mii->mii_phys) == NULL)) {
9287 wm_set_mdio_slow_mode_hv(sc);
9288 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9289 MII_OFFSET_ANY, MIIF_DOPAUSE);
9290 }
9291
9292 /*
9293 * (For ICH8 variants)
9294 * If PHY detection failed, use BM's r/w function and retry.
9295 */
9296 if (LIST_FIRST(&mii->mii_phys) == NULL) {
9297 /* if failed, retry with *_bm_* */
9298 aprint_verbose_dev(dev, "Assumed PHY access function "
9299 "(type = %d) might be incorrect. Use BM and retry.\n",
9300 sc->sc_phytype);
9301 sc->sc_phytype = WMPHY_BM;
9302 mii->mii_readreg = wm_gmii_bm_readreg;
9303 mii->mii_writereg = wm_gmii_bm_writereg;
9304
9305 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9306 MII_OFFSET_ANY, MIIF_DOPAUSE);
9307 }
9308
9309 if (LIST_FIRST(&mii->mii_phys) == NULL) {
9310 		/* No PHY was found */
9311 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
9312 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
9313 sc->sc_phytype = WMPHY_NONE;
9314 } else {
9315 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
9316
9317 /*
9318 		 * A PHY was found. Check the PHY type again with the second
9319 		 * call to wm_gmii_setup_phytype.
9320 */
9321 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
9322 child->mii_mpd_model);
9323
9324 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
9325 }
9326 }
9327
9328 /*
9329 * wm_gmii_mediachange: [ifmedia interface function]
9330 *
9331 * Set hardware to newly-selected media on a 1000BASE-T device.
9332 */
9333 static int
9334 wm_gmii_mediachange(struct ifnet *ifp)
9335 {
9336 struct wm_softc *sc = ifp->if_softc;
9337 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9338 int rc;
9339
9340 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9341 device_xname(sc->sc_dev), __func__));
9342 if ((ifp->if_flags & IFF_UP) == 0)
9343 return 0;
9344
9345 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9346 sc->sc_ctrl |= CTRL_SLU;
9347 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9348 || (sc->sc_type > WM_T_82543)) {
9349 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
9350 } else {
9351 sc->sc_ctrl &= ~CTRL_ASDE;
9352 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9353 if (ife->ifm_media & IFM_FDX)
9354 sc->sc_ctrl |= CTRL_FD;
9355 switch (IFM_SUBTYPE(ife->ifm_media)) {
9356 case IFM_10_T:
9357 sc->sc_ctrl |= CTRL_SPEED_10;
9358 break;
9359 case IFM_100_TX:
9360 sc->sc_ctrl |= CTRL_SPEED_100;
9361 break;
9362 case IFM_1000_T:
9363 sc->sc_ctrl |= CTRL_SPEED_1000;
9364 break;
9365 default:
9366 panic("wm_gmii_mediachange: bad media 0x%x",
9367 ife->ifm_media);
9368 }
9369 }
9370 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9371 if (sc->sc_type <= WM_T_82543)
9372 wm_gmii_reset(sc);
9373
9374 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
9375 return 0;
9376 return rc;
9377 }
9378
9379 /*
9380 * wm_gmii_mediastatus: [ifmedia interface function]
9381 *
9382 * Get the current interface media status on a 1000BASE-T device.
9383 */
9384 static void
9385 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9386 {
9387 struct wm_softc *sc = ifp->if_softc;
9388
9389 ether_mediastatus(ifp, ifmr);
9390 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9391 | sc->sc_flowflags;
9392 }
9393
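/*
 * On the i82543 the MII management interface is bit-banged through
 * software-controllable pins in the CTRL register: SWDPIN(2) carries the
 * MDIO data (direction selected via SWDPIO(2)) and SWDPIN(3) is the MDC
 * clock.  The helpers below shift bits in and out one at a time, toggling
 * the clock with short delays between edges.
 */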
9394 #define MDI_IO CTRL_SWDPIN(2)
9395 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
9396 #define MDI_CLK CTRL_SWDPIN(3)
9397
9398 static void
9399 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
9400 {
9401 uint32_t i, v;
9402
9403 v = CSR_READ(sc, WMREG_CTRL);
9404 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
9405 v |= MDI_DIR | CTRL_SWDPIO(3);
9406
9407 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
9408 if (data & i)
9409 v |= MDI_IO;
9410 else
9411 v &= ~MDI_IO;
9412 CSR_WRITE(sc, WMREG_CTRL, v);
9413 CSR_WRITE_FLUSH(sc);
9414 delay(10);
9415 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9416 CSR_WRITE_FLUSH(sc);
9417 delay(10);
9418 CSR_WRITE(sc, WMREG_CTRL, v);
9419 CSR_WRITE_FLUSH(sc);
9420 delay(10);
9421 }
9422 }
9423
9424 static uint32_t
9425 wm_i82543_mii_recvbits(struct wm_softc *sc)
9426 {
9427 uint32_t v, i, data = 0;
9428
9429 v = CSR_READ(sc, WMREG_CTRL);
9430 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
9431 v |= CTRL_SWDPIO(3);
9432
9433 CSR_WRITE(sc, WMREG_CTRL, v);
9434 CSR_WRITE_FLUSH(sc);
9435 delay(10);
9436 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9437 CSR_WRITE_FLUSH(sc);
9438 delay(10);
9439 CSR_WRITE(sc, WMREG_CTRL, v);
9440 CSR_WRITE_FLUSH(sc);
9441 delay(10);
9442
9443 for (i = 0; i < 16; i++) {
9444 data <<= 1;
9445 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9446 CSR_WRITE_FLUSH(sc);
9447 delay(10);
9448 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
9449 data |= 1;
9450 CSR_WRITE(sc, WMREG_CTRL, v);
9451 CSR_WRITE_FLUSH(sc);
9452 delay(10);
9453 }
9454
9455 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9456 CSR_WRITE_FLUSH(sc);
9457 delay(10);
9458 CSR_WRITE(sc, WMREG_CTRL, v);
9459 CSR_WRITE_FLUSH(sc);
9460 delay(10);
9461
9462 return data;
9463 }
9464
9465 #undef MDI_IO
9466 #undef MDI_DIR
9467 #undef MDI_CLK
9468
9469 /*
9470 * wm_gmii_i82543_readreg: [mii interface function]
9471 *
9472 * Read a PHY register on the GMII (i82543 version).
9473 */
9474 static int
9475 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
9476 {
9477 struct wm_softc *sc = device_private(self);
9478 int rv;
9479
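	/*
	 * Bit-bang a standard (clause 22) MDIO read frame: a 32-bit preamble
	 * of ones, then 14 bits holding the start and read opcodes plus the
	 * PHY and register addresses, and finally clock the 16 data bits
	 * back in.
	 */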
9480 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
9481 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
9482 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
9483 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
9484
9485 DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
9486 device_xname(sc->sc_dev), phy, reg, rv));
9487
9488 return rv;
9489 }
9490
9491 /*
9492 * wm_gmii_i82543_writereg: [mii interface function]
9493 *
9494 * Write a PHY register on the GMII (i82543 version).
9495 */
9496 static void
9497 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
9498 {
9499 struct wm_softc *sc = device_private(self);
9500
9501 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
9502 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
9503 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
9504 (MII_COMMAND_START << 30), 32);
9505 }
9506
9507 /*
9508 * wm_gmii_mdic_readreg: [mii interface function]
9509 *
9510 * Read a PHY register on the GMII.
9511 */
9512 static int
9513 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
9514 {
9515 struct wm_softc *sc = device_private(self);
9516 uint32_t mdic = 0;
9517 int i, rv;
9518
9519 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
9520 MDIC_REGADD(reg));
9521
9522 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
9523 mdic = CSR_READ(sc, WMREG_MDIC);
9524 if (mdic & MDIC_READY)
9525 break;
9526 delay(50);
9527 }
9528
9529 if ((mdic & MDIC_READY) == 0) {
9530 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
9531 device_xname(sc->sc_dev), phy, reg);
9532 rv = 0;
9533 } else if (mdic & MDIC_E) {
9534 #if 0 /* This is normal if no PHY is present. */
9535 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
9536 device_xname(sc->sc_dev), phy, reg);
9537 #endif
9538 rv = 0;
9539 } else {
9540 rv = MDIC_DATA(mdic);
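		/*
		 * A result of all ones usually means that no PHY answered
		 * on this address, so treat it as "no data".
		 */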
9541 if (rv == 0xffff)
9542 rv = 0;
9543 }
9544
9545 return rv;
9546 }
9547
9548 /*
9549 * wm_gmii_mdic_writereg: [mii interface function]
9550 *
9551 * Write a PHY register on the GMII.
9552 */
9553 static void
9554 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
9555 {
9556 struct wm_softc *sc = device_private(self);
9557 uint32_t mdic = 0;
9558 int i;
9559
9560 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
9561 MDIC_REGADD(reg) | MDIC_DATA(val));
9562
9563 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
9564 mdic = CSR_READ(sc, WMREG_MDIC);
9565 if (mdic & MDIC_READY)
9566 break;
9567 delay(50);
9568 }
9569
9570 if ((mdic & MDIC_READY) == 0)
9571 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
9572 device_xname(sc->sc_dev), phy, reg);
9573 else if (mdic & MDIC_E)
9574 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
9575 device_xname(sc->sc_dev), phy, reg);
9576 }
9577
9578 /*
9579 * wm_gmii_i82544_readreg: [mii interface function]
9580 *
9581 * Read a PHY register on the GMII.
9582 */
9583 static int
9584 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
9585 {
9586 struct wm_softc *sc = device_private(self);
9587 int rv;
9588
9589 if (sc->phy.acquire(sc)) {
9590 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9591 __func__);
9592 return 0;
9593 }
9594 rv = wm_gmii_mdic_readreg(self, phy, reg);
9595 sc->phy.release(sc);
9596
9597 return rv;
9598 }
9599
9600 /*
9601 * wm_gmii_i82544_writereg: [mii interface function]
9602 *
9603 * Write a PHY register on the GMII.
9604 */
9605 static void
9606 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
9607 {
9608 struct wm_softc *sc = device_private(self);
9609
9610 	if (sc->phy.acquire(sc)) {
9611 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9612 		    __func__);
9613 		return;	/* don't touch the PHY without the semaphore */
	}
9614 wm_gmii_mdic_writereg(self, phy, reg, val);
9615 sc->phy.release(sc);
9616 }
9617
9618 /*
9619 * wm_gmii_i80003_readreg: [mii interface function]
9620 *
9621  * Read a PHY register on the Kumeran interface.
9622  * This could be handled by the PHY layer if we didn't have to lock the
9623  * resource ...
9624 */
9625 static int
9626 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
9627 {
9628 struct wm_softc *sc = device_private(self);
9629 int rv;
9630
9631 if (phy != 1) /* only one PHY on kumeran bus */
9632 return 0;
9633
9634 if (sc->phy.acquire(sc)) {
9635 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9636 __func__);
9637 return 0;
9638 }
9639
9640 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
9641 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
9642 reg >> GG82563_PAGE_SHIFT);
9643 } else {
9644 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
9645 reg >> GG82563_PAGE_SHIFT);
9646 }
9647 	/* Wait 200us more to work around a ready-bit bug in the MDIC register */
9648 delay(200);
9649 rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
9650 delay(200);
9651 sc->phy.release(sc);
9652
9653 return rv;
9654 }
9655
9656 /*
9657 * wm_gmii_i80003_writereg: [mii interface function]
9658 *
9659  * Write a PHY register on the Kumeran interface.
9660  * This could be handled by the PHY layer if we didn't have to lock the
9661  * resource ...
9662 */
9663 static void
9664 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
9665 {
9666 struct wm_softc *sc = device_private(self);
9667
9668 if (phy != 1) /* only one PHY on kumeran bus */
9669 return;
9670
9671 if (sc->phy.acquire(sc)) {
9672 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9673 __func__);
9674 return;
9675 }
9676
9677 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
9678 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
9679 reg >> GG82563_PAGE_SHIFT);
9680 } else {
9681 wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
9682 reg >> GG82563_PAGE_SHIFT);
9683 }
9684 	/* Wait 200us more to work around a ready-bit bug in the MDIC register */
9685 delay(200);
9686 wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
9687 delay(200);
9688
9689 sc->phy.release(sc);
9690 }
9691
9692 /*
9693 * wm_gmii_bm_readreg: [mii interface function]
9694 *
9695  * Read a PHY register on the BM PHY.
9696  * This could be handled by the PHY layer if we didn't have to lock the
9697  * resource ...
9698 */
9699 static int
9700 wm_gmii_bm_readreg(device_t self, int phy, int reg)
9701 {
9702 struct wm_softc *sc = device_private(self);
9703 uint16_t page = reg >> BME1000_PAGE_SHIFT;
9704 uint16_t val;
9705 int rv;
9706
9707 if (sc->phy.acquire(sc)) {
9708 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9709 __func__);
9710 return 0;
9711 }
9712
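	/*
	 * The register argument encodes both a page number (bits above
	 * BME1000_PAGE_SHIFT) and a register offset (the low bits).
	 * Registers above BME1000_MAX_MULTI_PAGE_REG need an explicit page
	 * select write first; page BM_WUC_PAGE (800) is handled separately
	 * by wm_access_phy_wakeup_reg_bm().
	 */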
9713 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
9714 phy = ((page >= 768) || ((page == 0) && (reg == 25))
9715 || (reg == 31)) ? 1 : phy;
9716 /* Page 800 works differently than the rest so it has its own func */
9717 if (page == BM_WUC_PAGE) {
9718 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
9719 rv = val;
9720 goto release;
9721 }
9722
9723 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
9724 if ((phy == 1) && (sc->sc_type != WM_T_82574)
9725 && (sc->sc_type != WM_T_82583))
9726 wm_gmii_mdic_writereg(self, phy,
9727 MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
9728 else
9729 wm_gmii_mdic_writereg(self, phy,
9730 BME1000_PHY_PAGE_SELECT, page);
9731 }
9732
9733 rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
9734
9735 release:
9736 sc->phy.release(sc);
9737 return rv;
9738 }
9739
9740 /*
9741 * wm_gmii_bm_writereg: [mii interface function]
9742 *
9743  * Write a PHY register on the BM PHY.
9744  * This could be handled by the PHY layer if we didn't have to lock the
9745  * resource ...
9746 */
9747 static void
9748 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
9749 {
9750 struct wm_softc *sc = device_private(self);
9751 uint16_t page = reg >> BME1000_PAGE_SHIFT;
9752
9753 if (sc->phy.acquire(sc)) {
9754 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9755 __func__);
9756 return;
9757 }
9758
9759 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
9760 phy = ((page >= 768) || ((page == 0) && (reg == 25))
9761 || (reg == 31)) ? 1 : phy;
9762 /* Page 800 works differently than the rest so it has its own func */
9763 if (page == BM_WUC_PAGE) {
9764 uint16_t tmp;
9765
9766 tmp = val;
9767 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
9768 goto release;
9769 }
9770
9771 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
9772 if ((phy == 1) && (sc->sc_type != WM_T_82574)
9773 && (sc->sc_type != WM_T_82583))
9774 wm_gmii_mdic_writereg(self, phy,
9775 MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
9776 else
9777 wm_gmii_mdic_writereg(self, phy,
9778 BME1000_PHY_PAGE_SELECT, page);
9779 }
9780
9781 wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
9782
9783 release:
9784 sc->phy.release(sc);
9785 }
9786
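/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Access a PHY wakeup register on page BM_WUC_PAGE (800).  If rd is
 *	nonzero the register is read into *val, otherwise *val is written.
 *	Wakeup register access is enabled first and the previous enable
 *	state is restored afterwards.
 */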
9787 static void
9788 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
9789 {
9790 struct wm_softc *sc = device_private(self);
9791 uint16_t regnum = BM_PHY_REG_NUM(offset);
9792 uint16_t wuce, reg;
9793
9794 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9795 device_xname(sc->sc_dev), __func__));
9796 /* XXX Gig must be disabled for MDIO accesses to page 800 */
9797 if (sc->sc_type == WM_T_PCH) {
9798 		/* XXX The e1000 driver does nothing here... why? */
9799 }
9800
9801 /*
9802 * 1) Enable PHY wakeup register first.
9803 * See e1000_enable_phy_wakeup_reg_access_bm().
9804 */
9805
9806 /* Set page 769 */
9807 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9808 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
9809
9810 /* Read WUCE and save it */
9811 wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
9812
9813 reg = wuce | BM_WUC_ENABLE_BIT;
9814 reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
9815 wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
9816
9817 /* Select page 800 */
9818 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9819 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
9820
9821 /*
9822 * 2) Access PHY wakeup register.
9823 * See e1000_access_phy_wakeup_reg_bm.
9824 */
9825
9826 /* Write page 800 */
9827 wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
9828
9829 if (rd)
9830 *val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
9831 else
9832 wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
9833
9834 /*
9835 * 3) Disable PHY wakeup register.
9836 * See e1000_disable_phy_wakeup_reg_access_bm().
9837 */
9838 /* Set page 769 */
9839 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9840 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
9841
9842 wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
9843 }
9844
9845 /*
9846 * wm_gmii_hv_readreg: [mii interface function]
9847 *
9848  * Read a PHY register on the HV (PCH family) PHY.
9849  * This could be handled by the PHY layer if we didn't have to lock the
9850  * resource ...
9851 */
9852 static int
9853 wm_gmii_hv_readreg(device_t self, int phy, int reg)
9854 {
9855 struct wm_softc *sc = device_private(self);
9856 int rv;
9857
9858 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9859 device_xname(sc->sc_dev), __func__));
9860 if (sc->phy.acquire(sc)) {
9861 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9862 __func__);
9863 return 0;
9864 }
9865
9866 rv = wm_gmii_hv_readreg_locked(self, phy, reg);
9867 sc->phy.release(sc);
9868 return rv;
9869 }
9870
9871 static int
9872 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
9873 {
9874 uint16_t page = BM_PHY_REG_PAGE(reg);
9875 uint16_t regnum = BM_PHY_REG_NUM(reg);
9876 uint16_t val;
9877 int rv;
9878
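	/*
	 * The reg argument encodes a page (BM_PHY_REG_PAGE) and a register
	 * number (BM_PHY_REG_NUM).  Registers on pages at or above
	 * HV_INTC_FC_PAGE_START are accessed through PHY address 1.
	 */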
9879 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
9880
9881 /* Page 800 works differently than the rest so it has its own func */
9882 if (page == BM_WUC_PAGE) {
9883 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
9884 return val;
9885 }
9886
9887 	/*
9888 	 * Pages lower than 768 work differently from the rest and would
9889 	 * need their own function (not handled here).
9890 	 */
9891 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
9892 printf("gmii_hv_readreg!!!\n");
9893 return 0;
9894 }
9895
9896 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9897 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9898 page << BME1000_PAGE_SHIFT);
9899 }
9900
9901 rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
9902 return rv;
9903 }
9904
9905 /*
9906 * wm_gmii_hv_writereg: [mii interface function]
9907 *
9908  * Write a PHY register on the HV (PCH family) PHY.
9909  * This could be handled by the PHY layer if we didn't have to lock the
9910  * resource ...
9911 */
9912 static void
9913 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
9914 {
9915 struct wm_softc *sc = device_private(self);
9916
9917 DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9918 device_xname(sc->sc_dev), __func__));
9919
9920 if (sc->phy.acquire(sc)) {
9921 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9922 __func__);
9923 return;
9924 }
9925
9926 wm_gmii_hv_writereg_locked(self, phy, reg, val);
9927 sc->phy.release(sc);
9928 }
9929
9930 static void
9931 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
9932 {
9933 struct wm_softc *sc = device_private(self);
9934 uint16_t page = BM_PHY_REG_PAGE(reg);
9935 uint16_t regnum = BM_PHY_REG_NUM(reg);
9936
9937 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
9938
9939 /* Page 800 works differently than the rest so it has its own func */
9940 if (page == BM_WUC_PAGE) {
9941 uint16_t tmp;
9942
9943 tmp = val;
9944 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
9945 return;
9946 }
9947
9948 	/*
9949 	 * Pages lower than 768 work differently from the rest and would
9950 	 * need their own function (not handled here).
9951 	 */
9952 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
9953 printf("gmii_hv_writereg!!!\n");
9954 return;
9955 }
9956
9957 {
9958 /*
9959 * XXX Workaround MDIO accesses being disabled after entering
9960 * IEEE Power Down (whenever bit 11 of the PHY control
9961 * register is set)
9962 */
9963 if (sc->sc_phytype == WMPHY_82578) {
9964 struct mii_softc *child;
9965
9966 child = LIST_FIRST(&sc->sc_mii.mii_phys);
9967 if ((child != NULL) && (child->mii_mpd_rev >= 1)
9968 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
9969 && ((val & (1 << 11)) != 0)) {
9970 printf("XXX need workaround\n");
9971 }
9972 }
9973
9974 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9975 wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9976 page << BME1000_PAGE_SHIFT);
9977 }
9978 }
9979
9980 wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
9981 }
9982
9983 /*
9984 * wm_gmii_82580_readreg: [mii interface function]
9985 *
9986 * Read a PHY register on the 82580 and I350.
9987 * This could be handled by the PHY layer if we didn't have to lock the
9988  * resource ...
9989 */
9990 static int
9991 wm_gmii_82580_readreg(device_t self, int phy, int reg)
9992 {
9993 struct wm_softc *sc = device_private(self);
9994 int rv;
9995
9996 if (sc->phy.acquire(sc) != 0) {
9997 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9998 __func__);
9999 return 0;
10000 }
10001
10002 rv = wm_gmii_mdic_readreg(self, phy, reg);
10003
10004 sc->phy.release(sc);
10005 return rv;
10006 }
10007
10008 /*
10009 * wm_gmii_82580_writereg: [mii interface function]
10010 *
10011 * Write a PHY register on the 82580 and I350.
10012 * This could be handled by the PHY layer if we didn't have to lock the
10013  * resource ...
10014 */
10015 static void
10016 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
10017 {
10018 struct wm_softc *sc = device_private(self);
10019
10020 if (sc->phy.acquire(sc) != 0) {
10021 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10022 __func__);
10023 return;
10024 }
10025
10026 wm_gmii_mdic_writereg(self, phy, reg, val);
10027
10028 sc->phy.release(sc);
10029 }
10030
10031 /*
10032 * wm_gmii_gs40g_readreg: [mii interface function]
10033 *
10034  * Read a PHY register on the I210 and I211.
10035  * This could be handled by the PHY layer if we didn't have to lock the
10036  * resource ...
10037 */
10038 static int
10039 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
10040 {
10041 struct wm_softc *sc = device_private(self);
10042 int page, offset;
10043 int rv;
10044
10045 /* Acquire semaphore */
10046 if (sc->phy.acquire(sc)) {
10047 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10048 __func__);
10049 return 0;
10050 }
10051
10052 /* Page select */
10053 page = reg >> GS40G_PAGE_SHIFT;
10054 wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
10055
10056 /* Read reg */
10057 offset = reg & GS40G_OFFSET_MASK;
10058 rv = wm_gmii_mdic_readreg(self, phy, offset);
10059
10060 sc->phy.release(sc);
10061 return rv;
10062 }
10063
10064 /*
10065 * wm_gmii_gs40g_writereg: [mii interface function]
10066 *
10067 * Write a PHY register on the I210 and I211.
10068 * This could be handled by the PHY layer if we didn't have to lock the
10069  * resource ...
10070 */
10071 static void
10072 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
10073 {
10074 struct wm_softc *sc = device_private(self);
10075 int page, offset;
10076
10077 /* Acquire semaphore */
10078 if (sc->phy.acquire(sc)) {
10079 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10080 __func__);
10081 return;
10082 }
10083
10084 /* Page select */
10085 page = reg >> GS40G_PAGE_SHIFT;
10086 wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
10087
10088 /* Write reg */
10089 offset = reg & GS40G_OFFSET_MASK;
10090 wm_gmii_mdic_writereg(self, phy, offset, val);
10091
10092 /* Release semaphore */
10093 sc->phy.release(sc);
10094 }
10095
10096 /*
10097 * wm_gmii_statchg: [mii interface function]
10098 *
10099 * Callback from MII layer when media changes.
10100 */
10101 static void
10102 wm_gmii_statchg(struct ifnet *ifp)
10103 {
10104 struct wm_softc *sc = ifp->if_softc;
10105 struct mii_data *mii = &sc->sc_mii;
10106
10107 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
10108 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10109 sc->sc_fcrtl &= ~FCRTL_XONE;
10110
10111 /*
10112 * Get flow control negotiation result.
10113 */
10114 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
10115 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
10116 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
10117 mii->mii_media_active &= ~IFM_ETH_FMASK;
10118 }
10119
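	/*
	 * Program the MAC for the negotiated flow control: TX pause enables
	 * CTRL_TFCE and the XON frame bit, RX pause enables CTRL_RFCE.
	 */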
10120 if (sc->sc_flowflags & IFM_FLOW) {
10121 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
10122 sc->sc_ctrl |= CTRL_TFCE;
10123 sc->sc_fcrtl |= FCRTL_XONE;
10124 }
10125 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
10126 sc->sc_ctrl |= CTRL_RFCE;
10127 }
10128
10129 if (sc->sc_mii.mii_media_active & IFM_FDX) {
10130 DPRINTF(WM_DEBUG_LINK,
10131 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
10132 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10133 } else {
10134 DPRINTF(WM_DEBUG_LINK,
10135 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
10136 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10137 }
10138
10139 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10140 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10141 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
10142 : WMREG_FCRTL, sc->sc_fcrtl);
10143 if (sc->sc_type == WM_T_80003) {
10144 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
10145 case IFM_1000_T:
10146 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
10147 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
10148 sc->sc_tipg = TIPG_1000T_80003_DFLT;
10149 break;
10150 default:
10151 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
10152 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
10153 sc->sc_tipg = TIPG_10_100_80003_DFLT;
10154 break;
10155 }
10156 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
10157 }
10158 }
10159
10160 /* kumeran related (80003, ICH* and PCH*) */
10161
10162 /*
10163 * wm_kmrn_readreg:
10164 *
10165 * Read a kumeran register
10166 */
10167 static int
10168 wm_kmrn_readreg(struct wm_softc *sc, int reg)
10169 {
10170 int rv;
10171
10172 if (sc->sc_type == WM_T_80003)
10173 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10174 else
10175 rv = sc->phy.acquire(sc);
10176 if (rv != 0) {
10177 aprint_error_dev(sc->sc_dev,
10178 "%s: failed to get semaphore\n", __func__);
10179 return 0;
10180 }
10181
10182 rv = wm_kmrn_readreg_locked(sc, reg);
10183
10184 if (sc->sc_type == WM_T_80003)
10185 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10186 else
10187 sc->phy.release(sc);
10188
10189 return rv;
10190 }
10191
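/*
 * wm_kmrn_readreg_locked:
 *
 *	Read a Kumeran register with the semaphore already held: write the
 *	register offset with the read-enable bit (KUMCTRLSTA_REN) set to the
 *	KUMCTRLSTA register, wait briefly, then read the data back from it.
 */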
10192 static int
10193 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
10194 {
10195 int rv;
10196
10197 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
10198 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
10199 KUMCTRLSTA_REN);
10200 CSR_WRITE_FLUSH(sc);
10201 delay(2);
10202
10203 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
10204
10205 return rv;
10206 }
10207
10208 /*
10209 * wm_kmrn_writereg:
10210 *
10211 * Write a kumeran register
10212 */
10213 static void
10214 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
10215 {
10216 int rv;
10217
10218 if (sc->sc_type == WM_T_80003)
10219 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10220 else
10221 rv = sc->phy.acquire(sc);
10222 if (rv != 0) {
10223 aprint_error_dev(sc->sc_dev,
10224 "%s: failed to get semaphore\n", __func__);
10225 return;
10226 }
10227
10228 wm_kmrn_writereg_locked(sc, reg, val);
10229
10230 if (sc->sc_type == WM_T_80003)
10231 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10232 else
10233 sc->phy.release(sc);
10234 }
10235
10236 static void
10237 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
10238 {
10239
10240 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
10241 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
10242 (val & KUMCTRLSTA_MASK));
10243 }
10244
10245 /* SGMII related */
10246
10247 /*
10248 * wm_sgmii_uses_mdio
10249 *
10250 * Check whether the transaction is to the internal PHY or the external
10251 * MDIO interface. Return true if it's MDIO.
10252 */
10253 static bool
10254 wm_sgmii_uses_mdio(struct wm_softc *sc)
10255 {
10256 uint32_t reg;
10257 bool ismdio = false;
10258
10259 switch (sc->sc_type) {
10260 case WM_T_82575:
10261 case WM_T_82576:
10262 reg = CSR_READ(sc, WMREG_MDIC);
10263 ismdio = ((reg & MDIC_DEST) != 0);
10264 break;
10265 case WM_T_82580:
10266 case WM_T_I350:
10267 case WM_T_I354:
10268 case WM_T_I210:
10269 case WM_T_I211:
10270 reg = CSR_READ(sc, WMREG_MDICNFG);
10271 ismdio = ((reg & MDICNFG_DEST) != 0);
10272 break;
10273 default:
10274 break;
10275 }
10276
10277 return ismdio;
10278 }
10279
10280 /*
10281 * wm_sgmii_readreg: [mii interface function]
10282 *
10283 * Read a PHY register on the SGMII
10284 * This could be handled by the PHY layer if we didn't have to lock the
10285  * resource ...
10286 */
10287 static int
10288 wm_sgmii_readreg(device_t self, int phy, int reg)
10289 {
10290 struct wm_softc *sc = device_private(self);
10291 uint32_t i2ccmd;
10292 int i, rv;
10293
10294 if (sc->phy.acquire(sc)) {
10295 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10296 __func__);
10297 return 0;
10298 }
10299
10300 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
10301 | (phy << I2CCMD_PHY_ADDR_SHIFT)
10302 | I2CCMD_OPCODE_READ;
10303 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10304
10305 /* Poll the ready bit */
10306 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10307 delay(50);
10308 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10309 if (i2ccmd & I2CCMD_READY)
10310 break;
10311 }
10312 if ((i2ccmd & I2CCMD_READY) == 0)
10313 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
10314 if ((i2ccmd & I2CCMD_ERROR) != 0)
10315 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
10316
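	/* The data comes back byte-swapped from the I2C interface; swap it back */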
10317 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
10318
10319 sc->phy.release(sc);
10320 return rv;
10321 }
10322
10323 /*
10324 * wm_sgmii_writereg: [mii interface function]
10325 *
10326 * Write a PHY register on the SGMII.
10327 * This could be handled by the PHY layer if we didn't have to lock the
10328  * resource ...
10329 */
10330 static void
10331 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
10332 {
10333 struct wm_softc *sc = device_private(self);
10334 uint32_t i2ccmd;
10335 int i;
10336 int val_swapped;
10337
10338 if (sc->phy.acquire(sc) != 0) {
10339 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10340 __func__);
10341 return;
10342 }
10343 /* Swap the data bytes for the I2C interface */
10344 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
10345 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
10346 | (phy << I2CCMD_PHY_ADDR_SHIFT)
10347 | I2CCMD_OPCODE_WRITE | val_swapped;
10348 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10349
10350 /* Poll the ready bit */
10351 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10352 delay(50);
10353 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10354 if (i2ccmd & I2CCMD_READY)
10355 break;
10356 }
10357 if ((i2ccmd & I2CCMD_READY) == 0)
10358 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
10359 if ((i2ccmd & I2CCMD_ERROR) != 0)
10360 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
10361
10362 sc->phy.release(sc);
10363 }
10364
10365 /* TBI related */
10366
10367 /*
10368 * wm_tbi_mediainit:
10369 *
10370 * Initialize media for use on 1000BASE-X devices.
10371 */
10372 static void
10373 wm_tbi_mediainit(struct wm_softc *sc)
10374 {
10375 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10376 const char *sep = "";
10377
10378 if (sc->sc_type < WM_T_82543)
10379 sc->sc_tipg = TIPG_WM_DFLT;
10380 else
10381 sc->sc_tipg = TIPG_LG_DFLT;
10382
10383 sc->sc_tbi_serdes_anegticks = 5;
10384
10385 /* Initialize our media structures */
10386 sc->sc_mii.mii_ifp = ifp;
10387 sc->sc_ethercom.ec_mii = &sc->sc_mii;
10388
10389 if ((sc->sc_type >= WM_T_82575)
10390 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
10391 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
10392 wm_serdes_mediachange, wm_serdes_mediastatus);
10393 else
10394 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
10395 wm_tbi_mediachange, wm_tbi_mediastatus);
10396
10397 /*
10398 * SWD Pins:
10399 *
10400 * 0 = Link LED (output)
10401 * 1 = Loss Of Signal (input)
10402 */
10403 sc->sc_ctrl |= CTRL_SWDPIO(0);
10404
10405 /* XXX Perhaps this is only for TBI */
10406 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
10407 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
10408
10409 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10410 sc->sc_ctrl &= ~CTRL_LRST;
10411
10412 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10413
10414 #define ADD(ss, mm, dd) \
10415 do { \
10416 aprint_normal("%s%s", sep, ss); \
10417 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
10418 sep = ", "; \
10419 } while (/*CONSTCOND*/0)
10420
10421 aprint_normal_dev(sc->sc_dev, "");
10422
10423 if (sc->sc_type == WM_T_I354) {
10424 uint32_t status;
10425
10426 status = CSR_READ(sc, WMREG_STATUS);
10427 if (((status & STATUS_2P5_SKU) != 0)
10428 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
10429 ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
10430 } else
10431 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
10432 } else if (sc->sc_type == WM_T_82545) {
10433 /* Only 82545 is LX (XXX except SFP) */
10434 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
10435 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
10436 } else {
10437 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
10438 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
10439 }
10440 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
10441 aprint_normal("\n");
10442
10443 #undef ADD
10444
10445 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
10446 }
10447
10448 /*
10449 * wm_tbi_mediachange: [ifmedia interface function]
10450 *
10451 * Set hardware to newly-selected media on a 1000BASE-X device.
10452 */
10453 static int
10454 wm_tbi_mediachange(struct ifnet *ifp)
10455 {
10456 struct wm_softc *sc = ifp->if_softc;
10457 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10458 uint32_t status;
10459 int i;
10460
10461 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
10462 /* XXX need some work for >= 82571 and < 82575 */
10463 if (sc->sc_type < WM_T_82575)
10464 return 0;
10465 }
10466
10467 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
10468 || (sc->sc_type >= WM_T_82575))
10469 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
10470
10471 sc->sc_ctrl &= ~CTRL_LRST;
10472 sc->sc_txcw = TXCW_ANE;
10473 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10474 sc->sc_txcw |= TXCW_FD | TXCW_HD;
10475 else if (ife->ifm_media & IFM_FDX)
10476 sc->sc_txcw |= TXCW_FD;
10477 else
10478 sc->sc_txcw |= TXCW_HD;
10479
10480 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
10481 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
10482
10483 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
10484 device_xname(sc->sc_dev), sc->sc_txcw));
10485 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
10486 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10487 CSR_WRITE_FLUSH(sc);
10488 delay(1000);
10489
10490 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
10491 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
10492
10493 /*
10494 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
10495 * optics detect a signal, 0 if they don't.
10496 */
10497 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
10498 /* Have signal; wait for the link to come up. */
10499 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
10500 delay(10000);
10501 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
10502 break;
10503 }
10504
10505 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
10506 device_xname(sc->sc_dev),i));
10507
10508 status = CSR_READ(sc, WMREG_STATUS);
10509 DPRINTF(WM_DEBUG_LINK,
10510 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
10511 device_xname(sc->sc_dev),status, STATUS_LU));
10512 if (status & STATUS_LU) {
10513 /* Link is up. */
10514 DPRINTF(WM_DEBUG_LINK,
10515 ("%s: LINK: set media -> link up %s\n",
10516 device_xname(sc->sc_dev),
10517 (status & STATUS_FD) ? "FDX" : "HDX"));
10518
10519 /*
10520 * NOTE: CTRL will update TFCE and RFCE automatically,
10521 * so we should update sc->sc_ctrl
10522 */
10523 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10524 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10525 sc->sc_fcrtl &= ~FCRTL_XONE;
10526 if (status & STATUS_FD)
10527 sc->sc_tctl |=
10528 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10529 else
10530 sc->sc_tctl |=
10531 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10532 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
10533 sc->sc_fcrtl |= FCRTL_XONE;
10534 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10535 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10536 WMREG_OLD_FCRTL : WMREG_FCRTL,
10537 sc->sc_fcrtl);
10538 sc->sc_tbi_linkup = 1;
10539 } else {
10540 if (i == WM_LINKUP_TIMEOUT)
10541 wm_check_for_link(sc);
10542 /* Link is down. */
10543 DPRINTF(WM_DEBUG_LINK,
10544 ("%s: LINK: set media -> link down\n",
10545 device_xname(sc->sc_dev)));
10546 sc->sc_tbi_linkup = 0;
10547 }
10548 } else {
10549 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
10550 device_xname(sc->sc_dev)));
10551 sc->sc_tbi_linkup = 0;
10552 }
10553
10554 wm_tbi_serdes_set_linkled(sc);
10555
10556 return 0;
10557 }
10558
10559 /*
10560 * wm_tbi_mediastatus: [ifmedia interface function]
10561 *
10562 * Get the current interface media status on a 1000BASE-X device.
10563 */
10564 static void
10565 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10566 {
10567 struct wm_softc *sc = ifp->if_softc;
10568 uint32_t ctrl, status;
10569
10570 ifmr->ifm_status = IFM_AVALID;
10571 ifmr->ifm_active = IFM_ETHER;
10572
10573 status = CSR_READ(sc, WMREG_STATUS);
10574 if ((status & STATUS_LU) == 0) {
10575 ifmr->ifm_active |= IFM_NONE;
10576 return;
10577 }
10578
10579 ifmr->ifm_status |= IFM_ACTIVE;
10580 /* Only 82545 is LX */
10581 if (sc->sc_type == WM_T_82545)
10582 ifmr->ifm_active |= IFM_1000_LX;
10583 else
10584 ifmr->ifm_active |= IFM_1000_SX;
10585 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
10586 ifmr->ifm_active |= IFM_FDX;
10587 else
10588 ifmr->ifm_active |= IFM_HDX;
10589 ctrl = CSR_READ(sc, WMREG_CTRL);
10590 if (ctrl & CTRL_RFCE)
10591 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
10592 if (ctrl & CTRL_TFCE)
10593 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
10594 }
10595
10596 /* XXX TBI only */
10597 static int
10598 wm_check_for_link(struct wm_softc *sc)
10599 {
10600 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10601 uint32_t rxcw;
10602 uint32_t ctrl;
10603 uint32_t status;
10604 uint32_t sig;
10605
10606 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
10607 /* XXX need some work for >= 82571 */
10608 if (sc->sc_type >= WM_T_82571) {
10609 sc->sc_tbi_linkup = 1;
10610 return 0;
10611 }
10612 }
10613
10614 rxcw = CSR_READ(sc, WMREG_RXCW);
10615 ctrl = CSR_READ(sc, WMREG_CTRL);
10616 status = CSR_READ(sc, WMREG_STATUS);
10617
10618 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
10619
10620 DPRINTF(WM_DEBUG_LINK,
10621 ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
10622 device_xname(sc->sc_dev), __func__,
10623 ((ctrl & CTRL_SWDPIN(1)) == sig),
10624 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
10625
10626 /*
10627 * SWDPIN LU RXCW
10628 * 0 0 0
10629 * 0 0 1 (should not happen)
10630 * 0 1 0 (should not happen)
10631 * 0 1 1 (should not happen)
10632 * 1 0 0 Disable autonego and force linkup
10633 * 1 0 1 got /C/ but not linkup yet
10634 * 1 1 0 (linkup)
10635 * 1 1 1 If IFM_AUTO, back to autonego
10636 *
10637 */
10638 if (((ctrl & CTRL_SWDPIN(1)) == sig)
10639 && ((status & STATUS_LU) == 0)
10640 && ((rxcw & RXCW_C) == 0)) {
10641 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
10642 __func__));
10643 sc->sc_tbi_linkup = 0;
10644 /* Disable auto-negotiation in the TXCW register */
10645 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
10646
10647 /*
10648 * Force link-up and also force full-duplex.
10649 *
10650 			 * NOTE: TFCE and RFCE in CTRL were updated automatically,
10651 * so we should update sc->sc_ctrl
10652 */
10653 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
10654 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10655 } else if (((status & STATUS_LU) != 0)
10656 && ((rxcw & RXCW_C) != 0)
10657 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
10658 sc->sc_tbi_linkup = 1;
10659 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
10660 __func__));
10661 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
10662 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
10663 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
10664 && ((rxcw & RXCW_C) != 0)) {
10665 DPRINTF(WM_DEBUG_LINK, ("/C/"));
10666 } else {
10667 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
10668 status));
10669 }
10670
10671 return 0;
10672 }
10673
10674 /*
10675 * wm_tbi_tick:
10676 *
10677 * Check the link on TBI devices.
10678 * This function acts as mii_tick().
10679 */
10680 static void
10681 wm_tbi_tick(struct wm_softc *sc)
10682 {
10683 struct mii_data *mii = &sc->sc_mii;
10684 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10685 uint32_t status;
10686
10687 KASSERT(WM_CORE_LOCKED(sc));
10688
10689 status = CSR_READ(sc, WMREG_STATUS);
10690
10691 /* XXX is this needed? */
10692 (void)CSR_READ(sc, WMREG_RXCW);
10693 (void)CSR_READ(sc, WMREG_CTRL);
10694
10695 /* set link status */
10696 if ((status & STATUS_LU) == 0) {
10697 DPRINTF(WM_DEBUG_LINK,
10698 ("%s: LINK: checklink -> down\n",
10699 device_xname(sc->sc_dev)));
10700 sc->sc_tbi_linkup = 0;
10701 } else if (sc->sc_tbi_linkup == 0) {
10702 DPRINTF(WM_DEBUG_LINK,
10703 ("%s: LINK: checklink -> up %s\n",
10704 device_xname(sc->sc_dev),
10705 (status & STATUS_FD) ? "FDX" : "HDX"));
10706 sc->sc_tbi_linkup = 1;
10707 sc->sc_tbi_serdes_ticks = 0;
10708 }
10709
10710 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
10711 goto setled;
10712
10713 if ((status & STATUS_LU) == 0) {
10714 sc->sc_tbi_linkup = 0;
10715 /* If the timer expired, retry autonegotiation */
10716 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10717 && (++sc->sc_tbi_serdes_ticks
10718 >= sc->sc_tbi_serdes_anegticks)) {
10719 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
10720 sc->sc_tbi_serdes_ticks = 0;
10721 /*
10722 * Reset the link, and let autonegotiation do
10723 * its thing
10724 */
10725 sc->sc_ctrl |= CTRL_LRST;
10726 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10727 CSR_WRITE_FLUSH(sc);
10728 delay(1000);
10729 sc->sc_ctrl &= ~CTRL_LRST;
10730 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10731 CSR_WRITE_FLUSH(sc);
10732 delay(1000);
10733 CSR_WRITE(sc, WMREG_TXCW,
10734 sc->sc_txcw & ~TXCW_ANE);
10735 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
10736 }
10737 }
10738
10739 setled:
10740 wm_tbi_serdes_set_linkled(sc);
10741 }
10742
10743 /* SERDES related */
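/*
 * wm_serdes_power_up_link_82575:
 *
 *	Power up the SERDES/SGMII link on 82575 and later parts by enabling
 *	the PCS and clearing CTRL_EXT_SWDPIN(3) (the pin used elsewhere in
 *	this driver to gate SFP cage power).
 */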
10744 static void
10745 wm_serdes_power_up_link_82575(struct wm_softc *sc)
10746 {
10747 uint32_t reg;
10748
10749 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
10750 && ((sc->sc_flags & WM_F_SGMII) == 0))
10751 return;
10752
10753 reg = CSR_READ(sc, WMREG_PCS_CFG);
10754 reg |= PCS_CFG_PCS_EN;
10755 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
10756
10757 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10758 reg &= ~CTRL_EXT_SWDPIN(3);
10759 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10760 CSR_WRITE_FLUSH(sc);
10761 }
10762
10763 static int
10764 wm_serdes_mediachange(struct ifnet *ifp)
10765 {
10766 struct wm_softc *sc = ifp->if_softc;
10767 bool pcs_autoneg = true; /* XXX */
10768 uint32_t ctrl_ext, pcs_lctl, reg;
10769
10770 /* XXX Currently, this function is not called on 8257[12] */
10771 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
10772 || (sc->sc_type >= WM_T_82575))
10773 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
10774
10775 wm_serdes_power_up_link_82575(sc);
10776
10777 sc->sc_ctrl |= CTRL_SLU;
10778
10779 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
10780 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
10781
10782 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10783 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
10784 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
10785 case CTRL_EXT_LINK_MODE_SGMII:
10786 pcs_autoneg = true;
10787 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
10788 break;
10789 case CTRL_EXT_LINK_MODE_1000KX:
10790 pcs_autoneg = false;
10791 /* FALLTHROUGH */
10792 default:
10793 if ((sc->sc_type == WM_T_82575)
10794 || (sc->sc_type == WM_T_82576)) {
10795 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
10796 pcs_autoneg = false;
10797 }
10798 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
10799 | CTRL_FRCFDX;
10800 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
10801 }
10802 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10803
10804 if (pcs_autoneg) {
10805 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
10806 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
10807
10808 reg = CSR_READ(sc, WMREG_PCS_ANADV);
10809 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
10810 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
10811 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
10812 } else
10813 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
10814
10815 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
10816
10817
10818 return 0;
10819 }
10820
10821 static void
10822 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10823 {
10824 struct wm_softc *sc = ifp->if_softc;
10825 struct mii_data *mii = &sc->sc_mii;
10826 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10827 uint32_t pcs_adv, pcs_lpab, reg;
10828
10829 ifmr->ifm_status = IFM_AVALID;
10830 ifmr->ifm_active = IFM_ETHER;
10831
10832 /* Check PCS */
10833 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10834 if ((reg & PCS_LSTS_LINKOK) == 0) {
10835 ifmr->ifm_active |= IFM_NONE;
10836 sc->sc_tbi_linkup = 0;
10837 goto setled;
10838 }
10839
10840 sc->sc_tbi_linkup = 1;
10841 ifmr->ifm_status |= IFM_ACTIVE;
10842 if (sc->sc_type == WM_T_I354) {
10843 uint32_t status;
10844
10845 status = CSR_READ(sc, WMREG_STATUS);
10846 if (((status & STATUS_2P5_SKU) != 0)
10847 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
10848 ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
10849 } else
10850 ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
10851 } else {
10852 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
10853 case PCS_LSTS_SPEED_10:
10854 ifmr->ifm_active |= IFM_10_T; /* XXX */
10855 break;
10856 case PCS_LSTS_SPEED_100:
10857 ifmr->ifm_active |= IFM_100_FX; /* XXX */
10858 break;
10859 case PCS_LSTS_SPEED_1000:
10860 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
10861 break;
10862 default:
10863 device_printf(sc->sc_dev, "Unknown speed\n");
10864 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
10865 break;
10866 }
10867 }
10868 if ((reg & PCS_LSTS_FDX) != 0)
10869 ifmr->ifm_active |= IFM_FDX;
10870 else
10871 ifmr->ifm_active |= IFM_HDX;
10872 mii->mii_media_active &= ~IFM_ETH_FMASK;
10873 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10874 /* Check flow */
10875 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10876 if ((reg & PCS_LSTS_AN_COMP) == 0) {
10877 DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
10878 goto setled;
10879 }
10880 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10881 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10882 DPRINTF(WM_DEBUG_LINK,
10883 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
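		/*
		 * Resolve the pause configuration from the local and link
		 * partner advertisement bits, in the usual sym/asym pause
		 * priority order.
		 */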
10884 if ((pcs_adv & TXCW_SYM_PAUSE)
10885 && (pcs_lpab & TXCW_SYM_PAUSE)) {
10886 mii->mii_media_active |= IFM_FLOW
10887 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10888 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10889 && (pcs_adv & TXCW_ASYM_PAUSE)
10890 && (pcs_lpab & TXCW_SYM_PAUSE)
10891 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
10892 mii->mii_media_active |= IFM_FLOW
10893 | IFM_ETH_TXPAUSE;
10894 } else if ((pcs_adv & TXCW_SYM_PAUSE)
10895 && (pcs_adv & TXCW_ASYM_PAUSE)
10896 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10897 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
10898 mii->mii_media_active |= IFM_FLOW
10899 | IFM_ETH_RXPAUSE;
10900 }
10901 }
10902 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10903 | (mii->mii_media_active & IFM_ETH_FMASK);
10904 setled:
10905 wm_tbi_serdes_set_linkled(sc);
10906 }
10907
10908 /*
10909 * wm_serdes_tick:
10910 *
10911 * Check the link on serdes devices.
10912 */
10913 static void
10914 wm_serdes_tick(struct wm_softc *sc)
10915 {
10916 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10917 struct mii_data *mii = &sc->sc_mii;
10918 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10919 uint32_t reg;
10920
10921 KASSERT(WM_CORE_LOCKED(sc));
10922
10923 mii->mii_media_status = IFM_AVALID;
10924 mii->mii_media_active = IFM_ETHER;
10925
10926 /* Check PCS */
10927 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10928 if ((reg & PCS_LSTS_LINKOK) != 0) {
10929 mii->mii_media_status |= IFM_ACTIVE;
10930 sc->sc_tbi_linkup = 1;
10931 sc->sc_tbi_serdes_ticks = 0;
10932 mii->mii_media_active |= IFM_1000_SX; /* XXX */
10933 if ((reg & PCS_LSTS_FDX) != 0)
10934 mii->mii_media_active |= IFM_FDX;
10935 else
10936 mii->mii_media_active |= IFM_HDX;
10937 } else {
10938 mii->mii_media_status |= IFM_NONE;
10939 sc->sc_tbi_linkup = 0;
10940 /* If the timer expired, retry autonegotiation */
10941 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10942 && (++sc->sc_tbi_serdes_ticks
10943 >= sc->sc_tbi_serdes_anegticks)) {
10944 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
10945 sc->sc_tbi_serdes_ticks = 0;
10946 /* XXX */
10947 wm_serdes_mediachange(ifp);
10948 }
10949 }
10950
10951 wm_tbi_serdes_set_linkled(sc);
10952 }
10953
10954 /* SFP related */
10955
10956 static int
10957 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
10958 {
10959 uint32_t i2ccmd;
10960 int i;
10961
10962 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
10963 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10964
10965 /* Poll the ready bit */
10966 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10967 delay(50);
10968 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10969 if (i2ccmd & I2CCMD_READY)
10970 break;
10971 }
10972 if ((i2ccmd & I2CCMD_READY) == 0)
10973 return -1;
10974 if ((i2ccmd & I2CCMD_ERROR) != 0)
10975 return -1;
10976
10977 *data = i2ccmd & 0x00ff;
10978
10979 return 0;
10980 }
10981
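/*
 * wm_sfp_get_media_type:
 *
 *	Identify the SFP module over the I2C interface and derive the media
 *	type (SERDES or copper/SGMII) from its Ethernet compliance flags.
 */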
10982 static uint32_t
10983 wm_sfp_get_media_type(struct wm_softc *sc)
10984 {
10985 uint32_t ctrl_ext;
10986 uint8_t val = 0;
10987 int timeout = 3;
10988 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
10989 int rv = -1;
10990
10991 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10992 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
10993 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
10994 CSR_WRITE_FLUSH(sc);
10995
10996 /* Read SFP module data */
10997 while (timeout) {
10998 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
10999 if (rv == 0)
11000 break;
11001 delay(100*1000); /* XXX too big */
11002 timeout--;
11003 }
11004 if (rv != 0)
11005 goto out;
11006 switch (val) {
11007 case SFF_SFP_ID_SFF:
11008 aprint_normal_dev(sc->sc_dev,
11009 "Module/Connector soldered to board\n");
11010 break;
11011 case SFF_SFP_ID_SFP:
11012 aprint_normal_dev(sc->sc_dev, "SFP\n");
11013 break;
11014 case SFF_SFP_ID_UNKNOWN:
11015 goto out;
11016 default:
11017 break;
11018 }
11019
11020 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
11021 if (rv != 0) {
11022 goto out;
11023 }
11024
11025 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
11026 mediatype = WM_MEDIATYPE_SERDES;
11027 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
11028 sc->sc_flags |= WM_F_SGMII;
11029 mediatype = WM_MEDIATYPE_COPPER;
11030 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
11031 sc->sc_flags |= WM_F_SGMII;
11032 mediatype = WM_MEDIATYPE_SERDES;
11033 }
11034
11035 out:
11036 /* Restore I2C interface setting */
11037 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11038
11039 return mediatype;
11040 }
11041
11042 /*
11043 * NVM related.
11044 * Microwire, SPI (w/wo EERD) and Flash.
11045 */
11046
11047 /* Both spi and uwire */
11048
11049 /*
11050 * wm_eeprom_sendbits:
11051 *
11052 * Send a series of bits to the EEPROM.
11053 */
11054 static void
11055 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
11056 {
11057 uint32_t reg;
11058 int x;
11059
11060 reg = CSR_READ(sc, WMREG_EECD);
11061
11062 for (x = nbits; x > 0; x--) {
11063 if (bits & (1U << (x - 1)))
11064 reg |= EECD_DI;
11065 else
11066 reg &= ~EECD_DI;
11067 CSR_WRITE(sc, WMREG_EECD, reg);
11068 CSR_WRITE_FLUSH(sc);
11069 delay(2);
11070 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
11071 CSR_WRITE_FLUSH(sc);
11072 delay(2);
11073 CSR_WRITE(sc, WMREG_EECD, reg);
11074 CSR_WRITE_FLUSH(sc);
11075 delay(2);
11076 }
11077 }
11078
11079 /*
11080 * wm_eeprom_recvbits:
11081 *
11082 * Receive a series of bits from the EEPROM.
11083 */
11084 static void
11085 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
11086 {
11087 uint32_t reg, val;
11088 int x;
11089
11090 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
11091
11092 val = 0;
11093 for (x = nbits; x > 0; x--) {
11094 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
11095 CSR_WRITE_FLUSH(sc);
11096 delay(2);
11097 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
11098 val |= (1U << (x - 1));
11099 CSR_WRITE(sc, WMREG_EECD, reg);
11100 CSR_WRITE_FLUSH(sc);
11101 delay(2);
11102 }
11103 *valp = val;
11104 }
11105
11106 /* Microwire */
11107
11108 /*
11109 * wm_nvm_read_uwire:
11110 *
11111 * Read a word from the EEPROM using the MicroWire protocol.
11112 */
11113 static int
11114 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11115 {
11116 uint32_t reg, val;
11117 int i;
11118
11119 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11120 device_xname(sc->sc_dev), __func__));
11121
11122 for (i = 0; i < wordcnt; i++) {
11123 /* Clear SK and DI. */
11124 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
11125 CSR_WRITE(sc, WMREG_EECD, reg);
11126
11127 /*
11128 		 * XXX: workaround for a bug in qemu-0.12.x and prior,
11129 		 * and in Xen.
11130 		 *
11131 		 * We use this workaround only for the 82540 because qemu's
11132 		 * e1000 acts as an 82540.
11133 */
11134 if (sc->sc_type == WM_T_82540) {
11135 reg |= EECD_SK;
11136 CSR_WRITE(sc, WMREG_EECD, reg);
11137 reg &= ~EECD_SK;
11138 CSR_WRITE(sc, WMREG_EECD, reg);
11139 CSR_WRITE_FLUSH(sc);
11140 delay(2);
11141 }
11142 /* XXX: end of workaround */
11143
11144 /* Set CHIP SELECT. */
11145 reg |= EECD_CS;
11146 CSR_WRITE(sc, WMREG_EECD, reg);
11147 CSR_WRITE_FLUSH(sc);
11148 delay(2);
11149
11150 /* Shift in the READ command. */
11151 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
11152
11153 /* Shift in address. */
11154 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
11155
11156 /* Shift out the data. */
11157 wm_eeprom_recvbits(sc, &val, 16);
11158 data[i] = val & 0xffff;
11159
11160 /* Clear CHIP SELECT. */
11161 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
11162 CSR_WRITE(sc, WMREG_EECD, reg);
11163 CSR_WRITE_FLUSH(sc);
11164 delay(2);
11165 }
11166
11167 return 0;
11168 }
11169
11170 /* SPI */
11171
11172 /*
11173 * Set SPI and FLASH related information from the EECD register.
11174 * For 82541 and 82547, the word size is taken from EEPROM.
11175 */
11176 static int
11177 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
11178 {
11179 int size;
11180 uint32_t reg;
11181 uint16_t data;
11182
11183 reg = CSR_READ(sc, WMREG_EECD);
11184 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
11185
11186 /* Read the size of NVM from EECD by default */
11187 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
11188 switch (sc->sc_type) {
11189 case WM_T_82541:
11190 case WM_T_82541_2:
11191 case WM_T_82547:
11192 case WM_T_82547_2:
11193 /* Set dummy value to access EEPROM */
11194 sc->sc_nvm_wordsize = 64;
11195 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
11196 reg = data;
11197 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
11198 if (size == 0)
11199 size = 6; /* 64 word size */
11200 else
11201 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
11202 break;
11203 case WM_T_80003:
11204 case WM_T_82571:
11205 case WM_T_82572:
11206 case WM_T_82573: /* SPI case */
11207 case WM_T_82574: /* SPI case */
11208 case WM_T_82583: /* SPI case */
11209 size += NVM_WORD_SIZE_BASE_SHIFT;
11210 if (size > 14)
11211 size = 14;
11212 break;
11213 case WM_T_82575:
11214 case WM_T_82576:
11215 case WM_T_82580:
11216 case WM_T_I350:
11217 case WM_T_I354:
11218 case WM_T_I210:
11219 case WM_T_I211:
11220 size += NVM_WORD_SIZE_BASE_SHIFT;
11221 if (size > 15)
11222 size = 15;
11223 break;
11224 default:
11225 aprint_error_dev(sc->sc_dev,
11226 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
11227 return -1;
11228 break;
11229 }
11230
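	/* The size value is an exponent; the NVM holds 2^size 16-bit words */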
11231 sc->sc_nvm_wordsize = 1 << size;
11232
11233 return 0;
11234 }
11235
11236 /*
11237 * wm_nvm_ready_spi:
11238 *
11239 * Wait for a SPI EEPROM to be ready for commands.
11240 */
11241 static int
11242 wm_nvm_ready_spi(struct wm_softc *sc)
11243 {
11244 uint32_t val;
11245 int usec;
11246
11247 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11248 device_xname(sc->sc_dev), __func__));
11249
11250 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
11251 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
11252 wm_eeprom_recvbits(sc, &val, 8);
11253 if ((val & SPI_SR_RDY) == 0)
11254 break;
11255 }
11256 if (usec >= SPI_MAX_RETRIES) {
11257 aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
11258 return 1;
11259 }
11260 return 0;
11261 }
11262
11263 /*
11264 * wm_nvm_read_spi:
11265 *
11266  * Read a word from the EEPROM using the SPI protocol.
11267 */
11268 static int
11269 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11270 {
11271 uint32_t reg, val;
11272 int i;
11273 uint8_t opc;
11274
11275 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11276 device_xname(sc->sc_dev), __func__));
11277
11278 /* Clear SK and CS. */
11279 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
11280 CSR_WRITE(sc, WMREG_EECD, reg);
11281 CSR_WRITE_FLUSH(sc);
11282 delay(2);
11283
11284 if (wm_nvm_ready_spi(sc))
11285 return 1;
11286
11287 /* Toggle CS to flush commands. */
11288 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
11289 CSR_WRITE_FLUSH(sc);
11290 delay(2);
11291 CSR_WRITE(sc, WMREG_EECD, reg);
11292 CSR_WRITE_FLUSH(sc);
11293 delay(2);
11294
11295 opc = SPI_OPC_READ;
11296 if (sc->sc_nvm_addrbits == 8 && word >= 128)
11297 opc |= SPI_OPC_A8;
11298
11299 wm_eeprom_sendbits(sc, opc, 8);
11300 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
11301
11302 for (i = 0; i < wordcnt; i++) {
11303 wm_eeprom_recvbits(sc, &val, 16);
11304 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
11305 }
11306
11307 /* Raise CS and clear SK. */
11308 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
11309 CSR_WRITE(sc, WMREG_EECD, reg);
11310 CSR_WRITE_FLUSH(sc);
11311 delay(2);
11312
11313 return 0;
11314 }
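
/*
 * Notes on the SPI read above (illustrative): the 16 bits shifted in by
 * wm_eeprom_recvbits() arrive high byte first, so a raw value of 0x3412 is
 * byte-swapped into the host word 0x1234 before being stored in data[].
 * For parts with 8 address bits, "word << 1" is a byte address that only
 * reaches words 0-127; the SPI_OPC_A8 opcode bit carries the extra (ninth)
 * address bit needed for words 128 and above.
 */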
11315
11316 /* Reading with the EERD register */
11317
11318 static int
11319 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
11320 {
11321 uint32_t attempts = 100000;
11322 uint32_t i, reg = 0;
11323 int32_t done = -1;
11324
11325 for (i = 0; i < attempts; i++) {
11326 reg = CSR_READ(sc, rw);
11327
11328 if (reg & EERD_DONE) {
11329 done = 0;
11330 break;
11331 }
11332 delay(5);
11333 }
11334
11335 return done;
11336 }
11337
11338 static int
11339 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
11340 uint16_t *data)
11341 {
11342 int i, eerd = 0;
11343 int error = 0;
11344
11345 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11346 device_xname(sc->sc_dev), __func__));
11347
11348 for (i = 0; i < wordcnt; i++) {
11349 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
11350
11351 CSR_WRITE(sc, WMREG_EERD, eerd);
11352 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
11353 if (error != 0)
11354 break;
11355
11356 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
11357 }
11358
11359 return error;
11360 }
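
/*
 * Minimal sketch (not compiled) of a single-word EERD read mirroring the
 * loop body above: compose the start command, poll for EERD_DONE, then
 * pull the data out of the high bits of the same register.
 */
#if 0
static int
wm_nvm_read_eerd_one(struct wm_softc *sc, int offset, uint16_t *datap)
{

	CSR_WRITE(sc, WMREG_EERD, (offset << EERD_ADDR_SHIFT) | EERD_START);
	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) != 0)
		return -1;
	*datap = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
	return 0;
}
#endif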
11361
11362 /* Flash */
11363
11364 static int
11365 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
11366 {
11367 uint32_t eecd;
11368 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
11369 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
11370 uint8_t sig_byte = 0;
11371
11372 switch (sc->sc_type) {
11373 case WM_T_PCH_SPT:
11374 /*
11375 * In SPT, read from the CTRL_EXT reg instead of accessing the
11376 * sector valid bits from the NVM.
11377 */
11378 *bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
11379 if ((*bank == 0) || (*bank == 1)) {
11380 aprint_error_dev(sc->sc_dev,
11381 "%s: no valid NVM bank present (%u)\n", __func__,
11382 *bank);
11383 return -1;
11384 } else {
11385 *bank = *bank - 2;
11386 return 0;
11387 }
11388 case WM_T_ICH8:
11389 case WM_T_ICH9:
11390 eecd = CSR_READ(sc, WMREG_EECD);
11391 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
11392 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
11393 return 0;
11394 }
11395 /* FALLTHROUGH */
11396 default:
11397 /* Default to 0 */
11398 *bank = 0;
11399
11400 /* Check bank 0 */
11401 wm_read_ich8_byte(sc, act_offset, &sig_byte);
11402 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
11403 *bank = 0;
11404 return 0;
11405 }
11406
11407 /* Check bank 1 */
11408 wm_read_ich8_byte(sc, act_offset + bank1_offset,
11409 &sig_byte);
11410 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
11411 *bank = 1;
11412 return 0;
11413 }
11414 }
11415
11416 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
11417 device_xname(sc->sc_dev)));
11418 return -1;
11419 }
11420
11421 /******************************************************************************
11422 * This function does initial flash setup so that a new read/write/erase cycle
11423 * can be started.
11424 *
11425 * sc - The pointer to the hw structure
11426 ****************************************************************************/
11427 static int32_t
11428 wm_ich8_cycle_init(struct wm_softc *sc)
11429 {
11430 uint16_t hsfsts;
11431 int32_t error = 1;
11432 int32_t i = 0;
11433
11434 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11435
11436 	/* Check that the Flash Descriptor Valid bit is set in Hw status */
11437 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
11438 return error;
11439 }
11440
11441 /* Clear FCERR in Hw status by writing 1 */
11442 /* Clear DAEL in Hw status by writing a 1 */
11443 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
11444
11445 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
11446
11447 	/*
11448 	 * Either we should have a hardware SPI cycle-in-progress bit to check
11449 	 * against before starting a new cycle, or the FDONE bit should be
11450 	 * changed in the hardware so that it reads 1 after a hardware reset;
11451 	 * it could then be used to tell whether a cycle is in progress or
11452 	 * has completed.  We should also have some software semaphore
11453 	 * mechanism to guard FDONE or the cycle-in-progress bit, so that
11454 	 * accesses to those bits by two threads are serialized and two
11455 	 * threads don't start a cycle at the same time.
11456 	 */
11457
11458 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
11459 /*
11460 * There is no cycle running at present, so we can start a
11461 * cycle
11462 */
11463
11464 /* Begin by setting Flash Cycle Done. */
11465 hsfsts |= HSFSTS_DONE;
11466 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
11467 error = 0;
11468 } else {
11469 /*
11470 		 * Otherwise poll for some time so the current cycle has a
11471 		 * chance to end before giving up.
11472 */
11473 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
11474 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11475 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
11476 error = 0;
11477 break;
11478 }
11479 delay(1);
11480 }
11481 if (error == 0) {
11482 /*
11483 			 * The previous cycle completed within the timeout,
11484 			 * so now set the Flash Cycle Done bit.
11485 */
11486 hsfsts |= HSFSTS_DONE;
11487 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
11488 }
11489 }
11490 return error;
11491 }
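
/*
 * In this driver the serialization concern raised in the comment above is
 * handled one level up: wm_nvm_read() brackets every access with
 * wm_nvm_acquire()/wm_nvm_release(), which on ICH/PCH parts take the
 * sc_ich_nvmmtx mutex (wm_get_nvm_ich8lan()), and the flash read routines
 * additionally take the EXTCNFCTR semaphore (wm_get_swfwhw_semaphore()).
 * Illustrative calling pattern (not compiled):
 */
#if 0
	if (wm_nvm_acquire(sc) == 0) {
		/* ... issue flash cycles via wm_read_ich8_data() ... */
		wm_nvm_release(sc);
	}
#endif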
11492
11493 /******************************************************************************
11494 * This function starts a flash cycle and waits for its completion
11495 *
11496 * sc - The pointer to the hw structure
11497 ****************************************************************************/
11498 static int32_t
11499 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
11500 {
11501 uint16_t hsflctl;
11502 uint16_t hsfsts;
11503 int32_t error = 1;
11504 uint32_t i = 0;
11505
11506 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
11507 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
11508 hsflctl |= HSFCTL_GO;
11509 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
11510
11511 /* Wait till FDONE bit is set to 1 */
11512 do {
11513 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11514 if (hsfsts & HSFSTS_DONE)
11515 break;
11516 delay(1);
11517 i++;
11518 } while (i < timeout);
11519 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
11520 error = 0;
11521
11522 return error;
11523 }
11524
11525 /******************************************************************************
11526 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
11527 *
11528 * sc - The pointer to the hw structure
11529 * index - The index of the byte or word to read.
11530 * size - Size of data to read, 1=byte 2=word, 4=dword
11531 * data - Pointer to the word to store the value read.
11532 *****************************************************************************/
11533 static int32_t
11534 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
11535 uint32_t size, uint32_t *data)
11536 {
11537 uint16_t hsfsts;
11538 uint16_t hsflctl;
11539 uint32_t flash_linear_address;
11540 uint32_t flash_data = 0;
11541 int32_t error = 1;
11542 int32_t count = 0;
11543
11544 if (size < 1 || size > 4 || data == 0x0 ||
11545 index > ICH_FLASH_LINEAR_ADDR_MASK)
11546 return error;
11547
11548 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
11549 sc->sc_ich8_flash_base;
11550
11551 do {
11552 delay(1);
11553 /* Steps */
11554 error = wm_ich8_cycle_init(sc);
11555 if (error)
11556 break;
11557
11558 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
11559 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
11560 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
11561 & HSFCTL_BCOUNT_MASK;
11562 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
11563 if (sc->sc_type == WM_T_PCH_SPT) {
11564 /*
11565 			 * In SPT, this register is in LAN memory space, not
11566 			 * flash. Therefore, only 32-bit access is supported.
11567 */
11568 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
11569 (uint32_t)hsflctl);
11570 } else
11571 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
11572
11573 /*
11574 * Write the last 24 bits of index into Flash Linear address
11575 * field in Flash Address
11576 */
11577 		/* TODO: maybe check the index against the size of the flash */
11578
11579 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
11580
11581 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
11582
11583 /*
11584 		 * If FCERR is set to 1, clear it and retry the whole
11585 		 * sequence a few more times; otherwise read in (shift in)
11586 		 * the Flash Data0. The data comes back least significant
11587 		 * byte first.
11588 */
11589 if (error == 0) {
11590 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
11591 if (size == 1)
11592 *data = (uint8_t)(flash_data & 0x000000FF);
11593 else if (size == 2)
11594 *data = (uint16_t)(flash_data & 0x0000FFFF);
11595 else if (size == 4)
11596 *data = (uint32_t)flash_data;
11597 break;
11598 } else {
11599 /*
11600 * If we've gotten here, then things are probably
11601 * completely hosed, but if the error condition is
11602 * detected, it won't hurt to give it another try...
11603 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
11604 */
11605 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11606 if (hsfsts & HSFSTS_ERR) {
11607 /* Repeat for some time before giving up. */
11608 continue;
11609 } else if ((hsfsts & HSFSTS_DONE) == 0)
11610 break;
11611 }
11612 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
11613
11614 return error;
11615 }
11616
11617 /******************************************************************************
11618 * Reads a single byte from the NVM using the ICH8 flash access registers.
11619 *
11620 * sc - pointer to wm_hw structure
11621 * index - The index of the byte to read.
11622 * data - Pointer to a byte to store the value read.
11623 *****************************************************************************/
11624 static int32_t
11625 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
11626 {
11627 int32_t status;
11628 uint32_t word = 0;
11629
11630 status = wm_read_ich8_data(sc, index, 1, &word);
11631 if (status == 0)
11632 *data = (uint8_t)word;
11633 else
11634 *data = 0;
11635
11636 return status;
11637 }
11638
11639 /******************************************************************************
11640 * Reads a word from the NVM using the ICH8 flash access registers.
11641 *
11642 * sc - pointer to wm_hw structure
11643 * index - The starting byte index of the word to read.
11644 * data - Pointer to a word to store the value read.
11645 *****************************************************************************/
11646 static int32_t
11647 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
11648 {
11649 int32_t status;
11650 uint32_t word = 0;
11651
11652 status = wm_read_ich8_data(sc, index, 2, &word);
11653 if (status == 0)
11654 *data = (uint16_t)word;
11655 else
11656 *data = 0;
11657
11658 return status;
11659 }
11660
11661 /******************************************************************************
11662 * Reads a dword from the NVM using the ICH8 flash access registers.
11663 *
11664 * sc - pointer to wm_hw structure
11665 * index - The starting byte index of the word to read.
11666 * data - Pointer to a word to store the value read.
11667 *****************************************************************************/
11668 static int32_t
11669 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
11670 {
11671 int32_t status;
11672
11673 status = wm_read_ich8_data(sc, index, 4, data);
11674 return status;
11675 }
11676
11677 /******************************************************************************
11678 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
11679 * register.
11680 *
11681 * sc - Struct containing variables accessed by shared code
11682 * offset - offset of word in the EEPROM to read
11683 * data - word read from the EEPROM
11684 * words - number of words to read
11685 *****************************************************************************/
11686 static int
11687 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
11688 {
11689 int32_t error = 0;
11690 uint32_t flash_bank = 0;
11691 uint32_t act_offset = 0;
11692 uint32_t bank_offset = 0;
11693 uint16_t word = 0;
11694 uint16_t i = 0;
11695
11696 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11697 device_xname(sc->sc_dev), __func__));
11698
11699 /*
11700 * We need to know which is the valid flash bank. In the event
11701 * that we didn't allocate eeprom_shadow_ram, we may not be
11702 * managing flash_bank. So it cannot be trusted and needs
11703 * to be updated with each read.
11704 */
11705 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
11706 if (error) {
11707 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
11708 device_xname(sc->sc_dev)));
11709 flash_bank = 0;
11710 }
11711
11712 /*
11713 * Adjust offset appropriately if we're on bank 1 - adjust for word
11714 * size
11715 */
11716 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
11717
11718 error = wm_get_swfwhw_semaphore(sc);
11719 if (error) {
11720 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11721 __func__);
11722 return error;
11723 }
11724
11725 for (i = 0; i < words; i++) {
11726 /* The NVM part needs a byte offset, hence * 2 */
11727 act_offset = bank_offset + ((offset + i) * 2);
11728 error = wm_read_ich8_word(sc, act_offset, &word);
11729 if (error) {
11730 aprint_error_dev(sc->sc_dev,
11731 "%s: failed to read NVM\n", __func__);
11732 break;
11733 }
11734 data[i] = word;
11735 }
11736
11737 wm_put_swfwhw_semaphore(sc);
11738 return error;
11739 }
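
/*
 * Worked example of the offset arithmetic above (the bank size is a made-up
 * value for illustration): with flash_bank = 1 and an sc_ich8_flash_bank_size
 * of 0x800 words, bank_offset is 0x800 * 2 = 0x1000 bytes, so NVM word 0x10
 * is fetched from flash byte offset 0x1000 + 0x10 * 2 = 0x1020.
 */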
11740
11741 /******************************************************************************
11742 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
11743 * register.
11744 *
11745 * sc - Struct containing variables accessed by shared code
11746 * offset - offset of word in the EEPROM to read
11747 * data - word read from the EEPROM
11748 * words - number of words to read
11749 *****************************************************************************/
11750 static int
11751 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
11752 {
11753 int32_t error = 0;
11754 uint32_t flash_bank = 0;
11755 uint32_t act_offset = 0;
11756 uint32_t bank_offset = 0;
11757 uint32_t dword = 0;
11758 uint16_t i = 0;
11759
11760 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11761 device_xname(sc->sc_dev), __func__));
11762
11763 /*
11764 * We need to know which is the valid flash bank. In the event
11765 * that we didn't allocate eeprom_shadow_ram, we may not be
11766 * managing flash_bank. So it cannot be trusted and needs
11767 * to be updated with each read.
11768 */
11769 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
11770 if (error) {
11771 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
11772 device_xname(sc->sc_dev)));
11773 flash_bank = 0;
11774 }
11775
11776 /*
11777 * Adjust offset appropriately if we're on bank 1 - adjust for word
11778 * size
11779 */
11780 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
11781
11782 error = wm_get_swfwhw_semaphore(sc);
11783 if (error) {
11784 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11785 __func__);
11786 return error;
11787 }
11788
11789 for (i = 0; i < words; i++) {
11790 /* The NVM part needs a byte offset, hence * 2 */
11791 act_offset = bank_offset + ((offset + i) * 2);
11792 /* but we must read dword aligned, so mask ... */
11793 error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
11794 if (error) {
11795 aprint_error_dev(sc->sc_dev,
11796 "%s: failed to read NVM\n", __func__);
11797 break;
11798 }
11799 /* ... and pick out low or high word */
11800 if ((act_offset & 0x2) == 0)
11801 data[i] = (uint16_t)(dword & 0xFFFF);
11802 else
11803 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
11804 }
11805
11806 wm_put_swfwhw_semaphore(sc);
11807 return error;
11808 }
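
/*
 * Worked example of the dword-aligned read above: for byte offset
 * act_offset = 0x1022 the code reads the dword at 0x1020 and, because bit 1
 * of act_offset is set, returns the upper 16 bits; for act_offset = 0x1020
 * it would return the lower 16 bits instead.
 */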
11809
11810 /* iNVM */
11811
11812 static int
11813 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
11814 {
11815 int32_t rv = 0;
11816 uint32_t invm_dword;
11817 uint16_t i;
11818 uint8_t record_type, word_address;
11819
11820 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11821 device_xname(sc->sc_dev), __func__));
11822
11823 for (i = 0; i < INVM_SIZE; i++) {
11824 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
11825 /* Get record type */
11826 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
11827 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
11828 break;
11829 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
11830 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
11831 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
11832 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
11833 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
11834 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
11835 if (word_address == address) {
11836 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
11837 rv = 0;
11838 break;
11839 }
11840 }
11841 }
11842
11843 return rv;
11844 }
11845
11846 static int
11847 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
11848 {
11849 int rv = 0;
11850 int i;
11851
11852 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11853 device_xname(sc->sc_dev), __func__));
11854
11855 for (i = 0; i < words; i++) {
11856 switch (offset + i) {
11857 case NVM_OFF_MACADDR:
11858 case NVM_OFF_MACADDR1:
11859 case NVM_OFF_MACADDR2:
11860 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
11861 if (rv != 0) {
11862 data[i] = 0xffff;
11863 rv = -1;
11864 }
11865 break;
11866 case NVM_OFF_CFG2:
11867 rv = wm_nvm_read_word_invm(sc, offset, data);
11868 if (rv != 0) {
11869 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
11870 rv = 0;
11871 }
11872 break;
11873 case NVM_OFF_CFG4:
11874 rv = wm_nvm_read_word_invm(sc, offset, data);
11875 if (rv != 0) {
11876 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
11877 rv = 0;
11878 }
11879 break;
11880 case NVM_OFF_LED_1_CFG:
11881 rv = wm_nvm_read_word_invm(sc, offset, data);
11882 if (rv != 0) {
11883 *data = NVM_LED_1_CFG_DEFAULT_I211;
11884 rv = 0;
11885 }
11886 break;
11887 case NVM_OFF_LED_0_2_CFG:
11888 rv = wm_nvm_read_word_invm(sc, offset, data);
11889 if (rv != 0) {
11890 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
11891 rv = 0;
11892 }
11893 break;
11894 case NVM_OFF_ID_LED_SETTINGS:
11895 rv = wm_nvm_read_word_invm(sc, offset, data);
11896 if (rv != 0) {
11897 *data = ID_LED_RESERVED_FFFF;
11898 rv = 0;
11899 }
11900 break;
11901 default:
11902 DPRINTF(WM_DEBUG_NVM,
11903 ("NVM word 0x%02x is not mapped.\n", offset));
11904 *data = NVM_RESERVED_WORD;
11905 break;
11906 }
11907 }
11908
11909 return rv;
11910 }
11911
11912 /* Lock, detecting NVM type, validate checksum, version and read */
11913
11914 /*
11915 * wm_nvm_acquire:
11916 *
11917 * Perform the EEPROM handshake required on some chips.
11918 */
11919 static int
11920 wm_nvm_acquire(struct wm_softc *sc)
11921 {
11922 uint32_t reg;
11923 int x;
11924 int ret = 0;
11925
11926 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11927 device_xname(sc->sc_dev), __func__));
11928
11929 if (sc->sc_type >= WM_T_ICH8) {
11930 ret = wm_get_nvm_ich8lan(sc);
11931 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
11932 ret = wm_get_swfwhw_semaphore(sc);
11933 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
11934 /* This will also do wm_get_swsm_semaphore() if needed */
11935 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
11936 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
11937 ret = wm_get_swsm_semaphore(sc);
11938 }
11939
11940 if (ret) {
11941 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11942 __func__);
11943 return 1;
11944 }
11945
11946 if (sc->sc_flags & WM_F_LOCK_EECD) {
11947 reg = CSR_READ(sc, WMREG_EECD);
11948
11949 /* Request EEPROM access. */
11950 reg |= EECD_EE_REQ;
11951 CSR_WRITE(sc, WMREG_EECD, reg);
11952
11953 /* ..and wait for it to be granted. */
11954 for (x = 0; x < 1000; x++) {
11955 reg = CSR_READ(sc, WMREG_EECD);
11956 if (reg & EECD_EE_GNT)
11957 break;
11958 delay(5);
11959 }
11960 if ((reg & EECD_EE_GNT) == 0) {
11961 aprint_error_dev(sc->sc_dev,
11962 "could not acquire EEPROM GNT\n");
11963 reg &= ~EECD_EE_REQ;
11964 CSR_WRITE(sc, WMREG_EECD, reg);
11965 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
11966 wm_put_swfwhw_semaphore(sc);
11967 if (sc->sc_flags & WM_F_LOCK_SWFW)
11968 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
11969 else if (sc->sc_flags & WM_F_LOCK_SWSM)
11970 wm_put_swsm_semaphore(sc);
11971 return 1;
11972 }
11973 }
11974
11975 return 0;
11976 }
11977
11978 /*
11979 * wm_nvm_release:
11980 *
11981 * Release the EEPROM mutex.
11982 */
11983 static void
11984 wm_nvm_release(struct wm_softc *sc)
11985 {
11986 uint32_t reg;
11987
11988 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11989 device_xname(sc->sc_dev), __func__));
11990
11991 if (sc->sc_flags & WM_F_LOCK_EECD) {
11992 reg = CSR_READ(sc, WMREG_EECD);
11993 reg &= ~EECD_EE_REQ;
11994 CSR_WRITE(sc, WMREG_EECD, reg);
11995 }
11996
11997 if (sc->sc_type >= WM_T_ICH8) {
11998 wm_put_nvm_ich8lan(sc);
11999 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
12000 wm_put_swfwhw_semaphore(sc);
12001 if (sc->sc_flags & WM_F_LOCK_SWFW)
12002 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
12003 else if (sc->sc_flags & WM_F_LOCK_SWSM)
12004 wm_put_swsm_semaphore(sc);
12005 }
12006
12007 static int
12008 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
12009 {
12010 uint32_t eecd = 0;
12011
12012 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
12013 || sc->sc_type == WM_T_82583) {
12014 eecd = CSR_READ(sc, WMREG_EECD);
12015
12016 /* Isolate bits 15 & 16 */
12017 eecd = ((eecd >> 15) & 0x03);
12018
12019 /* If both bits are set, device is Flash type */
12020 if (eecd == 0x03)
12021 return 0;
12022 }
12023 return 1;
12024 }
12025
12026 static int
12027 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
12028 {
12029 uint32_t eec;
12030
12031 eec = CSR_READ(sc, WMREG_EEC);
12032 if ((eec & EEC_FLASH_DETECTED) != 0)
12033 return 1;
12034
12035 return 0;
12036 }
12037
12038 /*
12039 * wm_nvm_validate_checksum
12040 *
12041 * The checksum is defined as the sum of the first 64 (16 bit) words.
12042 */
12043 static int
12044 wm_nvm_validate_checksum(struct wm_softc *sc)
12045 {
12046 uint16_t checksum;
12047 uint16_t eeprom_data;
12048 #ifdef WM_DEBUG
12049 uint16_t csum_wordaddr, valid_checksum;
12050 #endif
12051 int i;
12052
12053 checksum = 0;
12054
12055 /* Don't check for I211 */
12056 if (sc->sc_type == WM_T_I211)
12057 return 0;
12058
12059 #ifdef WM_DEBUG
12060 if (sc->sc_type == WM_T_PCH_LPT) {
12061 csum_wordaddr = NVM_OFF_COMPAT;
12062 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
12063 } else {
12064 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
12065 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
12066 }
12067
12068 /* Dump EEPROM image for debug */
12069 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12070 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12071 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
12072 /* XXX PCH_SPT? */
12073 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
12074 if ((eeprom_data & valid_checksum) == 0) {
12075 DPRINTF(WM_DEBUG_NVM,
12076 ("%s: NVM need to be updated (%04x != %04x)\n",
12077 device_xname(sc->sc_dev), eeprom_data,
12078 valid_checksum));
12079 }
12080 }
12081
12082 if ((wm_debug & WM_DEBUG_NVM) != 0) {
12083 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
12084 for (i = 0; i < NVM_SIZE; i++) {
12085 if (wm_nvm_read(sc, i, 1, &eeprom_data))
12086 printf("XXXX ");
12087 else
12088 printf("%04hx ", eeprom_data);
12089 if (i % 8 == 7)
12090 printf("\n");
12091 }
12092 }
12093
12094 #endif /* WM_DEBUG */
12095
12096 for (i = 0; i < NVM_SIZE; i++) {
12097 if (wm_nvm_read(sc, i, 1, &eeprom_data))
12098 return 1;
12099 checksum += eeprom_data;
12100 }
12101
12102 if (checksum != (uint16_t) NVM_CHECKSUM) {
12103 #ifdef WM_DEBUG
12104 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
12105 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
12106 #endif
12107 }
12108
12109 return 0;
12110 }
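
/*
 * Minimal sketch (not compiled) of the checksum rule used above: the sum of
 * all NVM_SIZE words, including the checksum word itself, must equal
 * NVM_CHECKSUM modulo 2^16.  "image" and "valid" are hypothetical names.
 */
#if 0
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_SIZE; i++)
		sum += image[i];
	valid = (sum == (uint16_t)NVM_CHECKSUM);
#endif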
12111
12112 static void
12113 wm_nvm_version_invm(struct wm_softc *sc)
12114 {
12115 uint32_t dword;
12116
12117 /*
12118 	 * Linux's code to decode the version is very strange, so we don't
12119 	 * follow that algorithm and just use word 61 as the documentation
12120 	 * describes. Perhaps it's not perfect though...
12121 *
12122 * Example:
12123 *
12124 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
12125 */
12126 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
12127 dword = __SHIFTOUT(dword, INVM_VER_1);
12128 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
12129 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
12130 }
12131
12132 static void
12133 wm_nvm_version(struct wm_softc *sc)
12134 {
12135 uint16_t major, minor, build, patch;
12136 uint16_t uid0, uid1;
12137 uint16_t nvm_data;
12138 uint16_t off;
12139 bool check_version = false;
12140 bool check_optionrom = false;
12141 bool have_build = false;
12142
12143 /*
12144 * Version format:
12145 *
12146 * XYYZ
12147 * X0YZ
12148 * X0YY
12149 *
12150 * Example:
12151 *
12152 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
12153 * 82571 0x50a6 5.10.6?
12154 * 82572 0x506a 5.6.10?
12155 * 82572EI 0x5069 5.6.9?
12156 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
12157 * 0x2013 2.1.3?
12158 	 *	 82583		0x10a0	1.10.0? (document says it's the default value)
12159 */
12160 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
12161 switch (sc->sc_type) {
12162 case WM_T_82571:
12163 case WM_T_82572:
12164 case WM_T_82574:
12165 case WM_T_82583:
12166 check_version = true;
12167 check_optionrom = true;
12168 have_build = true;
12169 break;
12170 case WM_T_82575:
12171 case WM_T_82576:
12172 case WM_T_82580:
12173 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
12174 check_version = true;
12175 break;
12176 case WM_T_I211:
12177 wm_nvm_version_invm(sc);
12178 goto printver;
12179 case WM_T_I210:
12180 if (!wm_nvm_get_flash_presence_i210(sc)) {
12181 wm_nvm_version_invm(sc);
12182 goto printver;
12183 }
12184 /* FALLTHROUGH */
12185 case WM_T_I350:
12186 case WM_T_I354:
12187 check_version = true;
12188 check_optionrom = true;
12189 break;
12190 default:
12191 return;
12192 }
12193 if (check_version) {
12194 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
12195 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
12196 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
12197 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
12198 build = nvm_data & NVM_BUILD_MASK;
12199 have_build = true;
12200 } else
12201 minor = nvm_data & 0x00ff;
12202
12203 /* Decimal */
12204 minor = (minor / 16) * 10 + (minor % 16);
12205 sc->sc_nvm_ver_major = major;
12206 sc->sc_nvm_ver_minor = minor;
12207
12208 printver:
12209 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
12210 sc->sc_nvm_ver_minor);
12211 if (have_build) {
12212 sc->sc_nvm_ver_build = build;
12213 aprint_verbose(".%d", build);
12214 }
12215 }
12216 if (check_optionrom) {
12217 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
12218 /* Option ROM Version */
12219 if ((off != 0x0000) && (off != 0xffff)) {
12220 off += NVM_COMBO_VER_OFF;
12221 wm_nvm_read(sc, off + 1, 1, &uid1);
12222 wm_nvm_read(sc, off, 1, &uid0);
12223 if ((uid0 != 0) && (uid0 != 0xffff)
12224 && (uid1 != 0) && (uid1 != 0xffff)) {
12225 /* 16bits */
12226 major = uid0 >> 8;
12227 build = (uid0 << 8) | (uid1 >> 8);
12228 patch = uid1 & 0x00ff;
12229 aprint_verbose(", option ROM Version %d.%d.%d",
12230 major, build, patch);
12231 }
12232 }
12233 }
12234
12235 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
12236 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
12237 }
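
/*
 * Worked example for the XYYZ format described in wm_nvm_version() above:
 * NVM word 0x50a2 on an 82571 splits into major X = 5, minor YY = 0x0a and
 * build Z = 2.  The conversion (minor / 16) * 10 + (minor % 16) turns 0x0a
 * into decimal 10, so the printver path reports "5.10.2".
 */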
12238
12239 /*
12240 * wm_nvm_read:
12241 *
12242 * Read data from the serial EEPROM.
12243 */
12244 static int
12245 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12246 {
12247 int rv;
12248
12249 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12250 device_xname(sc->sc_dev), __func__));
12251
12252 if (sc->sc_flags & WM_F_EEPROM_INVALID)
12253 return 1;
12254
12255 if (wm_nvm_acquire(sc))
12256 return 1;
12257
12258 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12259 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12260 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
12261 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
12262 else if (sc->sc_type == WM_T_PCH_SPT)
12263 rv = wm_nvm_read_spt(sc, word, wordcnt, data);
12264 else if (sc->sc_flags & WM_F_EEPROM_INVM)
12265 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
12266 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
12267 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
12268 else if (sc->sc_flags & WM_F_EEPROM_SPI)
12269 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
12270 else
12271 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
12272
12273 wm_nvm_release(sc);
12274 return rv;
12275 }
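
/*
 * Typical caller pattern for wm_nvm_read() (illustrative, not compiled):
 * read a single configuration word and check the return value, since the
 * acquire step or any of the backend read paths above may fail.
 */
#if 0
	uint16_t cfg2;

	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2) != 0)
		aprint_error_dev(sc->sc_dev, "failed to read NVM CFG2\n");
#endif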
12276
12277 /*
12278 * Hardware semaphores.
12279  * Very complex...
12280 */
12281
12282 static int
12283 wm_get_null(struct wm_softc *sc)
12284 {
12285
12286 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12287 device_xname(sc->sc_dev), __func__));
12288 return 0;
12289 }
12290
12291 static void
12292 wm_put_null(struct wm_softc *sc)
12293 {
12294
12295 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12296 device_xname(sc->sc_dev), __func__));
12297 return;
12298 }
12299
12300 /*
12301 * Get hardware semaphore.
12302 * Same as e1000_get_hw_semaphore_generic()
12303 */
12304 static int
12305 wm_get_swsm_semaphore(struct wm_softc *sc)
12306 {
12307 int32_t timeout;
12308 uint32_t swsm;
12309
12310 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12311 device_xname(sc->sc_dev), __func__));
12312 KASSERT(sc->sc_nvm_wordsize > 0);
12313
12314 /* Get the SW semaphore. */
12315 timeout = sc->sc_nvm_wordsize + 1;
12316 while (timeout) {
12317 swsm = CSR_READ(sc, WMREG_SWSM);
12318
12319 if ((swsm & SWSM_SMBI) == 0)
12320 break;
12321
12322 delay(50);
12323 timeout--;
12324 }
12325
12326 if (timeout == 0) {
12327 aprint_error_dev(sc->sc_dev,
12328 "could not acquire SWSM SMBI\n");
12329 return 1;
12330 }
12331
12332 /* Get the FW semaphore. */
12333 timeout = sc->sc_nvm_wordsize + 1;
12334 while (timeout) {
12335 swsm = CSR_READ(sc, WMREG_SWSM);
12336 swsm |= SWSM_SWESMBI;
12337 CSR_WRITE(sc, WMREG_SWSM, swsm);
12338 /* If we managed to set the bit we got the semaphore. */
12339 swsm = CSR_READ(sc, WMREG_SWSM);
12340 if (swsm & SWSM_SWESMBI)
12341 break;
12342
12343 delay(50);
12344 timeout--;
12345 }
12346
12347 if (timeout == 0) {
12348 aprint_error_dev(sc->sc_dev,
12349 "could not acquire SWSM SWESMBI\n");
12350 /* Release semaphores */
12351 wm_put_swsm_semaphore(sc);
12352 return 1;
12353 }
12354 return 0;
12355 }
12356
12357 /*
12358 * Put hardware semaphore.
12359 * Same as e1000_put_hw_semaphore_generic()
12360 */
12361 static void
12362 wm_put_swsm_semaphore(struct wm_softc *sc)
12363 {
12364 uint32_t swsm;
12365
12366 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12367 device_xname(sc->sc_dev), __func__));
12368
12369 swsm = CSR_READ(sc, WMREG_SWSM);
12370 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
12371 CSR_WRITE(sc, WMREG_SWSM, swsm);
12372 }
12373
12374 /*
12375 * Get SW/FW semaphore.
12376 * Same as e1000_acquire_swfw_sync_82575().
12377 */
12378 static int
12379 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
12380 {
12381 uint32_t swfw_sync;
12382 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
12383 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
12384 int timeout = 200;
12385
12386 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12387 device_xname(sc->sc_dev), __func__));
12388 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
12389
12390 for (timeout = 0; timeout < 200; timeout++) {
12391 if (sc->sc_flags & WM_F_LOCK_SWSM) {
12392 if (wm_get_swsm_semaphore(sc)) {
12393 aprint_error_dev(sc->sc_dev,
12394 "%s: failed to get semaphore\n",
12395 __func__);
12396 return 1;
12397 }
12398 }
12399 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
12400 if ((swfw_sync & (swmask | fwmask)) == 0) {
12401 swfw_sync |= swmask;
12402 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
12403 if (sc->sc_flags & WM_F_LOCK_SWSM)
12404 wm_put_swsm_semaphore(sc);
12405 return 0;
12406 }
12407 if (sc->sc_flags & WM_F_LOCK_SWSM)
12408 wm_put_swsm_semaphore(sc);
12409 delay(5000);
12410 }
12411 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
12412 device_xname(sc->sc_dev), mask, swfw_sync);
12413 return 1;
12414 }
12415
12416 static void
12417 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
12418 {
12419 uint32_t swfw_sync;
12420
12421 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12422 device_xname(sc->sc_dev), __func__));
12423 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
12424
12425 if (sc->sc_flags & WM_F_LOCK_SWSM) {
12426 while (wm_get_swsm_semaphore(sc) != 0)
12427 continue;
12428 }
12429 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
12430 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
12431 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
12432 if (sc->sc_flags & WM_F_LOCK_SWSM)
12433 wm_put_swsm_semaphore(sc);
12434 }
12435
12436 static int
12437 wm_get_phy_82575(struct wm_softc *sc)
12438 {
12439
12440 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12441 device_xname(sc->sc_dev), __func__));
12442 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
12443 }
12444
12445 static void
12446 wm_put_phy_82575(struct wm_softc *sc)
12447 {
12448
12449 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12450 device_xname(sc->sc_dev), __func__));
12451 return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
12452 }
12453
12454 static int
12455 wm_get_swfwhw_semaphore(struct wm_softc *sc)
12456 {
12457 uint32_t ext_ctrl;
12458 int timeout = 200;
12459
12460 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12461 device_xname(sc->sc_dev), __func__));
12462
12463 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
12464 for (timeout = 0; timeout < 200; timeout++) {
12465 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12466 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
12467 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12468
12469 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12470 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
12471 return 0;
12472 delay(5000);
12473 }
12474 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
12475 device_xname(sc->sc_dev), ext_ctrl);
12476 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
12477 return 1;
12478 }
12479
12480 static void
12481 wm_put_swfwhw_semaphore(struct wm_softc *sc)
12482 {
12483 uint32_t ext_ctrl;
12484
12485 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12486 device_xname(sc->sc_dev), __func__));
12487
12488 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12489 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12490 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12491
12492 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
12493 }
12494
12495 static int
12496 wm_get_swflag_ich8lan(struct wm_softc *sc)
12497 {
12498 uint32_t ext_ctrl;
12499 int timeout;
12500
12501 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12502 device_xname(sc->sc_dev), __func__));
12503 mutex_enter(sc->sc_ich_phymtx);
12504 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
12505 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12506 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
12507 break;
12508 delay(1000);
12509 }
12510 if (timeout >= WM_PHY_CFG_TIMEOUT) {
12511 printf("%s: SW has already locked the resource\n",
12512 device_xname(sc->sc_dev));
12513 goto out;
12514 }
12515
12516 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
12517 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12518 for (timeout = 0; timeout < 1000; timeout++) {
12519 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12520 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
12521 break;
12522 delay(1000);
12523 }
12524 if (timeout >= 1000) {
12525 printf("%s: failed to acquire semaphore\n",
12526 device_xname(sc->sc_dev));
12527 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12528 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12529 goto out;
12530 }
12531 return 0;
12532
12533 out:
12534 mutex_exit(sc->sc_ich_phymtx);
12535 return 1;
12536 }
12537
12538 static void
12539 wm_put_swflag_ich8lan(struct wm_softc *sc)
12540 {
12541 uint32_t ext_ctrl;
12542
12543 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12544 device_xname(sc->sc_dev), __func__));
12545 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12546 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
12547 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12548 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12549 } else {
12550 printf("%s: Semaphore unexpectedly released\n",
12551 device_xname(sc->sc_dev));
12552 }
12553
12554 mutex_exit(sc->sc_ich_phymtx);
12555 }
12556
12557 static int
12558 wm_get_nvm_ich8lan(struct wm_softc *sc)
12559 {
12560
12561 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12562 device_xname(sc->sc_dev), __func__));
12563 mutex_enter(sc->sc_ich_nvmmtx);
12564
12565 return 0;
12566 }
12567
12568 static void
12569 wm_put_nvm_ich8lan(struct wm_softc *sc)
12570 {
12571
12572 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12573 device_xname(sc->sc_dev), __func__));
12574 mutex_exit(sc->sc_ich_nvmmtx);
12575 }
12576
12577 static int
12578 wm_get_hw_semaphore_82573(struct wm_softc *sc)
12579 {
12580 int i = 0;
12581 uint32_t reg;
12582
12583 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12584 device_xname(sc->sc_dev), __func__));
12585
12586 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12587 do {
12588 CSR_WRITE(sc, WMREG_EXTCNFCTR,
12589 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
12590 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12591 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
12592 break;
12593 delay(2*1000);
12594 i++;
12595 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
12596
12597 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
12598 wm_put_hw_semaphore_82573(sc);
12599 log(LOG_ERR, "%s: Driver can't access the PHY\n",
12600 device_xname(sc->sc_dev));
12601 return -1;
12602 }
12603
12604 return 0;
12605 }
12606
12607 static void
12608 wm_put_hw_semaphore_82573(struct wm_softc *sc)
12609 {
12610 uint32_t reg;
12611
12612 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12613 device_xname(sc->sc_dev), __func__));
12614
12615 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12616 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12617 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
12618 }
12619
12620 /*
12621 * Management mode and power management related subroutines.
12622 * BMC, AMT, suspend/resume and EEE.
12623 */
12624
12625 #ifdef WM_WOL
12626 static int
12627 wm_check_mng_mode(struct wm_softc *sc)
12628 {
12629 int rv;
12630
12631 switch (sc->sc_type) {
12632 case WM_T_ICH8:
12633 case WM_T_ICH9:
12634 case WM_T_ICH10:
12635 case WM_T_PCH:
12636 case WM_T_PCH2:
12637 case WM_T_PCH_LPT:
12638 case WM_T_PCH_SPT:
12639 rv = wm_check_mng_mode_ich8lan(sc);
12640 break;
12641 case WM_T_82574:
12642 case WM_T_82583:
12643 rv = wm_check_mng_mode_82574(sc);
12644 break;
12645 case WM_T_82571:
12646 case WM_T_82572:
12647 case WM_T_82573:
12648 case WM_T_80003:
12649 rv = wm_check_mng_mode_generic(sc);
12650 break;
12651 default:
12652 		/* nothing to do */
12653 rv = 0;
12654 break;
12655 }
12656
12657 return rv;
12658 }
12659
12660 static int
12661 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
12662 {
12663 uint32_t fwsm;
12664
12665 fwsm = CSR_READ(sc, WMREG_FWSM);
12666
12667 if (((fwsm & FWSM_FW_VALID) != 0)
12668 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
12669 return 1;
12670
12671 return 0;
12672 }
12673
12674 static int
12675 wm_check_mng_mode_82574(struct wm_softc *sc)
12676 {
12677 uint16_t data;
12678
12679 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
12680
12681 if ((data & NVM_CFG2_MNGM_MASK) != 0)
12682 return 1;
12683
12684 return 0;
12685 }
12686
12687 static int
12688 wm_check_mng_mode_generic(struct wm_softc *sc)
12689 {
12690 uint32_t fwsm;
12691
12692 fwsm = CSR_READ(sc, WMREG_FWSM);
12693
12694 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
12695 return 1;
12696
12697 return 0;
12698 }
12699 #endif /* WM_WOL */
12700
12701 static int
12702 wm_enable_mng_pass_thru(struct wm_softc *sc)
12703 {
12704 uint32_t manc, fwsm, factps;
12705
12706 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
12707 return 0;
12708
12709 manc = CSR_READ(sc, WMREG_MANC);
12710
12711 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
12712 device_xname(sc->sc_dev), manc));
12713 if ((manc & MANC_RECV_TCO_EN) == 0)
12714 return 0;
12715
12716 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
12717 fwsm = CSR_READ(sc, WMREG_FWSM);
12718 factps = CSR_READ(sc, WMREG_FACTPS);
12719 if (((factps & FACTPS_MNGCG) == 0)
12720 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
12721 return 1;
12722 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
12723 uint16_t data;
12724
12725 factps = CSR_READ(sc, WMREG_FACTPS);
12726 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
12727 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
12728 device_xname(sc->sc_dev), factps, data));
12729 if (((factps & FACTPS_MNGCG) == 0)
12730 && ((data & NVM_CFG2_MNGM_MASK)
12731 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
12732 return 1;
12733 } else if (((manc & MANC_SMBUS_EN) != 0)
12734 && ((manc & MANC_ASF_EN) == 0))
12735 return 1;
12736
12737 return 0;
12738 }
12739
12740 static bool
12741 wm_phy_resetisblocked(struct wm_softc *sc)
12742 {
12743 bool blocked = false;
12744 uint32_t reg;
12745 int i = 0;
12746
12747 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12748 device_xname(sc->sc_dev), __func__));
12749
12750 switch (sc->sc_type) {
12751 case WM_T_ICH8:
12752 case WM_T_ICH9:
12753 case WM_T_ICH10:
12754 case WM_T_PCH:
12755 case WM_T_PCH2:
12756 case WM_T_PCH_LPT:
12757 case WM_T_PCH_SPT:
12758 do {
12759 reg = CSR_READ(sc, WMREG_FWSM);
12760 if ((reg & FWSM_RSPCIPHY) == 0) {
12761 blocked = true;
12762 delay(10*1000);
12763 continue;
12764 }
12765 blocked = false;
12766 } while (blocked && (i++ < 30));
12767 return blocked;
12768 break;
12769 case WM_T_82571:
12770 case WM_T_82572:
12771 case WM_T_82573:
12772 case WM_T_82574:
12773 case WM_T_82583:
12774 case WM_T_80003:
12775 reg = CSR_READ(sc, WMREG_MANC);
12776 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
12777 return true;
12778 else
12779 return false;
12780 break;
12781 default:
12782 /* no problem */
12783 break;
12784 }
12785
12786 return false;
12787 }
12788
12789 static void
12790 wm_get_hw_control(struct wm_softc *sc)
12791 {
12792 uint32_t reg;
12793
12794 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12795 device_xname(sc->sc_dev), __func__));
12796
12797 if (sc->sc_type == WM_T_82573) {
12798 reg = CSR_READ(sc, WMREG_SWSM);
12799 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
12800 } else if (sc->sc_type >= WM_T_82571) {
12801 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12802 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
12803 }
12804 }
12805
12806 static void
12807 wm_release_hw_control(struct wm_softc *sc)
12808 {
12809 uint32_t reg;
12810
12811 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12812 device_xname(sc->sc_dev), __func__));
12813
12814 if (sc->sc_type == WM_T_82573) {
12815 reg = CSR_READ(sc, WMREG_SWSM);
12816 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
12817 } else if (sc->sc_type >= WM_T_82571) {
12818 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12819 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
12820 }
12821 }
12822
12823 static void
12824 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
12825 {
12826 uint32_t reg;
12827
12828 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12829 device_xname(sc->sc_dev), __func__));
12830
12831 if (sc->sc_type < WM_T_PCH2)
12832 return;
12833
12834 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12835
12836 if (gate)
12837 reg |= EXTCNFCTR_GATE_PHY_CFG;
12838 else
12839 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
12840
12841 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
12842 }
12843
12844 static void
12845 wm_smbustopci(struct wm_softc *sc)
12846 {
12847 uint32_t fwsm, reg;
12848 int rv = 0;
12849
12850 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12851 device_xname(sc->sc_dev), __func__));
12852
12853 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
12854 wm_gate_hw_phy_config_ich8lan(sc, true);
12855
12856 /* Disable ULP */
12857 wm_ulp_disable(sc);
12858
12859 /* Acquire PHY semaphore */
12860 sc->phy.acquire(sc);
12861
12862 fwsm = CSR_READ(sc, WMREG_FWSM);
12863 switch (sc->sc_type) {
12864 case WM_T_PCH_LPT:
12865 case WM_T_PCH_SPT:
12866 if (wm_phy_is_accessible_pchlan(sc))
12867 break;
12868
12869 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12870 reg |= CTRL_EXT_FORCE_SMBUS;
12871 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12872 #if 0
12873 /* XXX Isn't this required??? */
12874 CSR_WRITE_FLUSH(sc);
12875 #endif
12876 delay(50 * 1000);
12877 /* FALLTHROUGH */
12878 case WM_T_PCH2:
12879 if (wm_phy_is_accessible_pchlan(sc) == true)
12880 break;
12881 /* FALLTHROUGH */
12882 case WM_T_PCH:
12883 if (sc->sc_type == WM_T_PCH)
12884 if ((fwsm & FWSM_FW_VALID) != 0)
12885 break;
12886
12887 if (wm_phy_resetisblocked(sc) == true) {
12888 printf("XXX reset is blocked(3)\n");
12889 break;
12890 }
12891
12892 wm_toggle_lanphypc_pch_lpt(sc);
12893
12894 if (sc->sc_type >= WM_T_PCH_LPT) {
12895 if (wm_phy_is_accessible_pchlan(sc) == true)
12896 break;
12897
12898 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12899 reg &= ~CTRL_EXT_FORCE_SMBUS;
12900 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12901
12902 if (wm_phy_is_accessible_pchlan(sc) == true)
12903 break;
12904 rv = -1;
12905 }
12906 break;
12907 default:
12908 break;
12909 }
12910
12911 /* Release semaphore */
12912 sc->phy.release(sc);
12913
12914 if (rv == 0) {
12915 if (wm_phy_resetisblocked(sc)) {
12916 printf("XXX reset is blocked(4)\n");
12917 goto out;
12918 }
12919 wm_reset_phy(sc);
12920 if (wm_phy_resetisblocked(sc))
12921 printf("XXX reset is blocked(4)\n");
12922 }
12923
12924 out:
12925 /*
12926 * Ungate automatic PHY configuration by hardware on non-managed 82579
12927 */
12928 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
12929 delay(10*1000);
12930 wm_gate_hw_phy_config_ich8lan(sc, false);
12931 }
12932 }
12933
12934 static void
12935 wm_init_manageability(struct wm_softc *sc)
12936 {
12937
12938 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12939 device_xname(sc->sc_dev), __func__));
12940 if (sc->sc_flags & WM_F_HAS_MANAGE) {
12941 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
12942 uint32_t manc = CSR_READ(sc, WMREG_MANC);
12943
12944 /* Disable hardware interception of ARP */
12945 manc &= ~MANC_ARP_EN;
12946
12947 /* Enable receiving management packets to the host */
12948 if (sc->sc_type >= WM_T_82571) {
12949 manc |= MANC_EN_MNG2HOST;
12950 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
12951 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
12952 }
12953
12954 CSR_WRITE(sc, WMREG_MANC, manc);
12955 }
12956 }
12957
12958 static void
12959 wm_release_manageability(struct wm_softc *sc)
12960 {
12961
12962 if (sc->sc_flags & WM_F_HAS_MANAGE) {
12963 uint32_t manc = CSR_READ(sc, WMREG_MANC);
12964
12965 manc |= MANC_ARP_EN;
12966 if (sc->sc_type >= WM_T_82571)
12967 manc &= ~MANC_EN_MNG2HOST;
12968
12969 CSR_WRITE(sc, WMREG_MANC, manc);
12970 }
12971 }
12972
12973 static void
12974 wm_get_wakeup(struct wm_softc *sc)
12975 {
12976
12977 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
12978 switch (sc->sc_type) {
12979 case WM_T_82573:
12980 case WM_T_82583:
12981 sc->sc_flags |= WM_F_HAS_AMT;
12982 /* FALLTHROUGH */
12983 case WM_T_80003:
12984 case WM_T_82575:
12985 case WM_T_82576:
12986 case WM_T_82580:
12987 case WM_T_I350:
12988 case WM_T_I354:
12989 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
12990 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
12991 /* FALLTHROUGH */
12992 case WM_T_82541:
12993 case WM_T_82541_2:
12994 case WM_T_82547:
12995 case WM_T_82547_2:
12996 case WM_T_82571:
12997 case WM_T_82572:
12998 case WM_T_82574:
12999 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
13000 break;
13001 case WM_T_ICH8:
13002 case WM_T_ICH9:
13003 case WM_T_ICH10:
13004 case WM_T_PCH:
13005 case WM_T_PCH2:
13006 case WM_T_PCH_LPT:
13007 case WM_T_PCH_SPT:
13008 sc->sc_flags |= WM_F_HAS_AMT;
13009 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
13010 break;
13011 default:
13012 break;
13013 }
13014
13015 /* 1: HAS_MANAGE */
13016 if (wm_enable_mng_pass_thru(sc) != 0)
13017 sc->sc_flags |= WM_F_HAS_MANAGE;
13018
13019 #ifdef WM_DEBUG
13020 printf("\n");
13021 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
13022 printf("HAS_AMT,");
13023 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
13024 printf("ARC_SUBSYS_VALID,");
13025 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
13026 printf("ASF_FIRMWARE_PRES,");
13027 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
13028 printf("HAS_MANAGE,");
13029 printf("\n");
13030 #endif
13031 /*
13032 	 * Note that the WOL flags are set after the EEPROM-related reset
13033 	 * code has run.
13034 */
13035 }
13036
13037 /*
13038 * Unconfigure Ultra Low Power mode.
13039 * Only for I217 and newer (see below).
13040 */
13041 static void
13042 wm_ulp_disable(struct wm_softc *sc)
13043 {
13044 uint32_t reg;
13045 int i = 0;
13046
13047 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13048 device_xname(sc->sc_dev), __func__));
13049 /* Exclude old devices */
13050 if ((sc->sc_type < WM_T_PCH_LPT)
13051 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
13052 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
13053 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
13054 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
13055 return;
13056
13057 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
13058 /* Request ME un-configure ULP mode in the PHY */
13059 reg = CSR_READ(sc, WMREG_H2ME);
13060 reg &= ~H2ME_ULP;
13061 reg |= H2ME_ENFORCE_SETTINGS;
13062 CSR_WRITE(sc, WMREG_H2ME, reg);
13063
13064 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
13065 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
13066 if (i++ == 30) {
13067 printf("%s timed out\n", __func__);
13068 return;
13069 }
13070 delay(10 * 1000);
13071 }
13072 reg = CSR_READ(sc, WMREG_H2ME);
13073 reg &= ~H2ME_ENFORCE_SETTINGS;
13074 CSR_WRITE(sc, WMREG_H2ME, reg);
13075
13076 return;
13077 }
13078
13079 /* Acquire semaphore */
13080 sc->phy.acquire(sc);
13081
13082 /* Toggle LANPHYPC */
13083 wm_toggle_lanphypc_pch_lpt(sc);
13084
13085 /* Unforce SMBus mode in PHY */
13086 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
13087 if (reg == 0x0000 || reg == 0xffff) {
13088 uint32_t reg2;
13089
13090 printf("%s: Force SMBus first.\n", __func__);
13091 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
13092 reg2 |= CTRL_EXT_FORCE_SMBUS;
13093 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
13094 delay(50 * 1000);
13095
13096 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
13097 }
13098 reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
13099 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
13100
13101 /* Unforce SMBus mode in MAC */
13102 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13103 reg &= ~CTRL_EXT_FORCE_SMBUS;
13104 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13105
13106 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
13107 reg |= HV_PM_CTRL_K1_ENA;
13108 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
13109
13110 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
13111 reg &= ~(I218_ULP_CONFIG1_IND
13112 | I218_ULP_CONFIG1_STICKY_ULP
13113 | I218_ULP_CONFIG1_RESET_TO_SMBUS
13114 | I218_ULP_CONFIG1_WOL_HOST
13115 | I218_ULP_CONFIG1_INBAND_EXIT
13116 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
13117 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
13118 | I218_ULP_CONFIG1_DIS_SMB_PERST);
13119 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
13120 reg |= I218_ULP_CONFIG1_START;
13121 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
13122
13123 reg = CSR_READ(sc, WMREG_FEXTNVM7);
13124 reg &= ~FEXTNVM7_DIS_SMB_PERST;
13125 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
13126
13127 /* Release semaphore */
13128 sc->phy.release(sc);
13129 wm_gmii_reset(sc);
13130 delay(50 * 1000);
13131 }
13132
13133 /* WOL in the newer chipset interfaces (pchlan) */
13134 static void
13135 wm_enable_phy_wakeup(struct wm_softc *sc)
13136 {
13137 #if 0
13138 uint16_t preg;
13139
13140 /* Copy MAC RARs to PHY RARs */
13141
13142 /* Copy MAC MTA to PHY MTA */
13143
13144 /* Configure PHY Rx Control register */
13145
13146 /* Enable PHY wakeup in MAC register */
13147
13148 /* Configure and enable PHY wakeup in PHY registers */
13149
13150 /* Activate PHY wakeup */
13151
13152 /* XXX */
13153 #endif
13154 }
13155
13156 /* Power down workaround on D3 */
13157 static void
13158 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
13159 {
13160 uint32_t reg;
13161 int i;
13162
13163 for (i = 0; i < 2; i++) {
13164 /* Disable link */
13165 reg = CSR_READ(sc, WMREG_PHY_CTRL);
13166 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
13167 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13168
13169 /*
13170 * Call gig speed drop workaround on Gig disable before
13171 * accessing any PHY registers
13172 */
13173 if (sc->sc_type == WM_T_ICH8)
13174 wm_gig_downshift_workaround_ich8lan(sc);
13175
13176 /* Write VR power-down enable */
13177 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
13178 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
13179 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
13180 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
13181
13182 /* Read it back and test */
13183 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
13184 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
13185 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
13186 break;
13187
13188 /* Issue PHY reset and repeat at most one more time */
13189 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
13190 }
13191 }
13192
13193 static void
13194 wm_enable_wakeup(struct wm_softc *sc)
13195 {
13196 uint32_t reg, pmreg;
13197 pcireg_t pmode;
13198
13199 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13200 device_xname(sc->sc_dev), __func__));
13201
13202 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13203 &pmreg, NULL) == 0)
13204 return;
13205
13206 /* Advertise the wakeup capability */
13207 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
13208 | CTRL_SWDPIN(3));
13209 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
13210
13211 /* ICH workaround */
13212 switch (sc->sc_type) {
13213 case WM_T_ICH8:
13214 case WM_T_ICH9:
13215 case WM_T_ICH10:
13216 case WM_T_PCH:
13217 case WM_T_PCH2:
13218 case WM_T_PCH_LPT:
13219 case WM_T_PCH_SPT:
13220 /* Disable gig during WOL */
13221 reg = CSR_READ(sc, WMREG_PHY_CTRL);
13222 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
13223 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13224 if (sc->sc_type == WM_T_PCH)
13225 wm_gmii_reset(sc);
13226
13227 /* Power down workaround */
13228 if (sc->sc_phytype == WMPHY_82577) {
13229 struct mii_softc *child;
13230
13231 /* Assume that the PHY is copper */
13232 child = LIST_FIRST(&sc->sc_mii.mii_phys);
13233 if ((child != NULL) && (child->mii_mpd_rev <= 2))
13234 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
13235 (768 << 5) | 25, 0x0444); /* magic num */
13236 }
13237 break;
13238 default:
13239 break;
13240 }
13241
13242 /* Keep the laser running on fiber adapters */
13243 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
13244 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
13245 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13246 reg |= CTRL_EXT_SWDPIN(3);
13247 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13248 }
13249
13250 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
13251 #if 0 /* for the multicast packet */
13252 reg |= WUFC_MC;
13253 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
13254 #endif
13255
13256 if (sc->sc_type >= WM_T_PCH)
13257 wm_enable_phy_wakeup(sc);
13258 else {
13259 CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
13260 CSR_WRITE(sc, WMREG_WUFC, reg);
13261 }
13262
13263 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
13264 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
13265 || (sc->sc_type == WM_T_PCH2))
13266 && (sc->sc_phytype == WMPHY_IGP_3))
13267 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
13268
13269 /* Request PME */
13270 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
13271 #if 0
13272 /* Disable WOL */
13273 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
13274 #else
13275 /* For WOL */
13276 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
13277 #endif
13278 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
13279 }
13280
13281 /* LPLU (Low Power Link Up) */
13282
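/*
 * Disable D0 LPLU.  The generic version clears PHY_CTRL_D0A_LPLU (and the
 * GbE disable bit) in the MAC's PHY_CTRL register; the PCH version below
 * instead clears the LPLU bit in the PHY's HV_OEM_BITS register and
 * restarts auto-negotiation.
 */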
13283 static void
13284 wm_lplu_d0_disable(struct wm_softc *sc)
13285 {
13286 uint32_t reg;
13287
13288 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13289 device_xname(sc->sc_dev), __func__));
13290
13291 reg = CSR_READ(sc, WMREG_PHY_CTRL);
13292 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
13293 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13294 }
13295
13296 static void
13297 wm_lplu_d0_disable_pch(struct wm_softc *sc)
13298 {
13299 uint32_t reg;
13300
13301 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13302 device_xname(sc->sc_dev), __func__));
13303
13304 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
13305 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
13306 reg |= HV_OEM_BITS_ANEGNOW;
13307 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
13308 }
13309
13310 /* EEE */
13311
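/*
 * Enable or disable Energy Efficient Ethernet on I350-class devices
 * according to the WM_F_EEE flag: the 100M/1G EEE advertisement bits in
 * IPCNFG and the Tx/Rx LPI enables (plus LPI flow control) in EEER are
 * set or cleared together.
 */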
13312 static void
13313 wm_set_eee_i350(struct wm_softc *sc)
13314 {
13315 uint32_t ipcnfg, eeer;
13316
13317 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
13318 eeer = CSR_READ(sc, WMREG_EEER);
13319
13320 if ((sc->sc_flags & WM_F_EEE) != 0) {
13321 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
13322 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
13323 | EEER_LPI_FC);
13324 } else {
13325 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
13326 ipcnfg &= ~IPCNFG_10BASE_TE;
13327 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
13328 | EEER_LPI_FC);
13329 }
13330
13331 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
13332 CSR_WRITE(sc, WMREG_EEER, eeer);
13333 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
13334 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
13335 }
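
#if 0
/*
 * Usage sketch (not part of the driver, helper name is hypothetical):
 * toggling EEE at run time would amount to flipping WM_F_EEE in sc_flags
 * and re-running the routine above.
 */
static void
wm_example_set_eee(struct wm_softc *sc, bool enable)
{

	if (enable)
		sc->sc_flags |= WM_F_EEE;
	else
		sc->sc_flags &= ~WM_F_EEE;
	wm_set_eee_i350(sc);
}
#endif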
13336
13337 /*
13338 * Workarounds (mainly PHY related).
13339 * Basically, PHY's workarounds are in the PHY drivers.
13340 */
13341
13342 /* Work-around for 82566 Kumeran PCS lock loss */
13343 static void
13344 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
13345 {
13346 #if 0
13347 int miistatus, active, i;
13348 int reg;
13349
13350 miistatus = sc->sc_mii.mii_media_status;
13351
13352 /* If the link is not up, do nothing */
13353 if ((miistatus & IFM_ACTIVE) == 0)
13354 return;
13355
13356 active = sc->sc_mii.mii_media_active;
13357
13358 /* Nothing to do if the link speed is not 1Gbps */
13359 if (IFM_SUBTYPE(active) != IFM_1000_T)
13360 return;
13361
13362 for (i = 0; i < 10; i++) {
13363 /* read twice */
13364 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
13365 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
13366 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
13367 goto out; /* GOOD! */
13368
13369 /* Reset the PHY */
13370 wm_gmii_reset(sc);
13371 delay(5*1000);
13372 }
13373
13374 /* Disable GigE link negotiation */
13375 reg = CSR_READ(sc, WMREG_PHY_CTRL);
13376 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
13377 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13378
13379 /*
13380 * Call gig speed drop workaround on Gig disable before accessing
13381 * any PHY registers.
13382 */
13383 wm_gig_downshift_workaround_ich8lan(sc);
13384
13385 out:
13386 return;
13387 #endif
13388 }
13389
13390 /* Workaround for the "WoL from S5 stops working" issue */
13391 static void
13392 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
13393 {
13394 uint16_t kmrn_reg;
13395
13396 /* Only for igp3 */
13397 if (sc->sc_phytype == WMPHY_IGP_3) {
13398 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
13399 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
13400 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
13401 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
13402 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
13403 }
13404 }
13405
13406 /*
13407 * Workarounds for PCH (82577/82578) PHYs
13408 * XXX should be moved to a new PHY driver?
13409 */
13410 static void
13411 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
13412 {
13413
13414 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13415 device_xname(sc->sc_dev), __func__));
13416 KASSERT(sc->sc_type == WM_T_PCH);
13417
13418 if (sc->sc_phytype == WMPHY_82577)
13419 wm_set_mdio_slow_mode_hv(sc);
13420
13421 /* (PCH rev. 2) && (82577 && (PHY rev 2 or 3)): workaround not implemented here */
13422
13423 /* (82577 && (PHY rev 1 or 2)) || (82578 && PHY rev 1): workaround not implemented here */
13424
13425 /* 82578 */
13426 if (sc->sc_phytype == WMPHY_82578) {
13427 struct mii_softc *child;
13428
13429 /*
13430 * Return registers to default by doing a soft reset then
13431 * writing 0x3140 to the control register
13432 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
13433 */
13434 child = LIST_FIRST(&sc->sc_mii.mii_phys);
13435 if ((child != NULL) && (child->mii_mpd_rev < 2)) {
13436 PHY_RESET(child);
13437 sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
13438 0x3140);
13439 }
13440 }
13441
13442 /* Select page 0 */
13443 sc->phy.acquire(sc);
13444 wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
13445 sc->phy.release(sc);
13446
13447 /*
13448 * Configure the K1 Si workaround during PHY reset, assuming there is
13449 * link, so that K1 is disabled if the link is at 1Gbps.
13450 */
13451 wm_k1_gig_workaround_hv(sc, 1);
13452 }
13453
13454 static void
13455 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
13456 {
13457
13458 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13459 device_xname(sc->sc_dev), __func__));
13460 KASSERT(sc->sc_type == WM_T_PCH2);
13461
13462 wm_set_mdio_slow_mode_hv(sc);
13463 }
13464
13465 static int
13466 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
13467 {
13468 int k1_enable = sc->sc_nvm_k1_enabled;
13469
13470 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13471 device_xname(sc->sc_dev), __func__));
13472
13473 if (sc->phy.acquire(sc) != 0)
13474 return -1;
13475
13476 if (link) {
13477 k1_enable = 0;
13478
13479 /* Link stall fix for link up */
13480 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
13481 } else {
13482 /* Link stall fix for link down */
13483 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
13484 }
13485
13486 wm_configure_k1_ich8lan(sc, k1_enable);
13487 sc->phy.release(sc);
13488
13489 return 0;
13490 }
13491
13492 static void
13493 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
13494 {
13495 uint32_t reg;
13496
13497 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
13498 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
13499 reg | HV_KMRN_MDIO_SLOW);
13500 }
13501
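/*
 * Set or clear the K1 enable bit in the Kumeran K1_CONFIG register, then
 * briefly force the MAC speed setting (CTRL_FRCSPD together with
 * CTRL_EXT_SPD_BYPS), presumably so the change takes effect on the Kumeran
 * interface, and finally restore the original CTRL/CTRL_EXT values.
 * Expects the PHY lock to be held (the _locked kmrn accessors are used).
 */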
13502 static void
13503 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
13504 {
13505 uint32_t ctrl, ctrl_ext, tmp;
13506 uint16_t kmrn_reg;
13507
13508 kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
13509
13510 if (k1_enable)
13511 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
13512 else
13513 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
13514
13515 wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
13516
13517 delay(20);
13518
13519 ctrl = CSR_READ(sc, WMREG_CTRL);
13520 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13521
13522 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
13523 tmp |= CTRL_FRCSPD;
13524
13525 CSR_WRITE(sc, WMREG_CTRL, tmp);
13526 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
13527 CSR_WRITE_FLUSH(sc);
13528 delay(20);
13529
13530 CSR_WRITE(sc, WMREG_CTRL, ctrl);
13531 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13532 CSR_WRITE_FLUSH(sc);
13533 delay(20);
13534 }
13535
13536 /* Special case - for 82575 we need to do manual init ... */
13537 static void
13538 wm_reset_init_script_82575(struct wm_softc *sc)
13539 {
13540 /*
13541 * Remark: this is untested code - we have no board without EEPROM.
13542 * Same setup as mentioned in the FreeBSD driver for the i82575.
13543 */
13544
13545 /* SerDes configuration via SERDESCTRL */
13546 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
13547 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
13548 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
13549 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
13550
13551 /* CCM configuration via CCMCTL register */
13552 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
13553 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
13554
13555 /* PCIe lanes configuration */
13556 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
13557 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
13558 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
13559 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
13560
13561 /* PCIe PLL Configuration */
13562 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
13563 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
13564 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
13565 }
13566
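/*
 * Restore the MDIO routing bits in MDICNFG after a reset on SGMII-attached
 * 82580 ports: re-derive MDICNFG_DEST and MDICNFG_COM_MDIO from the
 * per-port CFG3 word in the NVM and write them back.
 */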
13567 static void
13568 wm_reset_mdicnfg_82580(struct wm_softc *sc)
13569 {
13570 uint32_t reg;
13571 uint16_t nvmword;
13572 int rv;
13573
13574 if ((sc->sc_flags & WM_F_SGMII) == 0)
13575 return;
13576
13577 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
13578 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
13579 if (rv != 0) {
13580 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
13581 __func__);
13582 return;
13583 }
13584
13585 reg = CSR_READ(sc, WMREG_MDICNFG);
13586 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
13587 reg |= MDICNFG_DEST;
13588 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
13589 reg |= MDICNFG_COM_MDIO;
13590 CSR_WRITE(sc, WMREG_MDICNFG, reg);
13591 }
13592
13593 #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff))
13594
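/*
 * Check whether the PHY is reachable over MDIO on PCH and newer devices:
 * read the PHY ID registers (up to twice), retry in MDIO slow mode on
 * pre-LPT parts, and when the PHY turns out to be accessible on LPT/SPT,
 * un-force SMBus mode in both the PHY and the MAC (only if ME firmware is
 * not active).  Expects the PHY lock to be held by the caller.
 */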
13595 static bool
13596 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
13597 {
13598 int i;
13599 uint32_t reg;
13600 uint16_t id1, id2;
13601
13602 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13603 device_xname(sc->sc_dev), __func__));
13604 id1 = id2 = 0xffff;
13605 for (i = 0; i < 2; i++) {
13606 id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
13607 if (MII_INVALIDID(id1))
13608 continue;
13609 id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
13610 if (MII_INVALIDID(id2))
13611 continue;
13612 break;
13613 }
13614 if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
13615 goto out;
13616 }
13617
13618 if (sc->sc_type < WM_T_PCH_LPT) {
13619 sc->phy.release(sc);
13620 wm_set_mdio_slow_mode_hv(sc);
13621 id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
13622 id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
13623 sc->phy.acquire(sc);
13624 }
13625 if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
13626 printf("%s: %s: failed to read PHY ID\n", device_xname(sc->sc_dev), __func__);
13627 return false;
13628 }
13629 out:
13630 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
13631 /* Only unforce SMBus if ME is not active */
13632 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
13633 /* Unforce SMBus mode in PHY */
13634 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
13635 CV_SMB_CTRL);
13636 reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
13637 wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
13638 CV_SMB_CTRL, reg);
13639
13640 /* Unforce SMBus mode in MAC */
13641 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13642 reg &= ~CTRL_EXT_FORCE_SMBUS;
13643 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13644 }
13645 }
13646 return true;
13647 }
13648
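/*
 * Toggle the LANPHYPC (LAN PHY power control) value to force the PHY
 * through a power cycle: set the PHY config counter to 50msec, drive
 * LANPHYPC low via the override/value bits in CTRL for about 1ms, release
 * the override, and wait for the PHY to come back (a fixed 50ms delay on
 * pre-LPT devices, or polling CTRL_EXT_LPCD on LPT and newer).
 */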
13649 static void
13650 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
13651 {
13652 uint32_t reg;
13653 int i;
13654
13655 /* Set PHY Config Counter to 50msec */
13656 reg = CSR_READ(sc, WMREG_FEXTNVM3);
13657 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
13658 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
13659 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
13660
13661 /* Toggle LANPHYPC */
13662 reg = CSR_READ(sc, WMREG_CTRL);
13663 reg |= CTRL_LANPHYPC_OVERRIDE;
13664 reg &= ~CTRL_LANPHYPC_VALUE;
13665 CSR_WRITE(sc, WMREG_CTRL, reg);
13666 CSR_WRITE_FLUSH(sc);
13667 delay(1000);
13668 reg &= ~CTRL_LANPHYPC_OVERRIDE;
13669 CSR_WRITE(sc, WMREG_CTRL, reg);
13670 CSR_WRITE_FLUSH(sc);
13671
13672 if (sc->sc_type < WM_T_PCH_LPT)
13673 delay(50 * 1000);
13674 else {
13675 i = 20;
13676
13677 do {
13678 delay(5 * 1000);
13679 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
13680 && i--);
13681
13682 delay(30 * 1000);
13683 }
13684 }
13685
13686 static int
13687 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
13688 {
13689 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
13690 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
13691 uint32_t rxa;
13692 uint16_t scale = 0, lat_enc = 0;
13693 int64_t lat_ns, value;
13694
13695 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13696 device_xname(sc->sc_dev), __func__));
13697
13698 if (link) {
13699 pcireg_t preg;
13700 uint16_t max_snoop, max_nosnoop, max_ltr_enc;
13701
13702 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
13703
13704 /*
13705 * Determine the maximum latency tolerated by the device.
13706 *
13707 * Per the PCIe spec, the tolerated latencies are encoded as
13708 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
13709 * a 10-bit value (0-1023) to provide a range from 1 ns to
13710 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
13711 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
13712 */
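		/*
		 * Worked example (illustrative numbers only): with a 24KB
		 * Rx buffer allocation and a 1500 byte MTU at 1000Mb/s,
		 * lat_ns = (24 * 1024 - 2 * 1500) * 8 * 1000 / 1000
		 *        = 172608.  The loop below then yields scale = 2
		 * and value = 169, i.e. an encoded latency of
		 * 169 * 2^10 ns, roughly 173us.
		 */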
13713 lat_ns = ((int64_t)rxa * 1024 -
13714 (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
13715 if (lat_ns < 0)
13716 lat_ns = 0;
13717 else {
13718 uint32_t status;
13719 uint16_t speed;
13720
13721 status = CSR_READ(sc, WMREG_STATUS);
13722 switch (__SHIFTOUT(status, STATUS_SPEED)) {
13723 case STATUS_SPEED_10:
13724 speed = 10;
13725 break;
13726 case STATUS_SPEED_100:
13727 speed = 100;
13728 break;
13729 case STATUS_SPEED_1000:
13730 speed = 1000;
13731 break;
13732 default:
13733 printf("%s: Unknown speed (status = %08x)\n",
13734 device_xname(sc->sc_dev), status);
13735 return -1;
13736 }
13737 lat_ns /= speed;
13738 }
13739 value = lat_ns;
13740
13741 while (value > LTRV_VALUE) {
13742 scale++;
13743 value = howmany(value, __BIT(5));
13744 }
13745 if (scale > LTRV_SCALE_MAX) {
13746 printf("%s: Invalid LTR latency scale %d\n",
13747 device_xname(sc->sc_dev), scale);
13748 return -1;
13749 }
13750 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
13751
13752 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13753 WM_PCI_LTR_CAP_LPT);
13754 max_snoop = preg & 0xffff;
13755 max_nosnoop = preg >> 16;
13756
13757 max_ltr_enc = MAX(max_snoop, max_nosnoop);
13758
13759 if (lat_enc > max_ltr_enc) {
13760 lat_enc = max_ltr_enc;
13761 }
13762 }
13763 /* Set the Snoop and No-Snoop latencies to the same value */
13764 reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
13765 CSR_WRITE(sc, WMREG_LTRV, reg);
13766
13767 return 0;
13768 }
13769
13770 /*
13771 * I210 Errata 25 and I211 Errata 10
13772 * Slow System Clock.
13773 */
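/*
 * The internal PHY's PLL may come up unconfigured on these devices.  Retry
 * up to WM_MAX_PLL_TRIES times: read the PHY PLL frequency register and,
 * while it still reads back as unconfigured, reset the internal PHY, cycle
 * the device through D3hot/D0 with a modified iNVM autoload word and check
 * again.  The WUC and MDICNFG registers are saved and restored around the
 * whole sequence.
 */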
13774 static void
13775 wm_pll_workaround_i210(struct wm_softc *sc)
13776 {
13777 uint32_t mdicnfg, wuc;
13778 uint32_t reg;
13779 pcireg_t pcireg;
13780 uint32_t pmreg;
13781 uint16_t nvmword, tmp_nvmword;
13782 int phyval;
13783 bool wa_done = false;
13784 int i;
13785
13786 /* Save WUC and MDICNFG registers */
13787 wuc = CSR_READ(sc, WMREG_WUC);
13788 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
13789
13790 reg = mdicnfg & ~MDICNFG_DEST;
13791 CSR_WRITE(sc, WMREG_MDICNFG, reg);
13792
13793 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
13794 nvmword = INVM_DEFAULT_AL;
13795 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
13796
13797 /* Get Power Management cap offset */
13798 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13799 &pmreg, NULL) == 0)
13800 return;
13801 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
13802 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
13803 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
13804
13805 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
13806 break; /* OK */
13807 }
13808
13809 wa_done = true;
13810 /* Directly reset the internal PHY */
13811 reg = CSR_READ(sc, WMREG_CTRL);
13812 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
13813
13814 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13815 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
13816 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13817
13818 CSR_WRITE(sc, WMREG_WUC, 0);
13819 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
13820 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13821
13822 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13823 pmreg + PCI_PMCSR);
13824 pcireg |= PCI_PMCSR_STATE_D3;
13825 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13826 pmreg + PCI_PMCSR, pcireg);
13827 delay(1000);
13828 pcireg &= ~PCI_PMCSR_STATE_D3;
13829 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13830 pmreg + PCI_PMCSR, pcireg);
13831
13832 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
13833 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13834
13835 /* Restore WUC register */
13836 CSR_WRITE(sc, WMREG_WUC, wuc);
13837 }
13838
13839 /* Restore MDICNFG setting */
13840 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
13841 if (wa_done)
13842 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
13843 }
13844