/*	$NetBSD: if_wm.c,v 1.194 2010/01/21 08:52:20 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.194 2010/01/21 08:52:20 msaitoh Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
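
/*
 * XXX Editorial note: the unbraced "if" inside DPRINTF() above is prone
 * to the classic dangling-else hazard when the macro is used as the body
 * of an if/else.  A conventional hardening, shown here only as a sketch
 * and not applied, would be:
 *
 *	#define DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
 */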

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
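
/*
 * Example of the ring arithmetic above: because the ring sizes are
 * powers of two, WM_NEXTTX() wraps with a mask rather than a modulo;
 * with WM_NTXDESC_82544 (4096) the mask is 0xfff, so
 * WM_NEXTTX(sc, 4095) == (4096 & 0xfff) == 0.
 */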

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
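
/*
 * The arithmetic behind the comment above: a full-sized jumbo frame
 * (a bit over 9000 bytes) split across 2k (MCLBYTES) buffers occupies
 * 5 descriptors, so 256 descriptors hold roughly 256 / 5 ~= 51 such
 * packets at once.
 */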

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
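
/*
 * These macros turn a descriptor index into a byte offset within the
 * control-data clump: WM_CDRXOFF(0) is 0, and WM_CDTXOFF(x) lands in
 * the Tx array that follows the WM_NRXDESC receive descriptors, since
 * wcd_rxdescs is laid out first in wm_control_data_82544.
 */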

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
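
/*
 * sc_rxtailp above is the classic "pointer to the tail pointer" idiom:
 * it points at sc_rxhead while the chain is empty and at the previous
 * mbuf's m_next afterwards, so WM_RXCHAIN_LINK() appends in O(1)
 * without walking the chain or special-casing the first mbuf.
 */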

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
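
/*
 * Worked example for WM_CDTXSYNC(): syncing 4 descriptors starting at
 * index 4094 of a 4096-entry ring issues two bus_dmamap_sync() calls,
 * one for entries 4094-4095 at the tail of the ring and one for
 * entries 0-1 after the wrap.
 */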

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
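
/*
 * sc_align_tweak is either 0 or 2: with the 2-byte tweak the 14-byte
 * Ethernet header ends on a 4-byte boundary (2 + 14 == 16), so the IP
 * header that follows is naturally aligned.  Per the BRAINDAMAGE note
 * above, the tweak is forced to 0 whenever the configured frame size
 * would no longer fit in (2K - 2).
 */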

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrsize(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);

CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82578LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82578LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
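
/*
 * wm_set_dma_addr() keeps the descriptor layout identical on 32-bit and
 * 64-bit platforms.  For example, v == 0x123456789a stores
 * wa_low == htole32(0x3456789a) and wa_high == htole32(0x12); when
 * bus_addr_t is only 32 bits wide, the high word is simply zero.
 */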

static void
wm_set_spiaddrsize(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when
			 * the I/O BAR is 0, which looks like it has
			 * not been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
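	/*
	 * The 4G constraint is enforced below by handing bus_dmamem_alloc()
	 * a boundary argument of 0x100000000: the resulting allocation is
	 * then guaranteed not to cross a 4GB line.
	 */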
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrsize(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrsize(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Check whether EEPROM is present or not */
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			aprint_error_dev(sc->sc_dev,
			    "EEPROM PRESENT bit isn't set\n");
			sc->sc_flags |= WM_F_EEPROM_INVALID;
		}
		/* FALLTHROUGH */
	case WM_T_ICH10:
	case WM_T_PCH:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
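		/*
		 * GFPREG holds the base sector of the flash region in its
		 * low bits and the limit sector in the bits above 16; the
		 * span (limit + 1 - base) in sectors, times the sector
		 * size, is the region size in bytes.  The region holds two
		 * NVM banks, so dividing by 2 * sizeof(uint16_t) yields
		 * the per-bank size in 16-bit words, matching the
		 * arithmetic above.
		 */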
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the
	 * checksum.  This allows the EEPROM type to be printed correctly
	 * in the case that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Check a second time: some PCI-e parts fail the
		 * first check because the link is in a sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, unless it is
	 * found in the device properties first.
	 */
	ea = prop_dictionary_get(dict, "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}
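	/*
	 * The unpacking above works because the EEPROM stores the MAC as
	 * three little-endian 16-bit words: myea[0] == 0x1100, for
	 * example, yields enaddr[0] == 0x00 and enaddr[1] == 0x11.
	 */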

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}
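	/*
	 * For example, if the EEPROM holds 00:11:22:33:44:55, the second
	 * port of a dual-port adapter comes up as 00:11:22:33:44:54
	 * (last byte XORed with 1).
	 */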

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	if (sc->sc_type == WM_T_PCH) {
		uint16_t val;

		/* Save the NVM K1 bit setting */
		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);

		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
			sc->sc_nvm_k1_enabled = 1;
		else
			sc->sc_nvm_k1_enabled = 0;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
	    || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc, wmp->wmp_product);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc, wmp->wmp_product);
	}

	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* Check for jumbo frame */
	switch (sc->sc_type) {
	case WM_T_82573:
		/* XXX limited to 9234 if ASPM is disabled */
		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* XXX limited to 9234 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_PCH:
		/* XXX limited to 4096 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82583:
	case WM_T_ICH8:
		/* No support for jumbo frame */
		break;
	default:
		/* ETHER_MAX_LEN_JUMBO */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	}

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
	 * on i82543 and later.
	 */
1671 if (sc->sc_type >= WM_T_82543) {
1672 ifp->if_capabilities |=
1673 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1674 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1675 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1676 IFCAP_CSUM_TCPv6_Tx |
1677 IFCAP_CSUM_UDPv6_Tx;
1678 }
1679
1680 /*
1681 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1682 *
1683 * 82541GI (8086:1076) ... no
1684 * 82572EI (8086:10b9) ... yes
1685 */
1686 if (sc->sc_type >= WM_T_82571) {
1687 ifp->if_capabilities |=
1688 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1689 }
1690
1691 /*
1692 	 * If we're an i82544 or greater (except i82547), we can do
1693 * TCP segmentation offload.
1694 */
1695 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1696 ifp->if_capabilities |= IFCAP_TSOv4;
1697 }
1698
1699 if (sc->sc_type >= WM_T_82571) {
1700 ifp->if_capabilities |= IFCAP_TSOv6;
1701 }
1702
1703 /*
1704 * Attach the interface.
1705 */
1706 if_attach(ifp);
1707 ether_ifattach(ifp, enaddr);
1708 #if NRND > 0
1709 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1710 #endif
1711
1712 #ifdef WM_EVENT_COUNTERS
1713 /* Attach event counters. */
1714 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1715 NULL, xname, "txsstall");
1716 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1717 NULL, xname, "txdstall");
1718 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1719 NULL, xname, "txfifo_stall");
1720 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1721 NULL, xname, "txdw");
1722 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1723 NULL, xname, "txqe");
1724 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1725 NULL, xname, "rxintr");
1726 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1727 NULL, xname, "linkintr");
1728
1729 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1730 NULL, xname, "rxipsum");
1731 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1732 NULL, xname, "rxtusum");
1733 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1734 NULL, xname, "txipsum");
1735 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1736 NULL, xname, "txtusum");
1737 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1738 NULL, xname, "txtusum6");
1739
1740 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1741 NULL, xname, "txtso");
1742 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1743 NULL, xname, "txtso6");
1744 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1745 NULL, xname, "txtsopain");
1746
1747 for (i = 0; i < WM_NTXSEGS; i++) {
1748 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1749 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1750 NULL, xname, wm_txseg_evcnt_names[i]);
1751 }
1752
1753 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1754 NULL, xname, "txdrop");
1755
1756 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1757 NULL, xname, "tu");
1758
1759 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1760 NULL, xname, "tx_xoff");
1761 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1762 NULL, xname, "tx_xon");
1763 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1764 NULL, xname, "rx_xoff");
1765 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1766 NULL, xname, "rx_xon");
1767 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1768 NULL, xname, "rx_macctl");
1769 #endif /* WM_EVENT_COUNTERS */
1770
1771 if (pmf_device_register(self, NULL, NULL))
1772 pmf_class_network_register(self, ifp);
1773 else
1774 aprint_error_dev(self, "couldn't establish power handler\n");
1775
1776 return;
1777
1778 /*
1779 * Free any resources we've allocated during the failed attach
1780 * attempt. Do this in reverse order and fall through.
1781 */
1782 fail_5:
1783 for (i = 0; i < WM_NRXDESC; i++) {
1784 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1785 bus_dmamap_destroy(sc->sc_dmat,
1786 sc->sc_rxsoft[i].rxs_dmamap);
1787 }
1788 fail_4:
1789 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1790 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1791 bus_dmamap_destroy(sc->sc_dmat,
1792 sc->sc_txsoft[i].txs_dmamap);
1793 }
1794 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1795 fail_3:
1796 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1797 fail_2:
1798 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1799 cdata_size);
1800 fail_1:
1801 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1802 fail_0:
1803 return;
1804 }
1805
1806 /*
1807 * wm_tx_offload:
1808 *
1809 * Set up TCP/IP checksumming parameters for the
1810 * specified packet.
1811 */
1812 static int
1813 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1814 uint8_t *fieldsp)
1815 {
1816 struct mbuf *m0 = txs->txs_mbuf;
1817 struct livengood_tcpip_ctxdesc *t;
1818 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1819 uint32_t ipcse;
1820 struct ether_header *eh;
1821 int offset, iphl;
1822 uint8_t fields;
1823
1824 /*
1825 * XXX It would be nice if the mbuf pkthdr had offset
1826 * fields for the protocol headers.
1827 */
1828
1829 eh = mtod(m0, struct ether_header *);
1830 switch (htons(eh->ether_type)) {
1831 case ETHERTYPE_IP:
1832 case ETHERTYPE_IPV6:
1833 offset = ETHER_HDR_LEN;
1834 break;
1835
1836 case ETHERTYPE_VLAN:
1837 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1838 break;
1839
1840 default:
1841 /*
1842 * Don't support this protocol or encapsulation.
1843 */
1844 *fieldsp = 0;
1845 *cmdp = 0;
1846 return 0;
1847 }
1848
1849 if ((m0->m_pkthdr.csum_flags &
1850 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1851 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1852 } else {
1853 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1854 }
1855 ipcse = offset + iphl - 1;
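	/*
	 * Worked example: for an untagged IPv4 frame with no IP options,
	 * offset = ETHER_HDR_LEN = 14 and iphl = 20, so ipcse = 33,
	 * the offset of the last byte of the IP header, which is what
	 * the IPCSE field of the context descriptor expects.
	 */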
1856
1857 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1858 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1859 seg = 0;
1860 fields = 0;
1861
1862 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1863 int hlen = offset + iphl;
1864 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1865
1866 if (__predict_false(m0->m_len <
1867 (hlen + sizeof(struct tcphdr)))) {
1868 /*
1869 * TCP/IP headers are not in the first mbuf; we need
1870 * to do this the slow and painful way. Let's just
1871 * hope this doesn't happen very often.
1872 */
1873 struct tcphdr th;
1874
1875 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1876
1877 m_copydata(m0, hlen, sizeof(th), &th);
1878 if (v4) {
1879 struct ip ip;
1880
1881 m_copydata(m0, offset, sizeof(ip), &ip);
1882 ip.ip_len = 0;
1883 m_copyback(m0,
1884 offset + offsetof(struct ip, ip_len),
1885 sizeof(ip.ip_len), &ip.ip_len);
1886 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1887 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1888 } else {
1889 struct ip6_hdr ip6;
1890
1891 m_copydata(m0, offset, sizeof(ip6), &ip6);
1892 ip6.ip6_plen = 0;
1893 m_copyback(m0,
1894 offset + offsetof(struct ip6_hdr, ip6_plen),
1895 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1896 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1897 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1898 }
1899 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1900 sizeof(th.th_sum), &th.th_sum);
1901
1902 hlen += th.th_off << 2;
1903 } else {
1904 /*
1905 * TCP/IP headers are in the first mbuf; we can do
1906 * this the easy way.
1907 */
1908 struct tcphdr *th;
1909
1910 if (v4) {
1911 struct ip *ip =
1912 (void *)(mtod(m0, char *) + offset);
1913 th = (void *)(mtod(m0, char *) + hlen);
1914
1915 ip->ip_len = 0;
1916 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1917 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1918 } else {
1919 struct ip6_hdr *ip6 =
1920 (void *)(mtod(m0, char *) + offset);
1921 th = (void *)(mtod(m0, char *) + hlen);
1922
1923 ip6->ip6_plen = 0;
1924 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1925 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1926 }
1927 hlen += th->th_off << 2;
1928 }
1929
1930 if (v4) {
1931 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1932 cmdlen |= WTX_TCPIP_CMD_IP;
1933 } else {
1934 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1935 ipcse = 0;
1936 }
1937 cmd |= WTX_TCPIP_CMD_TSE;
1938 cmdlen |= WTX_TCPIP_CMD_TSE |
1939 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1940 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1941 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1942 }
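	/*
	 * Example: for TSO of an untagged Ethernet/IPv4/TCP packet with
	 * no TCP options, hlen = 14 + 20 + 20 = 54, and with a 1500-byte
	 * MTU the stack typically sets segsz to 1460, so the context
	 * descriptor carries HDRLEN = 54 and MSS = 1460.
	 */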
1943
1944 /*
1945 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1946 * offload feature, if we load the context descriptor, we
1947 * MUST provide valid values for IPCSS and TUCSS fields.
1948 */
1949
1950 ipcs = WTX_TCPIP_IPCSS(offset) |
1951 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1952 WTX_TCPIP_IPCSE(ipcse);
1953 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1954 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1955 fields |= WTX_IXSM;
1956 }
1957
1958 offset += iphl;
1959
1960 if (m0->m_pkthdr.csum_flags &
1961 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1962 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1963 fields |= WTX_TXSM;
1964 tucs = WTX_TCPIP_TUCSS(offset) |
1965 WTX_TCPIP_TUCSO(offset +
1966 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1967 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1968 } else if ((m0->m_pkthdr.csum_flags &
1969 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1970 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1971 fields |= WTX_TXSM;
1972 tucs = WTX_TCPIP_TUCSS(offset) |
1973 WTX_TCPIP_TUCSO(offset +
1974 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1975 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1976 } else {
1977 /* Just initialize it to a valid TCP context. */
1978 tucs = WTX_TCPIP_TUCSS(offset) |
1979 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1980 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1981 }
1982
1983 /* Fill in the context descriptor. */
1984 t = (struct livengood_tcpip_ctxdesc *)
1985 &sc->sc_txdescs[sc->sc_txnext];
1986 t->tcpip_ipcs = htole32(ipcs);
1987 t->tcpip_tucs = htole32(tucs);
1988 t->tcpip_cmdlen = htole32(cmdlen);
1989 t->tcpip_seg = htole32(seg);
1990 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1991
1992 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1993 txs->txs_ndesc++;
1994
1995 *cmdp = cmd;
1996 *fieldsp = fields;
1997
1998 return 0;
1999 }
2000
2001 static void
2002 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2003 {
2004 struct mbuf *m;
2005 int i;
2006
2007 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2008 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2009 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2010 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2011 m->m_data, m->m_len, m->m_flags);
2012 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2013 i, i == 1 ? "" : "s");
2014 }
2015
2016 /*
2017 * wm_82547_txfifo_stall:
2018 *
2019 * Callout used to wait for the 82547 Tx FIFO to drain,
2020 * reset the FIFO pointers, and restart packet transmission.
2021 */
2022 static void
2023 wm_82547_txfifo_stall(void *arg)
2024 {
2025 struct wm_softc *sc = arg;
2026 int s;
2027
2028 s = splnet();
2029
2030 if (sc->sc_txfifo_stall) {
2031 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2032 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2033 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2034 /*
2035 * Packets have drained. Stop transmitter, reset
2036 * FIFO pointers, restart transmitter, and kick
2037 * the packet queue.
2038 */
2039 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2040 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2041 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2042 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2043 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2044 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2045 CSR_WRITE(sc, WMREG_TCTL, tctl);
2046 CSR_WRITE_FLUSH(sc);
2047
2048 sc->sc_txfifo_head = 0;
2049 sc->sc_txfifo_stall = 0;
2050 wm_start(&sc->sc_ethercom.ec_if);
2051 } else {
2052 /*
2053 * Still waiting for packets to drain; try again in
2054 * another tick.
2055 */
2056 callout_schedule(&sc->sc_txfifo_ch, 1);
2057 }
2058 }
2059
2060 splx(s);
2061 }
2062
2063 /*
2064 * wm_82547_txfifo_bugchk:
2065 *
2066 * Check for bug condition in the 82547 Tx FIFO. We need to
2067 * prevent enqueueing a packet that would wrap around the end
2068 	 * of the Tx FIFO ring buffer, otherwise the chip will croak.
2069 *
2070 * We do this by checking the amount of space before the end
2071 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2072 * the Tx FIFO, wait for all remaining packets to drain, reset
2073 * the internal FIFO pointers to the beginning, and restart
2074 * transmission on the interface.
2075 */
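/*
 * Example with the constants below: each packet occupies
 * roundup(len + WM_FIFO_HDR, WM_FIFO_HDR) bytes of FIFO, so a
 * 1518-byte frame accounts for 1536 bytes, and it is held back
 * (only possible in half-duplex mode) whenever 1536 >= 0x3e0 (992)
 * plus the space remaining before the end of the FIFO.
 */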
2076 #define WM_FIFO_HDR 0x10
2077 #define WM_82547_PAD_LEN 0x3e0
2078 static int
2079 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2080 {
2081 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2082 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2083
2084 /* Just return if already stalled. */
2085 if (sc->sc_txfifo_stall)
2086 return 1;
2087
2088 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2089 /* Stall only occurs in half-duplex mode. */
2090 goto send_packet;
2091 }
2092
2093 if (len >= WM_82547_PAD_LEN + space) {
2094 sc->sc_txfifo_stall = 1;
2095 callout_schedule(&sc->sc_txfifo_ch, 1);
2096 return 1;
2097 }
2098
2099 send_packet:
2100 sc->sc_txfifo_head += len;
2101 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2102 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2103
2104 return 0;
2105 }
2106
2107 /*
2108 * wm_start: [ifnet interface function]
2109 *
2110 * Start packet transmission on the interface.
2111 */
2112 static void
2113 wm_start(struct ifnet *ifp)
2114 {
2115 struct wm_softc *sc = ifp->if_softc;
2116 struct mbuf *m0;
2117 struct m_tag *mtag;
2118 struct wm_txsoft *txs;
2119 bus_dmamap_t dmamap;
2120 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2121 bus_addr_t curaddr;
2122 bus_size_t seglen, curlen;
2123 uint32_t cksumcmd;
2124 uint8_t cksumfields;
2125
2126 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2127 return;
2128
2129 /*
2130 * Remember the previous number of free descriptors.
2131 */
2132 ofree = sc->sc_txfree;
2133
2134 /*
2135 * Loop through the send queue, setting up transmit descriptors
2136 * until we drain the queue, or use up all available transmit
2137 * descriptors.
2138 */
2139 for (;;) {
2140 /* Grab a packet off the queue. */
2141 IFQ_POLL(&ifp->if_snd, m0);
2142 if (m0 == NULL)
2143 break;
2144
2145 DPRINTF(WM_DEBUG_TX,
2146 ("%s: TX: have packet to transmit: %p\n",
2147 device_xname(sc->sc_dev), m0));
2148
2149 /* Get a work queue entry. */
2150 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2151 wm_txintr(sc);
2152 if (sc->sc_txsfree == 0) {
2153 DPRINTF(WM_DEBUG_TX,
2154 ("%s: TX: no free job descriptors\n",
2155 device_xname(sc->sc_dev)));
2156 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2157 break;
2158 }
2159 }
2160
2161 txs = &sc->sc_txsoft[sc->sc_txsnext];
2162 dmamap = txs->txs_dmamap;
2163
2164 use_tso = (m0->m_pkthdr.csum_flags &
2165 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2166
2167 /*
2168 * So says the Linux driver:
2169 * The controller does a simple calculation to make sure
2170 * there is enough room in the FIFO before initiating the
2171 * DMA for each buffer. The calc is:
2172 * 4 = ceil(buffer len / MSS)
2173 * To make sure we don't overrun the FIFO, adjust the max
2174 * buffer len if the MSS drops.
2175 */
2176 dmamap->dm_maxsegsz =
2177 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2178 ? m0->m_pkthdr.segsz << 2
2179 : WTX_MAX_LEN;
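		/*
		 * E.g. with segsz = 1460 this clamps each DMA segment
		 * to 1460 << 2 = 5840 bytes, honoring the 4 * MSS rule
		 * above; non-TSO packets keep the full WTX_MAX_LEN limit.
		 */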
2180
2181 /*
2182 * Load the DMA map. If this fails, the packet either
2183 * didn't fit in the allotted number of segments, or we
2184 * were short on resources. For the too-many-segments
2185 * case, we simply report an error and drop the packet,
2186 * since we can't sanely copy a jumbo packet to a single
2187 * buffer.
2188 */
2189 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2190 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2191 if (error) {
2192 if (error == EFBIG) {
2193 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2194 log(LOG_ERR, "%s: Tx packet consumes too many "
2195 "DMA segments, dropping...\n",
2196 device_xname(sc->sc_dev));
2197 IFQ_DEQUEUE(&ifp->if_snd, m0);
2198 wm_dump_mbuf_chain(sc, m0);
2199 m_freem(m0);
2200 continue;
2201 }
2202 /*
2203 * Short on resources, just stop for now.
2204 */
2205 DPRINTF(WM_DEBUG_TX,
2206 ("%s: TX: dmamap load failed: %d\n",
2207 device_xname(sc->sc_dev), error));
2208 break;
2209 }
2210
2211 segs_needed = dmamap->dm_nsegs;
2212 if (use_tso) {
2213 /* For sentinel descriptor; see below. */
2214 segs_needed++;
2215 }
2216
2217 /*
2218 * Ensure we have enough descriptors free to describe
2219 * the packet. Note, we always reserve one descriptor
2220 * at the end of the ring due to the semantics of the
2221 * TDT register, plus one more in the event we need
2222 * to load offload context.
2223 */
2224 if (segs_needed > sc->sc_txfree - 2) {
2225 /*
2226 * Not enough free descriptors to transmit this
2227 * packet. We haven't committed anything yet,
2228 * so just unload the DMA map, put the packet
2229 			 * back on the queue, and punt. Notify the upper
2230 * layer that there are no more slots left.
2231 */
2232 DPRINTF(WM_DEBUG_TX,
2233 ("%s: TX: need %d (%d) descriptors, have %d\n",
2234 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2235 segs_needed, sc->sc_txfree - 1));
2236 ifp->if_flags |= IFF_OACTIVE;
2237 bus_dmamap_unload(sc->sc_dmat, dmamap);
2238 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2239 break;
2240 }
2241
2242 /*
2243 * Check for 82547 Tx FIFO bug. We need to do this
2244 * once we know we can transmit the packet, since we
2245 * do some internal FIFO space accounting here.
2246 */
2247 if (sc->sc_type == WM_T_82547 &&
2248 wm_82547_txfifo_bugchk(sc, m0)) {
2249 DPRINTF(WM_DEBUG_TX,
2250 ("%s: TX: 82547 Tx FIFO bug detected\n",
2251 device_xname(sc->sc_dev)));
2252 ifp->if_flags |= IFF_OACTIVE;
2253 bus_dmamap_unload(sc->sc_dmat, dmamap);
2254 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2255 break;
2256 }
2257
2258 IFQ_DEQUEUE(&ifp->if_snd, m0);
2259
2260 /*
2261 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2262 */
2263
2264 DPRINTF(WM_DEBUG_TX,
2265 ("%s: TX: packet has %d (%d) DMA segments\n",
2266 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2267
2268 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2269
2270 /*
2271 * Store a pointer to the packet so that we can free it
2272 * later.
2273 *
2274 * Initially, we consider the number of descriptors the
2275 		 * packet uses to be the number of DMA segments. This may be
2276 * incremented by 1 if we do checksum offload (a descriptor
2277 * is used to set the checksum context).
2278 */
2279 txs->txs_mbuf = m0;
2280 txs->txs_firstdesc = sc->sc_txnext;
2281 txs->txs_ndesc = segs_needed;
2282
2283 /* Set up offload parameters for this packet. */
2284 if (m0->m_pkthdr.csum_flags &
2285 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2286 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2287 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2288 if (wm_tx_offload(sc, txs, &cksumcmd,
2289 &cksumfields) != 0) {
2290 /* Error message already displayed. */
2291 bus_dmamap_unload(sc->sc_dmat, dmamap);
2292 continue;
2293 }
2294 } else {
2295 cksumcmd = 0;
2296 cksumfields = 0;
2297 }
2298
2299 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2300
2301 /* Sync the DMA map. */
2302 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2303 BUS_DMASYNC_PREWRITE);
2304
2305 /*
2306 * Initialize the transmit descriptor.
2307 */
2308 for (nexttx = sc->sc_txnext, seg = 0;
2309 seg < dmamap->dm_nsegs; seg++) {
2310 for (seglen = dmamap->dm_segs[seg].ds_len,
2311 curaddr = dmamap->dm_segs[seg].ds_addr;
2312 seglen != 0;
2313 curaddr += curlen, seglen -= curlen,
2314 nexttx = WM_NEXTTX(sc, nexttx)) {
2315 curlen = seglen;
2316
2317 /*
2318 * So says the Linux driver:
2319 * Work around for premature descriptor
2320 * write-backs in TSO mode. Append a
2321 * 4-byte sentinel descriptor.
2322 */
2323 if (use_tso &&
2324 seg == dmamap->dm_nsegs - 1 &&
2325 curlen > 8)
2326 curlen -= 4;
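				/*
				 * When taken, this leaves seglen == 4
				 * after this pass, so the next trip
				 * through this loop emits the 4-byte
				 * sentinel descriptor counted in
				 * segs_needed above.
				 */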
2327
2328 wm_set_dma_addr(
2329 &sc->sc_txdescs[nexttx].wtx_addr,
2330 curaddr);
2331 sc->sc_txdescs[nexttx].wtx_cmdlen =
2332 htole32(cksumcmd | curlen);
2333 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2334 0;
2335 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2336 cksumfields;
2337 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2338 lasttx = nexttx;
2339
2340 DPRINTF(WM_DEBUG_TX,
2341 ("%s: TX: desc %d: low 0x%08lx, "
2342 "len 0x%04x\n",
2343 device_xname(sc->sc_dev), nexttx,
2344 curaddr & 0xffffffffUL, (unsigned)curlen));
2345 }
2346 }
2347
2348 KASSERT(lasttx != -1);
2349
2350 /*
2351 * Set up the command byte on the last descriptor of
2352 * the packet. If we're in the interrupt delay window,
2353 * delay the interrupt.
2354 */
2355 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2356 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2357
2358 /*
2359 * If VLANs are enabled and the packet has a VLAN tag, set
2360 * up the descriptor to encapsulate the packet for us.
2361 *
2362 * This is only valid on the last descriptor of the packet.
2363 */
2364 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2365 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2366 htole32(WTX_CMD_VLE);
2367 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2368 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2369 }
2370
2371 txs->txs_lastdesc = lasttx;
2372
2373 DPRINTF(WM_DEBUG_TX,
2374 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2375 device_xname(sc->sc_dev),
2376 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2377
2378 /* Sync the descriptors we're using. */
2379 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2380 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2381
2382 /* Give the packet to the chip. */
2383 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2384
2385 DPRINTF(WM_DEBUG_TX,
2386 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2387
2388 DPRINTF(WM_DEBUG_TX,
2389 ("%s: TX: finished transmitting packet, job %d\n",
2390 device_xname(sc->sc_dev), sc->sc_txsnext));
2391
2392 /* Advance the tx pointer. */
2393 sc->sc_txfree -= txs->txs_ndesc;
2394 sc->sc_txnext = nexttx;
2395
2396 sc->sc_txsfree--;
2397 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2398
2399 /* Pass the packet to any BPF listeners. */
2400 if (ifp->if_bpf)
2401 bpf_ops->bpf_mtap(ifp->if_bpf, m0);
2402 }
2403
2404 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2405 /* No more slots; notify upper layer. */
2406 ifp->if_flags |= IFF_OACTIVE;
2407 }
2408
2409 if (sc->sc_txfree != ofree) {
2410 /* Set a watchdog timer in case the chip flakes out. */
2411 ifp->if_timer = 5;
2412 }
2413 }
2414
2415 /*
2416 * wm_watchdog: [ifnet interface function]
2417 *
2418 * Watchdog timer handler.
2419 */
2420 static void
2421 wm_watchdog(struct ifnet *ifp)
2422 {
2423 struct wm_softc *sc = ifp->if_softc;
2424
2425 /*
2426 * Since we're using delayed interrupts, sweep up
2427 * before we report an error.
2428 */
2429 wm_txintr(sc);
2430
2431 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2432 log(LOG_ERR,
2433 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2434 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2435 sc->sc_txnext);
2436 ifp->if_oerrors++;
2437
2438 /* Reset the interface. */
2439 (void) wm_init(ifp);
2440 }
2441
2442 /* Try to get more packets going. */
2443 wm_start(ifp);
2444 }
2445
2446 /*
2447 * wm_ioctl: [ifnet interface function]
2448 *
2449 * Handle control requests from the operator.
2450 */
2451 static int
2452 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2453 {
2454 struct wm_softc *sc = ifp->if_softc;
2455 struct ifreq *ifr = (struct ifreq *) data;
2456 struct ifaddr *ifa = (struct ifaddr *)data;
2457 struct sockaddr_dl *sdl;
2458 int diff, s, error;
2459
2460 s = splnet();
2461
2462 switch (cmd) {
2463 case SIOCSIFFLAGS:
2464 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2465 break;
2466 if (ifp->if_flags & IFF_UP) {
2467 diff = (ifp->if_flags ^ sc->sc_if_flags)
2468 & (IFF_PROMISC | IFF_ALLMULTI);
2469 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2470 /*
2471 				 * If the only difference between the old and
2472 				 * new flags is IFF_PROMISC or
2473 * IFF_ALLMULTI, set multicast filter only
2474 * (don't reset to prevent link down).
2475 */
2476 wm_set_filter(sc);
2477 } else {
2478 /*
2479 * Reset the interface to pick up changes in
2480 * any other flags that affect the hardware
2481 * state.
2482 */
2483 wm_init(ifp);
2484 }
2485 } else {
2486 if (ifp->if_flags & IFF_RUNNING)
2487 wm_stop(ifp, 1);
2488 }
2489 sc->sc_if_flags = ifp->if_flags;
2490 error = 0;
2491 break;
2492 case SIOCSIFMEDIA:
2493 case SIOCGIFMEDIA:
2494 /* Flow control requires full-duplex mode. */
2495 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2496 (ifr->ifr_media & IFM_FDX) == 0)
2497 ifr->ifr_media &= ~IFM_ETH_FMASK;
2498 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2499 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2500 /* We can do both TXPAUSE and RXPAUSE. */
2501 ifr->ifr_media |=
2502 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2503 }
2504 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2505 }
2506 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2507 break;
2508 case SIOCINITIFADDR:
2509 if (ifa->ifa_addr->sa_family == AF_LINK) {
2510 sdl = satosdl(ifp->if_dl->ifa_addr);
2511 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2512 LLADDR(satosdl(ifa->ifa_addr)),
2513 ifp->if_addrlen);
2514 /* unicast address is first multicast entry */
2515 wm_set_filter(sc);
2516 error = 0;
2517 break;
2518 }
2519 /* Fall through for rest */
2520 default:
2521 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2522 break;
2523
2524 error = 0;
2525
2526 if (cmd == SIOCSIFCAP)
2527 error = (*ifp->if_init)(ifp);
2528 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2529 ;
2530 else if (ifp->if_flags & IFF_RUNNING) {
2531 /*
2532 * Multicast list has changed; set the hardware filter
2533 * accordingly.
2534 */
2535 wm_set_filter(sc);
2536 }
2537 break;
2538 }
2539
2540 /* Try to get more packets going. */
2541 wm_start(ifp);
2542
2543 splx(s);
2544 return error;
2545 }
2546
2547 /*
2548 * wm_intr:
2549 *
2550 * Interrupt service routine.
2551 */
2552 static int
2553 wm_intr(void *arg)
2554 {
2555 struct wm_softc *sc = arg;
2556 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2557 uint32_t icr;
2558 int handled = 0;
2559
2560 while (1 /* CONSTCOND */) {
2561 icr = CSR_READ(sc, WMREG_ICR);
2562 if ((icr & sc->sc_icr) == 0)
2563 break;
2564 #if 0 /*NRND > 0*/
2565 if (RND_ENABLED(&sc->rnd_source))
2566 rnd_add_uint32(&sc->rnd_source, icr);
2567 #endif
2568
2569 handled = 1;
2570
2571 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2572 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2573 DPRINTF(WM_DEBUG_RX,
2574 ("%s: RX: got Rx intr 0x%08x\n",
2575 device_xname(sc->sc_dev),
2576 icr & (ICR_RXDMT0|ICR_RXT0)));
2577 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2578 }
2579 #endif
2580 wm_rxintr(sc);
2581
2582 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2583 if (icr & ICR_TXDW) {
2584 DPRINTF(WM_DEBUG_TX,
2585 ("%s: TX: got TXDW interrupt\n",
2586 device_xname(sc->sc_dev)));
2587 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2588 }
2589 #endif
2590 wm_txintr(sc);
2591
2592 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2593 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2594 wm_linkintr(sc, icr);
2595 }
2596
2597 if (icr & ICR_RXO) {
2598 ifp->if_ierrors++;
2599 #if defined(WM_DEBUG)
2600 log(LOG_WARNING, "%s: Receive overrun\n",
2601 device_xname(sc->sc_dev));
2602 #endif /* defined(WM_DEBUG) */
2603 }
2604 }
2605
2606 if (handled) {
2607 /* Try to get more packets going. */
2608 wm_start(ifp);
2609 }
2610
2611 return handled;
2612 }
2613
2614 /*
2615 * wm_txintr:
2616 *
2617 * Helper; handle transmit interrupts.
2618 */
2619 static void
2620 wm_txintr(struct wm_softc *sc)
2621 {
2622 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2623 struct wm_txsoft *txs;
2624 uint8_t status;
2625 int i;
2626
2627 ifp->if_flags &= ~IFF_OACTIVE;
2628
2629 /*
2630 * Go through the Tx list and free mbufs for those
2631 * frames which have been transmitted.
2632 */
2633 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2634 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2635 txs = &sc->sc_txsoft[i];
2636
2637 DPRINTF(WM_DEBUG_TX,
2638 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2639
2640 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2641 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2642
2643 status =
2644 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2645 if ((status & WTX_ST_DD) == 0) {
2646 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2647 BUS_DMASYNC_PREREAD);
2648 break;
2649 }
2650
2651 DPRINTF(WM_DEBUG_TX,
2652 ("%s: TX: job %d done: descs %d..%d\n",
2653 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2654 txs->txs_lastdesc));
2655
2656 /*
2657 * XXX We should probably be using the statistics
2658 * XXX registers, but I don't know if they exist
2659 * XXX on chips before the i82544.
2660 */
2661
2662 #ifdef WM_EVENT_COUNTERS
2663 if (status & WTX_ST_TU)
2664 WM_EVCNT_INCR(&sc->sc_ev_tu);
2665 #endif /* WM_EVENT_COUNTERS */
2666
2667 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2668 ifp->if_oerrors++;
2669 if (status & WTX_ST_LC)
2670 log(LOG_WARNING, "%s: late collision\n",
2671 device_xname(sc->sc_dev));
2672 else if (status & WTX_ST_EC) {
2673 ifp->if_collisions += 16;
2674 log(LOG_WARNING, "%s: excessive collisions\n",
2675 device_xname(sc->sc_dev));
2676 }
2677 } else
2678 ifp->if_opackets++;
2679
2680 sc->sc_txfree += txs->txs_ndesc;
2681 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2682 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2683 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2684 m_freem(txs->txs_mbuf);
2685 txs->txs_mbuf = NULL;
2686 }
2687
2688 /* Update the dirty transmit buffer pointer. */
2689 sc->sc_txsdirty = i;
2690 DPRINTF(WM_DEBUG_TX,
2691 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2692
2693 /*
2694 * If there are no more pending transmissions, cancel the watchdog
2695 * timer.
2696 */
2697 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2698 ifp->if_timer = 0;
2699 }
2700
2701 /*
2702 * wm_rxintr:
2703 *
2704 * Helper; handle receive interrupts.
2705 */
2706 static void
2707 wm_rxintr(struct wm_softc *sc)
2708 {
2709 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2710 struct wm_rxsoft *rxs;
2711 struct mbuf *m;
2712 int i, len;
2713 uint8_t status, errors;
2714 uint16_t vlantag;
2715
2716 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2717 rxs = &sc->sc_rxsoft[i];
2718
2719 DPRINTF(WM_DEBUG_RX,
2720 ("%s: RX: checking descriptor %d\n",
2721 device_xname(sc->sc_dev), i));
2722
2723 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2724
2725 status = sc->sc_rxdescs[i].wrx_status;
2726 errors = sc->sc_rxdescs[i].wrx_errors;
2727 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2728 vlantag = sc->sc_rxdescs[i].wrx_special;
2729
2730 if ((status & WRX_ST_DD) == 0) {
2731 /*
2732 * We have processed all of the receive descriptors.
2733 */
2734 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2735 break;
2736 }
2737
2738 if (__predict_false(sc->sc_rxdiscard)) {
2739 DPRINTF(WM_DEBUG_RX,
2740 ("%s: RX: discarding contents of descriptor %d\n",
2741 device_xname(sc->sc_dev), i));
2742 WM_INIT_RXDESC(sc, i);
2743 if (status & WRX_ST_EOP) {
2744 /* Reset our state. */
2745 DPRINTF(WM_DEBUG_RX,
2746 ("%s: RX: resetting rxdiscard -> 0\n",
2747 device_xname(sc->sc_dev)));
2748 sc->sc_rxdiscard = 0;
2749 }
2750 continue;
2751 }
2752
2753 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2754 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2755
2756 m = rxs->rxs_mbuf;
2757
2758 /*
2759 * Add a new receive buffer to the ring, unless of
2760 * course the length is zero. Treat the latter as a
2761 * failed mapping.
2762 */
2763 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2764 /*
2765 * Failed, throw away what we've done so
2766 * far, and discard the rest of the packet.
2767 */
2768 ifp->if_ierrors++;
2769 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2770 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2771 WM_INIT_RXDESC(sc, i);
2772 if ((status & WRX_ST_EOP) == 0)
2773 sc->sc_rxdiscard = 1;
2774 if (sc->sc_rxhead != NULL)
2775 m_freem(sc->sc_rxhead);
2776 WM_RXCHAIN_RESET(sc);
2777 DPRINTF(WM_DEBUG_RX,
2778 ("%s: RX: Rx buffer allocation failed, "
2779 "dropping packet%s\n", device_xname(sc->sc_dev),
2780 sc->sc_rxdiscard ? " (discard)" : ""));
2781 continue;
2782 }
2783
2784 m->m_len = len;
2785 sc->sc_rxlen += len;
2786 DPRINTF(WM_DEBUG_RX,
2787 ("%s: RX: buffer at %p len %d\n",
2788 device_xname(sc->sc_dev), m->m_data, len));
2789
2790 /*
2791 * If this is not the end of the packet, keep
2792 * looking.
2793 */
2794 if ((status & WRX_ST_EOP) == 0) {
2795 WM_RXCHAIN_LINK(sc, m);
2796 DPRINTF(WM_DEBUG_RX,
2797 ("%s: RX: not yet EOP, rxlen -> %d\n",
2798 device_xname(sc->sc_dev), sc->sc_rxlen));
2799 continue;
2800 }
2801
2802 /*
2803 * Okay, we have the entire packet now. The chip is
2804 * configured to include the FCS (not all chips can
2805 * be configured to strip it), so we need to trim it.
2806 * May need to adjust length of previous mbuf in the
2807 * chain if the current mbuf is too short.
2808 */
2809 if (m->m_len < ETHER_CRC_LEN) {
2810 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2811 m->m_len = 0;
2812 } else {
2813 m->m_len -= ETHER_CRC_LEN;
2814 }
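		/*
		 * E.g. if the final mbuf holds only 2 bytes, those 2
		 * bytes plus the trailing 2 bytes of the previous mbuf
		 * make up the 4-byte FCS, so both mbufs are shortened.
		 */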
2815 len = sc->sc_rxlen - ETHER_CRC_LEN;
2816
2817 WM_RXCHAIN_LINK(sc, m);
2818
2819 *sc->sc_rxtailp = NULL;
2820 m = sc->sc_rxhead;
2821
2822 WM_RXCHAIN_RESET(sc);
2823
2824 DPRINTF(WM_DEBUG_RX,
2825 ("%s: RX: have entire packet, len -> %d\n",
2826 device_xname(sc->sc_dev), len));
2827
2828 /*
2829 * If an error occurred, update stats and drop the packet.
2830 */
2831 if (errors &
2832 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2833 ifp->if_ierrors++;
2834 if (errors & WRX_ER_SE)
2835 log(LOG_WARNING, "%s: symbol error\n",
2836 device_xname(sc->sc_dev));
2837 else if (errors & WRX_ER_SEQ)
2838 log(LOG_WARNING, "%s: receive sequence error\n",
2839 device_xname(sc->sc_dev));
2840 else if (errors & WRX_ER_CE)
2841 log(LOG_WARNING, "%s: CRC error\n",
2842 device_xname(sc->sc_dev));
2843 m_freem(m);
2844 continue;
2845 }
2846
2847 /*
2848 * No errors. Receive the packet.
2849 */
2850 m->m_pkthdr.rcvif = ifp;
2851 m->m_pkthdr.len = len;
2852
2853 /*
2854 * If VLANs are enabled, VLAN packets have been unwrapped
2855 * for us. Associate the tag with the packet.
2856 */
2857 if ((status & WRX_ST_VP) != 0) {
2858 VLAN_INPUT_TAG(ifp, m,
2859 le16toh(vlantag),
2860 continue);
2861 }
2862
2863 /*
2864 * Set up checksum info for this packet.
2865 */
2866 if ((status & WRX_ST_IXSM) == 0) {
2867 if (status & WRX_ST_IPCS) {
2868 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2869 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2870 if (errors & WRX_ER_IPE)
2871 m->m_pkthdr.csum_flags |=
2872 M_CSUM_IPv4_BAD;
2873 }
2874 if (status & WRX_ST_TCPCS) {
2875 /*
2876 * Note: we don't know if this was TCP or UDP,
2877 * so we just set both bits, and expect the
2878 * upper layers to deal.
2879 */
2880 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2881 m->m_pkthdr.csum_flags |=
2882 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2883 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2884 if (errors & WRX_ER_TCPE)
2885 m->m_pkthdr.csum_flags |=
2886 M_CSUM_TCP_UDP_BAD;
2887 }
2888 }
2889
2890 ifp->if_ipackets++;
2891
2892 /* Pass this up to any BPF listeners. */
2893 if (ifp->if_bpf)
2894 bpf_ops->bpf_mtap(ifp->if_bpf, m);
2895
2896 /* Pass it on. */
2897 (*ifp->if_input)(ifp, m);
2898 }
2899
2900 /* Update the receive pointer. */
2901 sc->sc_rxptr = i;
2902
2903 DPRINTF(WM_DEBUG_RX,
2904 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2905 }
2906
2907 /*
2908 * wm_linkintr_gmii:
2909 *
2910 * Helper; handle link interrupts for GMII.
2911 */
2912 static void
2913 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
2914 {
2915
2916 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2917 __func__));
2918
2919 if (icr & ICR_LSC) {
2920 DPRINTF(WM_DEBUG_LINK,
2921 ("%s: LINK: LSC -> mii_tick\n",
2922 device_xname(sc->sc_dev)));
2923 mii_tick(&sc->sc_mii);
2924 if (sc->sc_type == WM_T_82543) {
2925 int miistatus, active;
2926
2927 /*
2928 			 * With the 82543, we need to force the MAC's speed
2929 			 * and duplex settings to match the PHY's current
2930 			 * speed and duplex configuration.
2931 */
2932 miistatus = sc->sc_mii.mii_media_status;
2933
2934 if (miistatus & IFM_ACTIVE) {
2935 active = sc->sc_mii.mii_media_active;
2936 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
2937 switch (IFM_SUBTYPE(active)) {
2938 case IFM_10_T:
2939 sc->sc_ctrl |= CTRL_SPEED_10;
2940 break;
2941 case IFM_100_TX:
2942 sc->sc_ctrl |= CTRL_SPEED_100;
2943 break;
2944 case IFM_1000_T:
2945 sc->sc_ctrl |= CTRL_SPEED_1000;
2946 break;
2947 default:
2948 /*
2949 * fiber?
2950 					 * Should not enter here.
2951 */
2952 printf("unknown media (%x)\n",
2953 active);
2954 break;
2955 }
2956 if (active & IFM_FDX)
2957 sc->sc_ctrl |= CTRL_FD;
2958 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2959 }
2960 } else if (sc->sc_type == WM_T_PCH) {
2961 wm_k1_gig_workaround_hv(sc,
2962 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
2963 }
2964
2965 if ((sc->sc_phytype == WMPHY_82578)
2966 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
2967 == IFM_1000_T)) {
2968
2969 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
2970 delay(200*1000); /* XXX too big */
2971
2972 /* Link stall fix for link up */
2973 wm_gmii_hv_writereg(sc->sc_dev, 1,
2974 HV_MUX_DATA_CTRL,
2975 HV_MUX_DATA_CTRL_GEN_TO_MAC
2976 | HV_MUX_DATA_CTRL_FORCE_SPEED);
2977 wm_gmii_hv_writereg(sc->sc_dev, 1,
2978 HV_MUX_DATA_CTRL,
2979 HV_MUX_DATA_CTRL_GEN_TO_MAC);
2980 }
2981 }
2982 } else if (icr & ICR_RXSEQ) {
2983 DPRINTF(WM_DEBUG_LINK,
2984 		    ("%s: LINK: Receive sequence error\n",
2985 device_xname(sc->sc_dev)));
2986 }
2987 }
2988
2989 /*
2990 * wm_linkintr_tbi:
2991 *
2992 * Helper; handle link interrupts for TBI mode.
2993 */
2994 static void
2995 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
2996 {
2997 uint32_t status;
2998
2999 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3000 __func__));
3001
3002 status = CSR_READ(sc, WMREG_STATUS);
3003 if (icr & ICR_LSC) {
3004 if (status & STATUS_LU) {
3005 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3006 device_xname(sc->sc_dev),
3007 (status & STATUS_FD) ? "FDX" : "HDX"));
3008 /*
3009 * NOTE: CTRL will update TFCE and RFCE automatically,
3010 * so we should update sc->sc_ctrl
3011 */
3012
3013 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3014 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3015 sc->sc_fcrtl &= ~FCRTL_XONE;
3016 if (status & STATUS_FD)
3017 sc->sc_tctl |=
3018 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3019 else
3020 sc->sc_tctl |=
3021 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3022 if (sc->sc_ctrl & CTRL_TFCE)
3023 sc->sc_fcrtl |= FCRTL_XONE;
3024 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3025 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3026 WMREG_OLD_FCRTL : WMREG_FCRTL,
3027 sc->sc_fcrtl);
3028 sc->sc_tbi_linkup = 1;
3029 } else {
3030 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3031 device_xname(sc->sc_dev)));
3032 sc->sc_tbi_linkup = 0;
3033 }
3034 wm_tbi_set_linkled(sc);
3035 } else if (icr & ICR_RXCFG) {
3036 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3037 device_xname(sc->sc_dev)));
3038 sc->sc_tbi_nrxcfg++;
3039 wm_check_for_link(sc);
3040 } else if (icr & ICR_RXSEQ) {
3041 DPRINTF(WM_DEBUG_LINK,
3042 ("%s: LINK: Receive sequence error\n",
3043 device_xname(sc->sc_dev)));
3044 }
3045 }
3046
3047 /*
3048 * wm_linkintr:
3049 *
3050 * Helper; handle link interrupts.
3051 */
3052 static void
3053 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3054 {
3055
3056 if (sc->sc_flags & WM_F_HAS_MII)
3057 wm_linkintr_gmii(sc, icr);
3058 else
3059 wm_linkintr_tbi(sc, icr);
3060 }
3061
3062 /*
3063 * wm_tick:
3064 *
3065 * One second timer, used to check link status, sweep up
3066 * completed transmit jobs, etc.
3067 */
3068 static void
3069 wm_tick(void *arg)
3070 {
3071 struct wm_softc *sc = arg;
3072 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3073 int s;
3074
3075 s = splnet();
3076
3077 if (sc->sc_type >= WM_T_82542_2_1) {
3078 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3079 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3080 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3081 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3082 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3083 }
3084
3085 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3086 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3087
3088 if (sc->sc_flags & WM_F_HAS_MII)
3089 mii_tick(&sc->sc_mii);
3090 else
3091 wm_tbi_check_link(sc);
3092
3093 splx(s);
3094
3095 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3096 }
3097
3098 /*
3099 * wm_reset:
3100 *
3101 	 * Reset the chip.
3102 */
3103 static void
3104 wm_reset(struct wm_softc *sc)
3105 {
3106 int phy_reset = 0;
3107 uint32_t reg, func, mask;
3108 int i;
3109
3110 /*
3111 * Allocate on-chip memory according to the MTU size.
3112 * The Packet Buffer Allocation register must be written
3113 * before the chip is reset.
3114 */
3115 switch (sc->sc_type) {
3116 case WM_T_82547:
3117 case WM_T_82547_2:
3118 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3119 PBA_22K : PBA_30K;
3120 sc->sc_txfifo_head = 0;
3121 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3122 sc->sc_txfifo_size =
3123 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3124 sc->sc_txfifo_stall = 0;
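		/*
		 * Assuming the PBA_* constants count 1 Kbyte units
		 * (PBA_BYTE_SHIFT == 10), the standard-MTU case above
		 * leaves 40K - 30K = 10 Kbytes of packet buffer as the
		 * Tx FIFO that wm_82547_txfifo_bugchk() accounts against.
		 */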
3125 break;
3126 case WM_T_82571:
3127 case WM_T_82572:
3128 case WM_T_80003:
3129 sc->sc_pba = PBA_32K;
3130 break;
3131 case WM_T_82573:
3132 sc->sc_pba = PBA_12K;
3133 break;
3134 case WM_T_82574:
3135 case WM_T_82583:
3136 sc->sc_pba = PBA_20K;
3137 break;
3138 case WM_T_ICH8:
3139 sc->sc_pba = PBA_8K;
3140 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3141 break;
3142 case WM_T_ICH9:
3143 case WM_T_ICH10:
3144 case WM_T_PCH:
3145 sc->sc_pba = PBA_10K;
3146 break;
3147 default:
3148 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3149 PBA_40K : PBA_48K;
3150 break;
3151 }
3152 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3153
3154 if (sc->sc_flags & WM_F_PCIE) {
3155 int timeout = 800;
3156
3157 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3158 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3159
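		/*
		 * Poll for the GIO master-disable handshake: 800 tries
		 * at 100us apiece allows up to 80ms before we give up
		 * and proceed with the reset anyway.
		 */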
3160 while (timeout--) {
3161 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3162 break;
3163 delay(100);
3164 }
3165 }
3166
3167 /* clear interrupt */
3168 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3169
3170 /* Stop the transmit and receive processes. */
3171 CSR_WRITE(sc, WMREG_RCTL, 0);
3172 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3173
3174 /* set_tbi_sbp_82543() */
3175
3176 delay(10*1000);
3177
3178 /* Must acquire the MDIO ownership before MAC reset */
3179 switch (sc->sc_type) {
3180 case WM_T_82573:
3181 case WM_T_82574:
3182 case WM_T_82583:
3183 i = 0;
3184 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3185 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3186 do {
3187 CSR_WRITE(sc, WMREG_EXTCNFCTR,
3188 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3189 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3190 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3191 break;
3192 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3193 delay(2*1000);
3194 i++;
3195 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3196 break;
3197 default:
3198 break;
3199 }
3200
3201 /*
3202 * 82541 Errata 29? & 82547 Errata 28?
3203 * See also the description about PHY_RST bit in CTRL register
3204 * in 8254x_GBe_SDM.pdf.
3205 */
3206 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3207 CSR_WRITE(sc, WMREG_CTRL,
3208 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3209 delay(5000);
3210 }
3211
3212 switch (sc->sc_type) {
3213 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3214 case WM_T_82541:
3215 case WM_T_82541_2:
3216 case WM_T_82547:
3217 case WM_T_82547_2:
3218 /*
3219 * On some chipsets, a reset through a memory-mapped write
3220 * cycle can cause the chip to reset before completing the
3221 	 * write cycle. This causes a major headache that can be
3222 * avoided by issuing the reset via indirect register writes
3223 * through I/O space.
3224 *
3225 * So, if we successfully mapped the I/O BAR at attach time,
3226 * use that. Otherwise, try our luck with a memory-mapped
3227 * reset.
3228 */
3229 if (sc->sc_flags & WM_F_IOH_VALID)
3230 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3231 else
3232 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3233 break;
3234 case WM_T_82545_3:
3235 case WM_T_82546_3:
3236 /* Use the shadow control register on these chips. */
3237 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3238 break;
3239 case WM_T_80003:
3240 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
3241 mask = func ? SWFW_PHY1_SM : SWFW_PHY0_SM;
3242 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3243 wm_get_swfw_semaphore(sc, mask);
3244 CSR_WRITE(sc, WMREG_CTRL, reg);
3245 wm_put_swfw_semaphore(sc, mask);
3246 break;
3247 case WM_T_ICH8:
3248 case WM_T_ICH9:
3249 case WM_T_ICH10:
3250 case WM_T_PCH:
3251 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3252 if (wm_check_reset_block(sc) == 0) {
3253 if (sc->sc_type >= WM_T_PCH) {
3254 uint32_t status;
3255
3256 status = CSR_READ(sc, WMREG_STATUS);
3257 CSR_WRITE(sc, WMREG_STATUS,
3258 status & ~STATUS_PHYRA);
3259 }
3260
3261 reg |= CTRL_PHY_RESET;
3262 phy_reset = 1;
3263 }
3264 wm_get_swfwhw_semaphore(sc);
3265 CSR_WRITE(sc, WMREG_CTRL, reg);
3266 delay(20*1000);
3267 wm_put_swfwhw_semaphore(sc);
3268 break;
3269 case WM_T_82542_2_0:
3270 case WM_T_82542_2_1:
3271 case WM_T_82543:
3272 case WM_T_82540:
3273 case WM_T_82545:
3274 case WM_T_82546:
3275 case WM_T_82571:
3276 case WM_T_82572:
3277 case WM_T_82573:
3278 case WM_T_82574:
3279 case WM_T_82583:
3280 default:
3281 /* Everything else can safely use the documented method. */
3282 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3283 break;
3284 }
3285
3286 if (phy_reset != 0)
3287 wm_get_cfg_done(sc);
3288
3289 /* reload EEPROM */
3290 switch (sc->sc_type) {
3291 case WM_T_82542_2_0:
3292 case WM_T_82542_2_1:
3293 case WM_T_82543:
3294 case WM_T_82544:
3295 delay(10);
3296 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3297 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3298 delay(2000);
3299 break;
3300 case WM_T_82540:
3301 case WM_T_82545:
3302 case WM_T_82545_3:
3303 case WM_T_82546:
3304 case WM_T_82546_3:
3305 delay(5*1000);
3306 /* XXX Disable HW ARPs on ASF enabled adapters */
3307 break;
3308 case WM_T_82541:
3309 case WM_T_82541_2:
3310 case WM_T_82547:
3311 case WM_T_82547_2:
3312 delay(20000);
3313 /* XXX Disable HW ARPs on ASF enabled adapters */
3314 break;
3315 case WM_T_82571:
3316 case WM_T_82572:
3317 case WM_T_82573:
3318 case WM_T_82574:
3319 case WM_T_82583:
3320 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3321 delay(10);
3322 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3323 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3324 }
3325 /* check EECD_EE_AUTORD */
3326 wm_get_auto_rd_done(sc);
3327 /*
3328 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3329 * is set.
3330 */
3331 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3332 || (sc->sc_type == WM_T_82583))
3333 delay(25*1000);
3334 break;
3335 case WM_T_80003:
3336 case WM_T_ICH8:
3337 case WM_T_ICH9:
3338 /* check EECD_EE_AUTORD */
3339 wm_get_auto_rd_done(sc);
3340 break;
3341 case WM_T_ICH10:
3342 case WM_T_PCH:
3343 wm_lan_init_done(sc);
3344 break;
3345 default:
3346 panic("%s: unknown type\n", __func__);
3347 }
3348
3349 /* reload sc_ctrl */
3350 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3351
3352 /* dummy read from WUC */
3353 if (sc->sc_type == WM_T_PCH)
3354 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3355 /*
3356 * For PCH, this write will make sure that any noise will be detected
3357 * as a CRC error and be dropped rather than show up as a bad packet
3358 	 * to the DMA engine.
3359 */
3360 if (sc->sc_type == WM_T_PCH)
3361 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3362
3363 #if 0
3364 for (i = 0; i < 1000; i++) {
3365 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3366 return;
3367 }
3368 delay(20);
3369 }
3370
3371 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3372 log(LOG_ERR, "%s: reset failed to complete\n",
3373 device_xname(sc->sc_dev));
3374 #endif
3375 }
3376
3377 /*
3378 * wm_init: [ifnet interface function]
3379 *
3380 * Initialize the interface. Must be called at splnet().
3381 */
3382 static int
3383 wm_init(struct ifnet *ifp)
3384 {
3385 struct wm_softc *sc = ifp->if_softc;
3386 struct wm_rxsoft *rxs;
3387 int i, error = 0;
3388 uint32_t reg;
3389
3390 /*
3391 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3392 	 * There is a small but measurable benefit to avoiding the adjustment
3393 * of the descriptor so that the headers are aligned, for normal mtu,
3394 * on such platforms. One possibility is that the DMA itself is
3395 * slightly more efficient if the front of the entire packet (instead
3396 * of the front of the headers) is aligned.
3397 *
3398 * Note we must always set align_tweak to 0 if we are using
3399 * jumbo frames.
3400 */
3401 #ifdef __NO_STRICT_ALIGNMENT
3402 sc->sc_align_tweak = 0;
3403 #else
3404 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3405 sc->sc_align_tweak = 0;
3406 else
3407 sc->sc_align_tweak = 2;
3408 #endif /* __NO_STRICT_ALIGNMENT */
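	/*
	 * The tweak of 2 makes the 14-byte Ethernet header end on a
	 * 4-byte boundary, so the IP header that follows is aligned for
	 * strict-alignment CPUs.  With jumbo frames the tweak must be 0,
	 * since offsetting the buffer would not leave room for a full
	 * cluster's worth of received data.
	 */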
3409
3410 /* Cancel any pending I/O. */
3411 wm_stop(ifp, 0);
3412
3413 /* update statistics before reset */
3414 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3415 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3416
3417 /* Reset the chip to a known state. */
3418 wm_reset(sc);
3419
3420 switch (sc->sc_type) {
3421 case WM_T_82571:
3422 case WM_T_82572:
3423 case WM_T_82573:
3424 case WM_T_82574:
3425 case WM_T_82583:
3426 case WM_T_80003:
3427 case WM_T_ICH8:
3428 case WM_T_ICH9:
3429 case WM_T_ICH10:
3430 case WM_T_PCH:
3431 if (wm_check_mng_mode(sc) != 0)
3432 wm_get_hw_control(sc);
3433 break;
3434 default:
3435 break;
3436 }
3437
3438 /* Reset the PHY. */
3439 if (sc->sc_flags & WM_F_HAS_MII)
3440 wm_gmii_reset(sc);
3441
3442 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3443 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3444 if (sc->sc_type == WM_T_PCH)
3445 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3446
3447 /* Initialize the transmit descriptor ring. */
3448 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3449 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3450 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3451 sc->sc_txfree = WM_NTXDESC(sc);
3452 sc->sc_txnext = 0;
3453
3454 if (sc->sc_type < WM_T_82543) {
3455 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3456 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3457 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3458 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3459 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3460 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3461 } else {
3462 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3463 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3464 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3465 CSR_WRITE(sc, WMREG_TDH, 0);
3466 CSR_WRITE(sc, WMREG_TDT, 0);
3467 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3468 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3469
3470 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3471 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3472 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3473 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
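		/*
		 * Following the usual e1000 semantics, PTHRESH/HTHRESH
		 * control descriptor prefetch and WTHRESH batches
		 * descriptor write-back; RXDCTL_WTHRESH(1) writes each
		 * completed Rx descriptor back without batching.
		 */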
3474 }
3475 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3476 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3477
3478 /* Initialize the transmit job descriptors. */
3479 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3480 sc->sc_txsoft[i].txs_mbuf = NULL;
3481 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3482 sc->sc_txsnext = 0;
3483 sc->sc_txsdirty = 0;
3484
3485 /*
3486 * Initialize the receive descriptor and receive job
3487 * descriptor rings.
3488 */
3489 if (sc->sc_type < WM_T_82543) {
3490 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3491 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3492 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3493 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3494 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3495 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3496
3497 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3498 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3499 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3500 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3501 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3502 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3503 } else {
3504 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3505 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3506 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3507 CSR_WRITE(sc, WMREG_RDH, 0);
3508 CSR_WRITE(sc, WMREG_RDT, 0);
3509 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3510 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3511 }
3512 for (i = 0; i < WM_NRXDESC; i++) {
3513 rxs = &sc->sc_rxsoft[i];
3514 if (rxs->rxs_mbuf == NULL) {
3515 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3516 log(LOG_ERR, "%s: unable to allocate or map rx "
3517 "buffer %d, error = %d\n",
3518 device_xname(sc->sc_dev), i, error);
3519 /*
3520 * XXX Should attempt to run with fewer receive
3521 * XXX buffers instead of just failing.
3522 */
3523 wm_rxdrain(sc);
3524 goto out;
3525 }
3526 } else
3527 WM_INIT_RXDESC(sc, i);
3528 }
3529 sc->sc_rxptr = 0;
3530 sc->sc_rxdiscard = 0;
3531 WM_RXCHAIN_RESET(sc);
3532
3533 /*
3534 * Clear out the VLAN table -- we don't use it (yet).
3535 */
3536 CSR_WRITE(sc, WMREG_VET, 0);
3537 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3538 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3539
3540 /*
3541 * Set up flow-control parameters.
3542 *
3543 * XXX Values could probably stand some tuning.
3544 */
3545 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3546 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3547 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3548 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3549 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3550 }
3551
3552 sc->sc_fcrtl = FCRTL_DFLT;
3553 if (sc->sc_type < WM_T_82543) {
3554 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3555 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3556 } else {
3557 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3558 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3559 }
3560
3561 if (sc->sc_type == WM_T_80003)
3562 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3563 else
3564 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3565
3566 /* Deal with VLAN enables. */
3567 if (VLAN_ATTACHED(&sc->sc_ethercom))
3568 sc->sc_ctrl |= CTRL_VME;
3569 else
3570 sc->sc_ctrl &= ~CTRL_VME;
3571
3572 /* Write the control registers. */
3573 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3574
3575 if (sc->sc_flags & WM_F_HAS_MII) {
3576 int val;
3577
3578 switch (sc->sc_type) {
3579 case WM_T_80003:
3580 case WM_T_ICH8:
3581 case WM_T_ICH9:
3582 case WM_T_ICH10:
3583 case WM_T_PCH:
3584 /*
3585 * Set the mac to wait the maximum time between each
3586 * iteration and increase the max iterations when
3587 * polling the phy; this fixes erroneous timeouts at
3588 * 10Mbps.
3589 */
3590 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3591 0xFFFF);
3592 val = wm_kmrn_readreg(sc,
3593 KUMCTRLSTA_OFFSET_INB_PARAM);
3594 val |= 0x3F;
3595 wm_kmrn_writereg(sc,
3596 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3597 break;
3598 default:
3599 break;
3600 }
3601
3602 if (sc->sc_type == WM_T_80003) {
3603 val = CSR_READ(sc, WMREG_CTRL_EXT);
3604 val &= ~CTRL_EXT_LINK_MODE_MASK;
3605 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3606
3607 			/* Bypass RX and TX FIFOs */
3608 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3609 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3610 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3611
3612 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3613 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3614 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3615 }
3616 }
3617 #if 0
3618 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3619 #endif
3620
3621 /*
3622 * Set up checksum offload parameters.
3623 */
3624 reg = CSR_READ(sc, WMREG_RXCSUM);
3625 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3626 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3627 reg |= RXCSUM_IPOFL;
3628 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3629 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3630 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3631 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3632 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3633
3634 /* Reset TBI's RXCFG count */
3635 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3636
3637 /*
3638 * Set up the interrupt registers.
3639 */
3640 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3641 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3642 ICR_RXO | ICR_RXT0;
3643 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3644 sc->sc_icr |= ICR_RXCFG;
3645 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3646
3647 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3648 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
3649 reg = CSR_READ(sc, WMREG_KABGTXD);
3650 reg |= KABGTXD_BGSQLBIAS;
3651 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3652 }
3653
3654 /* Set up the inter-packet gap. */
3655 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3656
3657 if (sc->sc_type >= WM_T_82543) {
3658 /*
3659 * Set up the interrupt throttling register (units of 256ns)
3660 * Note that a footnote in Intel's documentation says this
3661 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3662 * or 10Mbit mode. Empirically, this also appears to be true
3663 * for the 1024ns units of the other
3664 * interrupt-related timer registers -- so, really, we ought
3665 * to divide this value by 4 when the link speed is low.
3666 *
3667 * XXX implement this division at link speed change!
3668 */
3669
3670 /*
3671 * For N interrupts/sec, set this value to:
3672 * 1000000000 / (N * 256). Note that we set the
3673 * absolute and packet timer values to this value
3674 * divided by 4 to get "simple timer" behavior.
3675 */
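/*
* Worked example (illustrative): sc_itr = 1500 gives
* 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, which matches the
* comment below; to target, say, 8000 ints/sec one would instead
* program 1000000000 / (8000 * 256) ~= 488. A hypothetical helper:
*/
#if 0
/* Hypothetical sketch: convert a desired interrupt rate to an ITR value. */
static uint32_t
wm_itr_from_rate(uint32_t ints_per_sec)
{
        return 1000000000U / (ints_per_sec * 256);
}
#endif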
3676
3677 sc->sc_itr = 1500; /* 2604 ints/sec */
3678 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3679 }
3680
3681 /* Set the VLAN ethernetype. */
3682 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3683
3684 /*
3685 * Set up the transmit control register; we start out with
3686 * a collision distance suitable for FDX, but update it when
3687 * we resolve the media type.
3688 */
3689 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3690 | TCTL_CT(TX_COLLISION_THRESHOLD)
3691 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3692 if (sc->sc_type >= WM_T_82571)
3693 sc->sc_tctl |= TCTL_MULR;
3694 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3695
3696 if (sc->sc_type == WM_T_80003) {
3697 reg = CSR_READ(sc, WMREG_TCTL_EXT);
3698 reg &= ~TCTL_EXT_GCEX_MASK;
3699 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3700 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3701 }
3702
3703 /* Set the media. */
3704 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3705 goto out;
3706
3707 /*
3708 * Set up the receive control register; we actually program
3709 * the register when we set the receive filter. Use multicast
3710 * address offset type 0.
3711 *
3712 * Only the i82544 has the ability to strip the incoming
3713 * CRC, so we don't enable that feature.
3714 */
3715 sc->sc_mchash_type = 0;
3716 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3717 | RCTL_MO(sc->sc_mchash_type);
3718
3719 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
3720 && (ifp->if_mtu > ETHERMTU))
3721 sc->sc_rctl |= RCTL_LPE;
3722
3723 if (MCLBYTES == 2048) {
3724 sc->sc_rctl |= RCTL_2k;
3725 } else {
3726 if (sc->sc_type >= WM_T_82543) {
3727 switch (MCLBYTES) {
3728 case 4096:
3729 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3730 break;
3731 case 8192:
3732 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3733 break;
3734 case 16384:
3735 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3736 break;
3737 default:
3738 panic("wm_init: MCLBYTES %d unsupported",
3739 MCLBYTES);
3740 break;
3741 }
3742 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3743 }
3744
3745 /* Set the receive filter. */
3746 wm_set_filter(sc);
3747
3748 /* Start the one second link check clock. */
3749 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3750
3751 /* ...all done! */
3752 ifp->if_flags |= IFF_RUNNING;
3753 ifp->if_flags &= ~IFF_OACTIVE;
3754
3755 out:
3756 if (error)
3757 log(LOG_ERR, "%s: interface not running\n",
3758 device_xname(sc->sc_dev));
3759 return error;
3760 }
3761
3762 /*
3763 * wm_rxdrain:
3764 *
3765 * Drain the receive queue.
3766 */
3767 static void
3768 wm_rxdrain(struct wm_softc *sc)
3769 {
3770 struct wm_rxsoft *rxs;
3771 int i;
3772
3773 for (i = 0; i < WM_NRXDESC; i++) {
3774 rxs = &sc->sc_rxsoft[i];
3775 if (rxs->rxs_mbuf != NULL) {
3776 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3777 m_freem(rxs->rxs_mbuf);
3778 rxs->rxs_mbuf = NULL;
3779 }
3780 }
3781 }
3782
3783 /*
3784 * wm_stop: [ifnet interface function]
3785 *
3786 * Stop transmission on the interface.
3787 */
3788 static void
3789 wm_stop(struct ifnet *ifp, int disable)
3790 {
3791 struct wm_softc *sc = ifp->if_softc;
3792 struct wm_txsoft *txs;
3793 int i;
3794
3795 /* Stop the one second clock. */
3796 callout_stop(&sc->sc_tick_ch);
3797
3798 /* Stop the 82547 Tx FIFO stall check timer. */
3799 if (sc->sc_type == WM_T_82547)
3800 callout_stop(&sc->sc_txfifo_ch);
3801
3802 if (sc->sc_flags & WM_F_HAS_MII) {
3803 /* Down the MII. */
3804 mii_down(&sc->sc_mii);
3805 } else {
3806 #if 0
3807 /* Should we clear PHY's status properly? */
3808 wm_reset(sc);
3809 #endif
3810 }
3811
3812 /* Stop the transmit and receive processes. */
3813 CSR_WRITE(sc, WMREG_TCTL, 0);
3814 CSR_WRITE(sc, WMREG_RCTL, 0);
3815
3816 /*
3817 * Clear the interrupt mask to ensure the device cannot assert its
3818 * interrupt line.
3819 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3820 * any currently pending or shared interrupt.
3821 */
3822 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3823 sc->sc_icr = 0;
3824
3825 /* Release any queued transmit buffers. */
3826 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3827 txs = &sc->sc_txsoft[i];
3828 if (txs->txs_mbuf != NULL) {
3829 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3830 m_freem(txs->txs_mbuf);
3831 txs->txs_mbuf = NULL;
3832 }
3833 }
3834
3835 /* Mark the interface as down and cancel the watchdog timer. */
3836 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3837 ifp->if_timer = 0;
3838
3839 if (disable)
3840 wm_rxdrain(sc);
3841 }
3842
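/*
* wm_get_auto_rd_done:
*
* Wait (up to 10 * 1ms) for the EECD auto-read-done bit to be set
* after reset, on chip types that report it.
*/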
3843 void
3844 wm_get_auto_rd_done(struct wm_softc *sc)
3845 {
3846 int i;
3847
3848 /* wait for eeprom to reload */
3849 switch (sc->sc_type) {
3850 case WM_T_82571:
3851 case WM_T_82572:
3852 case WM_T_82573:
3853 case WM_T_82574:
3854 case WM_T_82583:
3855 case WM_T_80003:
3856 case WM_T_ICH8:
3857 case WM_T_ICH9:
3858 for (i = 0; i < 10; i++) {
3859 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3860 break;
3861 delay(1000);
3862 }
3863 if (i == 10) {
3864 log(LOG_ERR, "%s: auto read from eeprom failed to "
3865 "complete\n", device_xname(sc->sc_dev));
3866 }
3867 break;
3868 default:
3869 break;
3870 }
3871 }
3872
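/*
* wm_lan_init_done:
*
* Wait for the STATUS_LAN_INIT_DONE bit to be set after reset
* (ICH10 and PCH only), then clear it.
*/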
3873 void
3874 wm_lan_init_done(struct wm_softc *sc)
3875 {
3876 uint32_t reg = 0;
3877 int i;
3878
3879 /* wait for eeprom to reload */
3880 switch (sc->sc_type) {
3881 case WM_T_ICH10:
3882 case WM_T_PCH:
3883 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3884 reg = CSR_READ(sc, WMREG_STATUS);
3885 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3886 break;
3887 delay(100);
3888 }
3889 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3890 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3891 "complete\n", device_xname(sc->sc_dev), __func__);
3892 }
3893 break;
3894 default:
3895 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3896 __func__);
3897 break;
3898 }
3899
3900 reg &= ~STATUS_LAN_INIT_DONE;
3901 CSR_WRITE(sc, WMREG_STATUS, reg);
3902 }
3903
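/*
* wm_get_cfg_done:
*
* Wait for the post-reset PHY configuration cycle to complete,
* using whichever mechanism the chip type provides.
*/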
3904 void
3905 wm_get_cfg_done(struct wm_softc *sc)
3906 {
3907 int func = 0;
3908 int mask;
3909 uint32_t reg;
3910 int i;
3911
3912 /* wait for eeprom to reload */
3913 switch (sc->sc_type) {
3914 case WM_T_82542_2_0:
3915 case WM_T_82542_2_1:
3916 /* null */
3917 break;
3918 case WM_T_82543:
3919 case WM_T_82544:
3920 case WM_T_82540:
3921 case WM_T_82545:
3922 case WM_T_82545_3:
3923 case WM_T_82546:
3924 case WM_T_82546_3:
3925 case WM_T_82541:
3926 case WM_T_82541_2:
3927 case WM_T_82547:
3928 case WM_T_82547_2:
3929 case WM_T_82573:
3930 case WM_T_82574:
3931 case WM_T_82583:
3932 /* generic */
3933 delay(10*1000);
3934 break;
3935 case WM_T_80003:
3936 case WM_T_82571:
3937 case WM_T_82572:
3938 if (sc->sc_type == WM_T_80003)
3939 func = (CSR_READ(sc, WMREG_STATUS)
3940 >> STATUS_FUNCID_SHIFT) & 1;
3941 else
3942 func = 0; /* XXX Is it true for 82571? */
3943 mask = (func == 1) ? EEMNGCTL_CFGDONE_1 : EEMNGCTL_CFGDONE_0;
3944 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3945 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3946 break;
3947 delay(1000);
3948 }
3949 if (i >= WM_PHY_CFG_TIMEOUT) {
3950 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3951 device_xname(sc->sc_dev), __func__));
3952 }
3953 break;
3954 case WM_T_ICH8:
3955 case WM_T_ICH9:
3956 case WM_T_ICH10:
3957 case WM_T_PCH:
3958 if (sc->sc_type >= WM_T_PCH) {
3959 reg = CSR_READ(sc, WMREG_STATUS);
3960 if ((reg & STATUS_PHYRA) != 0)
3961 CSR_WRITE(sc, WMREG_STATUS,
3962 reg & ~STATUS_PHYRA);
3963 }
3964 delay(10*1000);
3965 break;
3966 default:
3967 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3968 __func__);
3969 break;
3970 }
3971 }
3972
3973 /*
3974 * wm_acquire_eeprom:
3975 *
3976 * Perform the EEPROM handshake required on some chips.
3977 */
3978 static int
3979 wm_acquire_eeprom(struct wm_softc *sc)
3980 {
3981 uint32_t reg;
3982 int x;
3983 int ret = 0;
3984
3985 /* Flash-based NVM needs no handshake; always succeeds. */
3986 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3987 return 0;
3988
3989 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3990 ret = wm_get_swfwhw_semaphore(sc);
3991 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3992 /* this will also do wm_get_swsm_semaphore() if needed */
3993 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3994 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3995 ret = wm_get_swsm_semaphore(sc);
3996 }
3997
3998 if (ret) {
3999 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4000 __func__);
4001 return 1;
4002 }
4003
4004 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4005 reg = CSR_READ(sc, WMREG_EECD);
4006
4007 /* Request EEPROM access. */
4008 reg |= EECD_EE_REQ;
4009 CSR_WRITE(sc, WMREG_EECD, reg);
4010
4011 /* ... and wait for it to be granted. */
4012 for (x = 0; x < 1000; x++) {
4013 reg = CSR_READ(sc, WMREG_EECD);
4014 if (reg & EECD_EE_GNT)
4015 break;
4016 delay(5);
4017 }
4018 if ((reg & EECD_EE_GNT) == 0) {
4019 aprint_error_dev(sc->sc_dev,
4020 "could not acquire EEPROM GNT\n");
4021 reg &= ~EECD_EE_REQ;
4022 CSR_WRITE(sc, WMREG_EECD, reg);
4023 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4024 wm_put_swfwhw_semaphore(sc);
4025 else if (sc->sc_flags & WM_F_SWFW_SYNC)
4026 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4027 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4028 wm_put_swsm_semaphore(sc);
4029 return 1;
4030 }
4031 }
4032
4033 return 0;
4034 }
4035
4036 /*
4037 * wm_release_eeprom:
4038 *
4039 * Release the EEPROM mutex.
4040 */
4041 static void
4042 wm_release_eeprom(struct wm_softc *sc)
4043 {
4044 uint32_t reg;
4045
4046 /* Flash-based NVM needs no handshake; nothing to release. */
4047 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4048 return;
4049
4050 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4051 reg = CSR_READ(sc, WMREG_EECD);
4052 reg &= ~EECD_EE_REQ;
4053 CSR_WRITE(sc, WMREG_EECD, reg);
4054 }
4055
4056 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4057 wm_put_swfwhw_semaphore(sc);
4058 else if (sc->sc_flags & WM_F_SWFW_SYNC)
4059 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4060 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4061 wm_put_swsm_semaphore(sc);
4062 }
4063
4064 /*
4065 * wm_eeprom_sendbits:
4066 *
4067 * Send a series of bits to the EEPROM.
4068 */
4069 static void
4070 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4071 {
4072 uint32_t reg;
4073 int x;
4074
4075 reg = CSR_READ(sc, WMREG_EECD);
4076
4077 for (x = nbits; x > 0; x--) {
4078 if (bits & (1U << (x - 1)))
4079 reg |= EECD_DI;
4080 else
4081 reg &= ~EECD_DI;
4082 CSR_WRITE(sc, WMREG_EECD, reg);
4083 delay(2);
4084 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4085 delay(2);
4086 CSR_WRITE(sc, WMREG_EECD, reg);
4087 delay(2);
4088 }
4089 }
4090
4091 /*
4092 * wm_eeprom_recvbits:
4093 *
4094 * Receive a series of bits from the EEPROM.
4095 */
4096 static void
4097 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4098 {
4099 uint32_t reg, val;
4100 int x;
4101
4102 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4103
4104 val = 0;
4105 for (x = nbits; x > 0; x--) {
4106 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4107 delay(2);
4108 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4109 val |= (1U << (x - 1));
4110 CSR_WRITE(sc, WMREG_EECD, reg);
4111 delay(2);
4112 }
4113 *valp = val;
4114 }
4115
4116 /*
4117 * wm_read_eeprom_uwire:
4118 *
4119 * Read a word from the EEPROM using the MicroWire protocol.
4120 */
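/*
* Protocol sketch (illustrative): each word is fetched by raising CS,
* clocking the 3-bit READ command and an sc_ee_addrbits-wide address
* out MSB-first on DI, then clocking 16 data bits back in on DO.
* Assuming the usual MicroWire READ opcode (binary 110), reading word
* 5 of a 64-word part shifts out 110 000101 and shifts in the word.
*/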
4121 static int
4122 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4123 {
4124 uint32_t reg, val;
4125 int i;
4126
4127 for (i = 0; i < wordcnt; i++) {
4128 /* Clear SK and DI. */
4129 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4130 CSR_WRITE(sc, WMREG_EECD, reg);
4131
4132 /* Set CHIP SELECT. */
4133 reg |= EECD_CS;
4134 CSR_WRITE(sc, WMREG_EECD, reg);
4135 delay(2);
4136
4137 /* Shift in the READ command. */
4138 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4139
4140 /* Shift in address. */
4141 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4142
4143 /* Shift out the data. */
4144 wm_eeprom_recvbits(sc, &val, 16);
4145 data[i] = val & 0xffff;
4146
4147 /* Clear CHIP SELECT. */
4148 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4149 CSR_WRITE(sc, WMREG_EECD, reg);
4150 delay(2);
4151 }
4152
4153 return 0;
4154 }
4155
4156 /*
4157 * wm_spi_eeprom_ready:
4158 *
4159 * Wait for a SPI EEPROM to be ready for commands.
4160 */
4161 static int
4162 wm_spi_eeprom_ready(struct wm_softc *sc)
4163 {
4164 uint32_t val;
4165 int usec;
4166
4167 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4168 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4169 wm_eeprom_recvbits(sc, &val, 8);
4170 if ((val & SPI_SR_RDY) == 0)
4171 break;
4172 }
4173 if (usec >= SPI_MAX_RETRIES) {
4174 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4175 return 1;
4176 }
4177 return 0;
4178 }
4179
4180 /*
4181 * wm_read_eeprom_spi:
4182 *
4183 * Read a word from the EEPROM using the SPI protocol.
4184 */
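/*
* Protocol sketch (illustrative): this follows a plain SPI EEPROM
* read sequence: poll RDSR until the busy (write-in-progress) bit
* clears, toggle CS to flush any partial command, then send the READ
* opcode (with the A8 bit folded in on 8-bit-address parts when
* word >= 0x80, since the byte address word << 1 no longer fits in
* 8 bits) followed by the byte address; each word then streams back
* as 16 bits, byte-swapped to host order below.
*/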
4185 static int
4186 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4187 {
4188 uint32_t reg, val;
4189 int i;
4190 uint8_t opc;
4191
4192 /* Clear SK and CS. */
4193 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4194 CSR_WRITE(sc, WMREG_EECD, reg);
4195 delay(2);
4196
4197 if (wm_spi_eeprom_ready(sc))
4198 return 1;
4199
4200 /* Toggle CS to flush commands. */
4201 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4202 delay(2);
4203 CSR_WRITE(sc, WMREG_EECD, reg);
4204 delay(2);
4205
4206 opc = SPI_OPC_READ;
4207 if (sc->sc_ee_addrbits == 8 && word >= 128)
4208 opc |= SPI_OPC_A8;
4209
4210 wm_eeprom_sendbits(sc, opc, 8);
4211 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4212
4213 for (i = 0; i < wordcnt; i++) {
4214 wm_eeprom_recvbits(sc, &val, 16);
4215 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4216 }
4217
4218 /* Raise CS and clear SK. */
4219 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4220 CSR_WRITE(sc, WMREG_EECD, reg);
4221 delay(2);
4222
4223 return 0;
4224 }
4225
4226 #define EEPROM_CHECKSUM 0xBABA
4227 #define EEPROM_SIZE 0x0040
4228
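/*
* For reference, an illustrative sketch (not driver code): NVM images
* are generated so that the 16-bit sum of all EEPROM_SIZE words,
* checksum word included, equals EEPROM_CHECKSUM, so a hypothetical
* tool writing an image would compute the final word like this:
*/
#if 0
static uint16_t
wm_nvm_checksum_word(const uint16_t *words) /* EEPROM_SIZE - 1 data words */
{
        uint16_t sum = 0;
        int i;

        /* Sum every data word; the result wraps modulo 2^16. */
        for (i = 0; i < EEPROM_SIZE - 1; i++)
                sum += words[i];
        return (uint16_t)(EEPROM_CHECKSUM - sum);
}
#endif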
4229 /*
4230 * wm_validate_eeprom_checksum
4231 *
4232 * The checksum is defined as the sum of the first 64 (16 bit) words.
4233 */
4234 static int
4235 wm_validate_eeprom_checksum(struct wm_softc *sc)
4236 {
4237 uint16_t checksum;
4238 uint16_t eeprom_data;
4239 int i;
4240
4241 checksum = 0;
4242
4243 for (i = 0; i < EEPROM_SIZE; i++) {
4244 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4245 return 1;
4246 checksum += eeprom_data;
4247 }
4248
4249 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4250 return 1;
4251
4252 return 0;
4253 }
4254
4255 /*
4256 * wm_read_eeprom:
4257 *
4258 * Read data from the serial EEPROM.
4259 */
4260 static int
4261 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4262 {
4263 int rv;
4264
4265 if (sc->sc_flags & WM_F_EEPROM_INVALID)
4266 return 1;
4267
4268 if (wm_acquire_eeprom(sc))
4269 return 1;
4270
4271 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4272 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4273 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4274 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4275 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4276 else if (sc->sc_flags & WM_F_EEPROM_SPI)
4277 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4278 else
4279 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4280
4281 wm_release_eeprom(sc);
4282 return rv;
4283 }
4284
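/*
* wm_read_eeprom_eerd:
*
* Read data from the EEPROM through the EERD register interface.
*/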
4285 static int
4286 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4287 uint16_t *data)
4288 {
4289 int i, eerd = 0;
4290 int error = 0;
4291
4292 for (i = 0; i < wordcnt; i++) {
4293 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4294
4295 CSR_WRITE(sc, WMREG_EERD, eerd);
4296 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4297 if (error != 0)
4298 break;
4299
4300 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4301 }
4302
4303 return error;
4304 }
4305
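/*
* wm_poll_eerd_eewr_done:
*
* Poll the EERD or EEWR register (selected by "rw") until the DONE
* bit is set; returns 0 on success, -1 on timeout.
*/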
4306 static int
4307 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4308 {
4309 uint32_t attempts = 100000;
4310 uint32_t i, reg = 0;
4311 int32_t done = -1;
4312
4313 for (i = 0; i < attempts; i++) {
4314 reg = CSR_READ(sc, rw);
4315
4316 if (reg & EERD_DONE) {
4317 done = 0;
4318 break;
4319 }
4320 delay(5);
4321 }
4322
4323 return done;
4324 }
4325
4326 /*
4327 * wm_add_rxbuf:
4328 *
4329 * Add a receive buffer to the indicated descriptor.
4330 */
4331 static int
4332 wm_add_rxbuf(struct wm_softc *sc, int idx)
4333 {
4334 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4335 struct mbuf *m;
4336 int error;
4337
4338 MGETHDR(m, M_DONTWAIT, MT_DATA);
4339 if (m == NULL)
4340 return ENOBUFS;
4341
4342 MCLGET(m, M_DONTWAIT);
4343 if ((m->m_flags & M_EXT) == 0) {
4344 m_freem(m);
4345 return ENOBUFS;
4346 }
4347
4348 if (rxs->rxs_mbuf != NULL)
4349 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4350
4351 rxs->rxs_mbuf = m;
4352
4353 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4354 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4355 BUS_DMA_READ|BUS_DMA_NOWAIT);
4356 if (error) {
4357 /* XXX XXX XXX */
4358 aprint_error_dev(sc->sc_dev,
4359 "unable to load rx DMA map %d, error = %d\n",
4360 idx, error);
4361 panic("wm_add_rxbuf");
4362 }
4363
4364 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4365 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4366
4367 WM_INIT_RXDESC(sc, idx);
4368
4369 return 0;
4370 }
4371
4372 /*
4373 * wm_set_ral:
4374 *
4375 * Set an entry in the receive address list.
4376 */
4377 static void
4378 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4379 {
4380 uint32_t ral_lo, ral_hi;
4381
4382 if (enaddr != NULL) {
4383 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4384 (enaddr[3] << 24);
4385 ral_hi = enaddr[4] | (enaddr[5] << 8);
4386 ral_hi |= RAL_AV;
4387 } else {
4388 ral_lo = 0;
4389 ral_hi = 0;
4390 }
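/*
* Example (illustrative): for enaddr 00:11:22:33:44:55 the code above
* packs ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV, i.e.
* the station address is stored byte 0 first across the register
* pair, with the Address Valid bit in the high register.
*/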
4391
4392 if (sc->sc_type >= WM_T_82544) {
4393 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4394 ral_lo);
4395 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4396 ral_hi);
4397 } else {
4398 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4399 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4400 }
4401 }
4402
4403 /*
4404 * wm_mchash:
4405 *
4406 * Compute the hash of the multicast address for the 4096-bit
4407 * multicast filter (1024-bit on ICH8/9/10 and PCH).
4408 */
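/*
* Worked example (illustrative): with mchash_type 0 on a non-ICH
* chip, the address 01:00:5e:00:00:01 gives
* hash = ((0x00 >> 4) | (0x01 << 4)) & 0xfff = 0x010; wm_set_filter()
* then uses hash bits [11:5] as the MTA word index and bits [4:0] as
* the bit within that word.
*/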
4409 static uint32_t
4410 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4411 {
4412 static const int lo_shift[4] = { 4, 3, 2, 0 };
4413 static const int hi_shift[4] = { 4, 5, 6, 8 };
4414 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4415 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4416 uint32_t hash;
4417
4418 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4419 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4420 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4421 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4422 return (hash & 0x3ff);
4423 }
4424 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4425 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4426
4427 return (hash & 0xfff);
4428 }
4429
4430 /*
4431 * wm_set_filter:
4432 *
4433 * Set up the receive filter.
4434 */
4435 static void
4436 wm_set_filter(struct wm_softc *sc)
4437 {
4438 struct ethercom *ec = &sc->sc_ethercom;
4439 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4440 struct ether_multi *enm;
4441 struct ether_multistep step;
4442 bus_addr_t mta_reg;
4443 uint32_t hash, reg, bit;
4444 int i, size;
4445
4446 if (sc->sc_type >= WM_T_82544)
4447 mta_reg = WMREG_CORDOVA_MTA;
4448 else
4449 mta_reg = WMREG_MTA;
4450
4451 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4452
4453 if (ifp->if_flags & IFF_BROADCAST)
4454 sc->sc_rctl |= RCTL_BAM;
4455 if (ifp->if_flags & IFF_PROMISC) {
4456 sc->sc_rctl |= RCTL_UPE;
4457 goto allmulti;
4458 }
4459
4460 /*
4461 * Set the station address in the first RAL slot, and
4462 * clear the remaining slots.
4463 */
4464 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4465 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4466 size = WM_ICH8_RAL_TABSIZE;
4467 else
4468 size = WM_RAL_TABSIZE;
4469 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4470 for (i = 1; i < size; i++)
4471 wm_set_ral(sc, NULL, i);
4472
4473 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4474 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4475 size = WM_ICH8_MC_TABSIZE;
4476 else
4477 size = WM_MC_TABSIZE;
4478 /* Clear out the multicast table. */
4479 for (i = 0; i < size; i++)
4480 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4481
4482 ETHER_FIRST_MULTI(step, ec, enm);
4483 while (enm != NULL) {
4484 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4485 /*
4486 * We must listen to a range of multicast addresses.
4487 * For now, just accept all multicasts, rather than
4488 * trying to set only those filter bits needed to match
4489 * the range. (At this time, the only use of address
4490 * ranges is for IP multicast routing, for which the
4491 * range is big enough to require all bits set.)
4492 */
4493 goto allmulti;
4494 }
4495
4496 hash = wm_mchash(sc, enm->enm_addrlo);
4497
4498 reg = (hash >> 5);
4499 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4500 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4501 reg &= 0x1f;
4502 else
4503 reg &= 0x7f;
4504 bit = hash & 0x1f;
4505
4506 hash = CSR_READ(sc, mta_reg + (reg << 2));
4507 hash |= 1U << bit;
4508
4509 /* XXX Hardware bug?? */
4510 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4511 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4512 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4513 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4514 } else
4515 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4516
4517 ETHER_NEXT_MULTI(step, enm);
4518 }
4519
4520 ifp->if_flags &= ~IFF_ALLMULTI;
4521 goto setit;
4522
4523 allmulti:
4524 ifp->if_flags |= IFF_ALLMULTI;
4525 sc->sc_rctl |= RCTL_MPE;
4526
4527 setit:
4528 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4529 }
4530
4531 /*
4532 * wm_tbi_mediainit:
4533 *
4534 * Initialize media for use on 1000BASE-X devices.
4535 */
4536 static void
4537 wm_tbi_mediainit(struct wm_softc *sc)
4538 {
4539 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4540 const char *sep = "";
4541
4542 if (sc->sc_type < WM_T_82543)
4543 sc->sc_tipg = TIPG_WM_DFLT;
4544 else
4545 sc->sc_tipg = TIPG_LG_DFLT;
4546
4547 sc->sc_tbi_anegticks = 5;
4548
4549 /* Initialize our media structures */
4550 sc->sc_mii.mii_ifp = ifp;
4551
4552 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4553 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4554 wm_tbi_mediastatus);
4555
4556 /*
4557 * SWD Pins:
4558 *
4559 * 0 = Link LED (output)
4560 * 1 = Loss Of Signal (input)
4561 */
4562 sc->sc_ctrl |= CTRL_SWDPIO(0);
4563 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4564
4565 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4566
4567 #define ADD(ss, mm, dd) \
4568 do { \
4569 aprint_normal("%s%s", sep, ss); \
4570 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
4571 sep = ", "; \
4572 } while (/*CONSTCOND*/0)
4573
4574 aprint_normal_dev(sc->sc_dev, "");
4575 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4576 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4577 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4578 aprint_normal("\n");
4579
4580 #undef ADD
4581
4582 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4583 }
4584
4585 /*
4586 * wm_tbi_mediastatus: [ifmedia interface function]
4587 *
4588 * Get the current interface media status on a 1000BASE-X device.
4589 */
4590 static void
4591 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4592 {
4593 struct wm_softc *sc = ifp->if_softc;
4594 uint32_t ctrl, status;
4595
4596 ifmr->ifm_status = IFM_AVALID;
4597 ifmr->ifm_active = IFM_ETHER;
4598
4599 status = CSR_READ(sc, WMREG_STATUS);
4600 if ((status & STATUS_LU) == 0) {
4601 ifmr->ifm_active |= IFM_NONE;
4602 return;
4603 }
4604
4605 ifmr->ifm_status |= IFM_ACTIVE;
4606 ifmr->ifm_active |= IFM_1000_SX;
4607 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4608 ifmr->ifm_active |= IFM_FDX;
4609 ctrl = CSR_READ(sc, WMREG_CTRL);
4610 if (ctrl & CTRL_RFCE)
4611 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4612 if (ctrl & CTRL_TFCE)
4613 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4614 }
4615
4616 /*
4617 * wm_tbi_mediachange: [ifmedia interface function]
4618 *
4619 * Set hardware to newly-selected media on a 1000BASE-X device.
4620 */
4621 static int
4622 wm_tbi_mediachange(struct ifnet *ifp)
4623 {
4624 struct wm_softc *sc = ifp->if_softc;
4625 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4626 uint32_t status;
4627 int i;
4628
4629 sc->sc_txcw = 0;
4630 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4631 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4632 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4633 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4634 sc->sc_txcw |= TXCW_ANE;
4635 } else {
4636 /*
4637 * If autonegotiation is turned off, force link up and turn on
4638 * full duplex
4639 */
4640 sc->sc_txcw &= ~TXCW_ANE;
4641 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4642 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4643 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4644 delay(1000);
4645 }
4646
4647 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4648 device_xname(sc->sc_dev),sc->sc_txcw));
4649 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4650 delay(10000);
4651
4652 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4653 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4654
4655 /*
4656 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4657 * optics detect a signal, 0 if they don't.
4658 */
4659 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4660 /* Have signal; wait for the link to come up. */
4661
4662 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4663 /*
4664 * Reset the link, and let autonegotiation do its thing
4665 */
4666 sc->sc_ctrl |= CTRL_LRST;
4667 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4668 delay(1000);
4669 sc->sc_ctrl &= ~CTRL_LRST;
4670 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4671 delay(1000);
4672 }
4673
4674 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4675 delay(10000);
4676 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4677 break;
4678 }
4679
4680 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4681 device_xname(sc->sc_dev),i));
4682
4683 status = CSR_READ(sc, WMREG_STATUS);
4684 DPRINTF(WM_DEBUG_LINK,
4685 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4686 device_xname(sc->sc_dev),status, STATUS_LU));
4687 if (status & STATUS_LU) {
4688 /* Link is up. */
4689 DPRINTF(WM_DEBUG_LINK,
4690 ("%s: LINK: set media -> link up %s\n",
4691 device_xname(sc->sc_dev),
4692 (status & STATUS_FD) ? "FDX" : "HDX"));
4693
4694 /*
4695 * NOTE: CTRL will update TFCE and RFCE automatically,
4696 * so we should update sc->sc_ctrl
4697 */
4698 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4699 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4700 sc->sc_fcrtl &= ~FCRTL_XONE;
4701 if (status & STATUS_FD)
4702 sc->sc_tctl |=
4703 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4704 else
4705 sc->sc_tctl |=
4706 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4707 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4708 sc->sc_fcrtl |= FCRTL_XONE;
4709 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4710 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4711 WMREG_OLD_FCRTL : WMREG_FCRTL,
4712 sc->sc_fcrtl);
4713 sc->sc_tbi_linkup = 1;
4714 } else {
4715 if (i == WM_LINKUP_TIMEOUT)
4716 wm_check_for_link(sc);
4717 /* Link is down. */
4718 DPRINTF(WM_DEBUG_LINK,
4719 ("%s: LINK: set media -> link down\n",
4720 device_xname(sc->sc_dev)));
4721 sc->sc_tbi_linkup = 0;
4722 }
4723 } else {
4724 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4725 device_xname(sc->sc_dev)));
4726 sc->sc_tbi_linkup = 0;
4727 }
4728
4729 wm_tbi_set_linkled(sc);
4730
4731 return 0;
4732 }
4733
4734 /*
4735 * wm_tbi_set_linkled:
4736 *
4737 * Update the link LED on 1000BASE-X devices.
4738 */
4739 static void
4740 wm_tbi_set_linkled(struct wm_softc *sc)
4741 {
4742
4743 if (sc->sc_tbi_linkup)
4744 sc->sc_ctrl |= CTRL_SWDPIN(0);
4745 else
4746 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4747
4748 /* 82540 or newer devices are active low */
4749 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4750
4751 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4752 }
4753
4754 /*
4755 * wm_tbi_check_link:
4756 *
4757 * Check the link on 1000BASE-X devices.
4758 */
4759 static void
4760 wm_tbi_check_link(struct wm_softc *sc)
4761 {
4762 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4763 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4764 uint32_t rxcw, ctrl, status;
4765
4766 status = CSR_READ(sc, WMREG_STATUS);
4767
4768 rxcw = CSR_READ(sc, WMREG_RXCW);
4769 ctrl = CSR_READ(sc, WMREG_CTRL);
4770
4771 /* set link status */
4772 if ((status & STATUS_LU) == 0) {
4773 DPRINTF(WM_DEBUG_LINK,
4774 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4775 sc->sc_tbi_linkup = 0;
4776 } else if (sc->sc_tbi_linkup == 0) {
4777 DPRINTF(WM_DEBUG_LINK,
4778 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4779 (status & STATUS_FD) ? "FDX" : "HDX"));
4780 sc->sc_tbi_linkup = 1;
4781 }
4782
4783 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4784 && ((status & STATUS_LU) == 0)) {
4785 sc->sc_tbi_linkup = 0;
4786 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4787 /* RXCFG storm! */
4788 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4789 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4790 wm_init(ifp);
4791 wm_start(ifp);
4792 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4793 /* If the timer expired, retry autonegotiation */
4794 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4795 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4796 sc->sc_tbi_ticks = 0;
4797 /*
4798 * Reset the link, and let autonegotiation do
4799 * its thing
4800 */
4801 sc->sc_ctrl |= CTRL_LRST;
4802 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4803 delay(1000);
4804 sc->sc_ctrl &= ~CTRL_LRST;
4805 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4806 delay(1000);
4807 CSR_WRITE(sc, WMREG_TXCW,
4808 sc->sc_txcw & ~TXCW_ANE);
4809 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4810 }
4811 }
4812 }
4813
4814 wm_tbi_set_linkled(sc);
4815 }
4816
4817 /*
4818 * wm_gmii_reset:
4819 *
4820 * Reset the PHY.
4821 */
4822 static void
4823 wm_gmii_reset(struct wm_softc *sc)
4824 {
4825 uint32_t reg;
4826 int func = 0; /* XXX gcc */
4827 int rv;
4828
4829 /* get phy semaphore */
4830 switch (sc->sc_type) {
4831 case WM_T_82571:
4832 case WM_T_82572:
4833 case WM_T_82573:
4834 case WM_T_82574:
4835 case WM_T_82583:
4836 /* XXX should get sw semaphore, too */
4837 rv = wm_get_swsm_semaphore(sc);
4838 break;
4839 case WM_T_80003:
4840 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4841 rv = wm_get_swfw_semaphore(sc,
4842 func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4843 break;
4844 case WM_T_ICH8:
4845 case WM_T_ICH9:
4846 case WM_T_ICH10:
4847 case WM_T_PCH:
4848 rv = wm_get_swfwhw_semaphore(sc);
4849 break;
4850 default:
4851 /* nothing to do */
4852 rv = 0;
4853 break;
4854 }
4855 if (rv != 0) {
4856 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4857 __func__);
4858 return;
4859 }
4860
4861 switch (sc->sc_type) {
4862 case WM_T_82542_2_0:
4863 case WM_T_82542_2_1:
4864 /* null */
4865 break;
4866 case WM_T_82543:
4867 /*
4868 * With 82543, we need to force speed and duplex on the MAC
4869 * equal to what the PHY speed and duplex configuration is.
4870 * In addition, we need to perform a hardware reset on the PHY
4871 * to take it out of reset.
4872 */
4873 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4874 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4875
4876 /* The PHY reset pin is active-low. */
4877 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4878 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4879 CTRL_EXT_SWDPIN(4));
4880 reg |= CTRL_EXT_SWDPIO(4);
4881
4882 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4883 delay(10*1000);
4884
4885 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4886 delay(150);
4887 #if 0
4888 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4889 #endif
4890 delay(20*1000); /* XXX extra delay to get PHY ID? */
4891 break;
4892 case WM_T_82544: /* reset 10000us */
4893 case WM_T_82540:
4894 case WM_T_82545:
4895 case WM_T_82545_3:
4896 case WM_T_82546:
4897 case WM_T_82546_3:
4898 case WM_T_82541:
4899 case WM_T_82541_2:
4900 case WM_T_82547:
4901 case WM_T_82547_2:
4902 case WM_T_82571: /* reset 100us */
4903 case WM_T_82572:
4904 case WM_T_82573:
4905 case WM_T_82574:
4906 case WM_T_82583:
4907 case WM_T_80003:
4908 /* generic reset */
4909 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4910 delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
4911 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4912 delay(150);
4913
4914 if ((sc->sc_type == WM_T_82541)
4915 || (sc->sc_type == WM_T_82541_2)
4916 || (sc->sc_type == WM_T_82547)
4917 || (sc->sc_type == WM_T_82547_2)) {
4918 /* workarounds for IGP PHYs are done in igp_reset() */
4919 /* XXX add code to set LED after phy reset */
4920 }
4921 break;
4922 case WM_T_ICH8:
4923 case WM_T_ICH9:
4924 case WM_T_ICH10:
4925 case WM_T_PCH:
4926 /* generic reset */
4927 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4928 delay(100);
4929 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4930 delay(150);
4931 break;
4932 default:
4933 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4934 __func__);
4935 break;
4936 }
4937
4938 /* release PHY semaphore */
4939 switch (sc->sc_type) {
4940 case WM_T_82571:
4941 case WM_T_82572:
4942 case WM_T_82573:
4943 case WM_T_82574:
4944 case WM_T_82583:
4945 /* XXX should put sw semaphore, too */
4946 wm_put_swsm_semaphore(sc);
4947 break;
4948 case WM_T_80003:
4949 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4950 break;
4951 case WM_T_ICH8:
4952 case WM_T_ICH9:
4953 case WM_T_ICH10:
4954 case WM_T_PCH:
4955 wm_put_swfwhw_semaphore(sc);
4956 break;
4957 default:
4958 /* nothing to do */
4959 rv = 0;
4960 break;
4961 }
4962
4963 /* get_cfg_done */
4964 wm_get_cfg_done(sc);
4965
4966 /* extra setup */
4967 switch (sc->sc_type) {
4968 case WM_T_82542_2_0:
4969 case WM_T_82542_2_1:
4970 case WM_T_82543:
4971 case WM_T_82544:
4972 case WM_T_82540:
4973 case WM_T_82545:
4974 case WM_T_82545_3:
4975 case WM_T_82546:
4976 case WM_T_82546_3:
4977 case WM_T_82541_2:
4978 case WM_T_82547_2:
4979 case WM_T_82571:
4980 case WM_T_82572:
4981 case WM_T_82573:
4982 case WM_T_82574:
4983 case WM_T_82583:
4984 case WM_T_80003:
4985 /* null */
4986 break;
4987 case WM_T_82541:
4988 case WM_T_82547:
4989 /* XXX Configure actively LED after PHY reset */
4990 break;
4991 case WM_T_ICH8:
4992 case WM_T_ICH9:
4993 case WM_T_ICH10:
4994 case WM_T_PCH:
4995 /* Allow time for h/w to get to a quiescent state after reset */
4996 delay(10*1000);
4997
4998 if (sc->sc_type == WM_T_PCH) {
4999 wm_hv_phy_workaround_ich8lan(sc);
5000
5001 /*
5002 * dummy read to clear the phy wakeup bit after lcd
5003 * reset
5004 */
5005 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5006 }
5007
5008 /*
5009 * XXX Configure the LCD with the extended configuration region
5010 * in NVM
5011 */
5012
5013 /* Configure the LCD with the OEM bits in NVM */
5014 if (sc->sc_type == WM_T_PCH) {
5015 /*
5016 * Disable LPLU.
5017 * XXX It seems that 82567 has LPLU, too.
5018 */
5019 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
5020 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
5021 reg |= HV_OEM_BITS_ANEGNOW;
5022 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5023 }
5024 break;
5025 default:
5026 panic("%s: unknown type\n", __func__);
5027 break;
5028 }
5029 }
5030
5031 /*
5032 * wm_gmii_mediainit:
5033 *
5034 * Initialize media for use on 1000BASE-T devices.
5035 */
5036 static void
5037 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5038 {
5039 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5040
5041 /* We have MII. */
5042 sc->sc_flags |= WM_F_HAS_MII;
5043
5044 if (sc->sc_type == WM_T_80003)
5045 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5046 else
5047 sc->sc_tipg = TIPG_1000T_DFLT;
5048
5049 /*
5050 * Let the chip set speed/duplex on its own based on
5051 * signals from the PHY.
5052 * XXXbouyer - I'm not sure this is right for the 80003,
5053 * the em driver only sets CTRL_SLU here - but it seems to work.
5054 */
5055 sc->sc_ctrl |= CTRL_SLU;
5056 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5057
5058 /* Initialize our media structures and probe the GMII. */
5059 sc->sc_mii.mii_ifp = ifp;
5060
5061 switch (prodid) {
5062 case PCI_PRODUCT_INTEL_PCH_M_LM:
5063 case PCI_PRODUCT_INTEL_PCH_M_LC:
5064 /* 82577 */
5065 sc->sc_phytype = WMPHY_82577;
5066 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5067 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5068 break;
5069 case PCI_PRODUCT_INTEL_PCH_D_DM:
5070 case PCI_PRODUCT_INTEL_PCH_D_DC:
5071 /* 82578 */
5072 sc->sc_phytype = WMPHY_82578;
5073 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5074 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5075 break;
5076 case PCI_PRODUCT_INTEL_82801I_BM:
5077 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5078 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5079 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5080 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5081 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5082 /* 82567 */
5083 sc->sc_phytype = WMPHY_BM;
5084 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5085 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5086 break;
5087 default:
5088 if (sc->sc_type >= WM_T_80003) {
5089 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5090 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5091 } else if (sc->sc_type >= WM_T_82544) {
5092 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5093 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5094 } else {
5095 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5096 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5097 }
5098 break;
5100 }
5101 sc->sc_mii.mii_statchg = wm_gmii_statchg;
5102
5103 wm_gmii_reset(sc);
5104
5105 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5106 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5107 wm_gmii_mediastatus);
5108
5109 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5110 MII_OFFSET_ANY, MIIF_DOPAUSE);
5111
5112 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5113 /* if failed, retry with *_bm_* */
5114 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5115 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5116
5117 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5118 MII_OFFSET_ANY, MIIF_DOPAUSE);
5119 }
5120 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5121 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5122 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5123 sc->sc_phytype = WMPHY_NONE;
5124 } else {
5125 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
5126 }
5127 }
5128
5129 /*
5130 * wm_gmii_mediastatus: [ifmedia interface function]
5131 *
5132 * Get the current interface media status on a 1000BASE-T device.
5133 */
5134 static void
5135 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5136 {
5137 struct wm_softc *sc = ifp->if_softc;
5138
5139 ether_mediastatus(ifp, ifmr);
5140 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
5141 sc->sc_flowflags;
5142 }
5143
5144 /*
5145 * wm_gmii_mediachange: [ifmedia interface function]
5146 *
5147 * Set hardware to newly-selected media on a 1000BASE-T device.
5148 */
5149 static int
5150 wm_gmii_mediachange(struct ifnet *ifp)
5151 {
5152 struct wm_softc *sc = ifp->if_softc;
5153 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5154 int rc;
5155
5156 if ((ifp->if_flags & IFF_UP) == 0)
5157 return 0;
5158
5159 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5160 sc->sc_ctrl |= CTRL_SLU;
5161 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5162 || (sc->sc_type > WM_T_82543)) {
5163 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5164 } else {
5165 sc->sc_ctrl &= ~CTRL_ASDE;
5166 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5167 if (ife->ifm_media & IFM_FDX)
5168 sc->sc_ctrl |= CTRL_FD;
5169 switch (IFM_SUBTYPE(ife->ifm_media)) {
5170 case IFM_10_T:
5171 sc->sc_ctrl |= CTRL_SPEED_10;
5172 break;
5173 case IFM_100_TX:
5174 sc->sc_ctrl |= CTRL_SPEED_100;
5175 break;
5176 case IFM_1000_T:
5177 sc->sc_ctrl |= CTRL_SPEED_1000;
5178 break;
5179 default:
5180 panic("wm_gmii_mediachange: bad media 0x%x",
5181 ife->ifm_media);
5182 }
5183 }
5184 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5185 if (sc->sc_type <= WM_T_82543)
5186 wm_gmii_reset(sc);
5187
5188 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5189 return 0;
5190 return rc;
5191 }
5192
5193 #define MDI_IO CTRL_SWDPIN(2)
5194 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
5195 #define MDI_CLK CTRL_SWDPIN(3)
5196
5197 static void
5198 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5199 {
5200 uint32_t i, v;
5201
5202 v = CSR_READ(sc, WMREG_CTRL);
5203 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5204 v |= MDI_DIR | CTRL_SWDPIO(3);
5205
5206 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5207 if (data & i)
5208 v |= MDI_IO;
5209 else
5210 v &= ~MDI_IO;
5211 CSR_WRITE(sc, WMREG_CTRL, v);
5212 delay(10);
5213 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5214 delay(10);
5215 CSR_WRITE(sc, WMREG_CTRL, v);
5216 delay(10);
5217 }
5218 }
5219
5220 static uint32_t
5221 i82543_mii_recvbits(struct wm_softc *sc)
5222 {
5223 uint32_t v, i, data = 0;
5224
5225 v = CSR_READ(sc, WMREG_CTRL);
5226 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5227 v |= CTRL_SWDPIO(3);
5228
5229 CSR_WRITE(sc, WMREG_CTRL, v);
5230 delay(10);
5231 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5232 delay(10);
5233 CSR_WRITE(sc, WMREG_CTRL, v);
5234 delay(10);
5235
5236 for (i = 0; i < 16; i++) {
5237 data <<= 1;
5238 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5239 delay(10);
5240 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5241 data |= 1;
5242 CSR_WRITE(sc, WMREG_CTRL, v);
5243 delay(10);
5244 }
5245
5246 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5247 delay(10);
5248 CSR_WRITE(sc, WMREG_CTRL, v);
5249 delay(10);
5250
5251 return data;
5252 }
5253
5254 #undef MDI_IO
5255 #undef MDI_DIR
5256 #undef MDI_CLK
5257
5258 /*
5259 * wm_gmii_i82543_readreg: [mii interface function]
5260 *
5261 * Read a PHY register on the GMII (i82543 version).
5262 */
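/*
* Frame sketch (illustrative): the exchange below is a standard IEEE
* 802.3 clause 22 MDIO read, bit-banged on SWDPIN 2/3: a 32-bit
* preamble of ones, then 14 bits laid out as <start:13-12><op:11-10>
* <phy:9-5><reg:4-0>, then a turnaround and 16 data bits clocked back
* in by i82543_mii_recvbits().
*/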
5263 static int
5264 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5265 {
5266 struct wm_softc *sc = device_private(self);
5267 int rv;
5268
5269 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5270 i82543_mii_sendbits(sc, reg | (phy << 5) |
5271 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5272 rv = i82543_mii_recvbits(sc) & 0xffff;
5273
5274 DPRINTF(WM_DEBUG_GMII,
5275 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5276 device_xname(sc->sc_dev), phy, reg, rv));
5277
5278 return rv;
5279 }
5280
5281 /*
5282 * wm_gmii_i82543_writereg: [mii interface function]
5283 *
5284 * Write a PHY register on the GMII (i82543 version).
5285 */
5286 static void
5287 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5288 {
5289 struct wm_softc *sc = device_private(self);
5290
5291 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5292 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5293 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5294 (MII_COMMAND_START << 30), 32);
5295 }
5296
5297 /*
5298 * wm_gmii_i82544_readreg: [mii interface function]
5299 *
5300 * Read a PHY register on the GMII.
5301 */
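/*
* Note (illustrative): the ready-bit poll below allows 320 iterations
* of 10us, i.e. about 3.2ms, before declaring a timeout; a single
* clause 22 MDIO frame at typical MDC rates completes in well under
* 100us, so this budget is generous.
*/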
5302 static int
5303 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5304 {
5305 struct wm_softc *sc = device_private(self);
5306 uint32_t mdic = 0;
5307 int i, rv;
5308
5309 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5310 MDIC_REGADD(reg));
5311
5312 for (i = 0; i < 320; i++) {
5313 mdic = CSR_READ(sc, WMREG_MDIC);
5314 if (mdic & MDIC_READY)
5315 break;
5316 delay(10);
5317 }
5318
5319 if ((mdic & MDIC_READY) == 0) {
5320 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5321 device_xname(sc->sc_dev), phy, reg);
5322 rv = 0;
5323 } else if (mdic & MDIC_E) {
5324 #if 0 /* This is normal if no PHY is present. */
5325 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5326 device_xname(sc->sc_dev), phy, reg);
5327 #endif
5328 rv = 0;
5329 } else {
5330 rv = MDIC_DATA(mdic);
5331 if (rv == 0xffff)
5332 rv = 0;
5333 }
5334
5335 return rv;
5336 }
5337
5338 /*
5339 * wm_gmii_i82544_writereg: [mii interface function]
5340 *
5341 * Write a PHY register on the GMII.
5342 */
5343 static void
5344 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5345 {
5346 struct wm_softc *sc = device_private(self);
5347 uint32_t mdic = 0;
5348 int i;
5349
5350 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5351 MDIC_REGADD(reg) | MDIC_DATA(val));
5352
5353 for (i = 0; i < 320; i++) {
5354 mdic = CSR_READ(sc, WMREG_MDIC);
5355 if (mdic & MDIC_READY)
5356 break;
5357 delay(10);
5358 }
5359
5360 if ((mdic & MDIC_READY) == 0)
5361 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5362 device_xname(sc->sc_dev), phy, reg);
5363 else if (mdic & MDIC_E)
5364 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5365 device_xname(sc->sc_dev), phy, reg);
5366 }
5367
5368 /*
5369 * wm_gmii_i80003_readreg: [mii interface function]
5370 *
5371 * Read a PHY register on the kumeran bus (80003).
5372 * This could be handled by the PHY layer if we didn't have to lock
5373 * the resource ourselves.
5374 */
5375 static int
5376 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5377 {
5378 struct wm_softc *sc = device_private(self);
5379 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5380 int rv;
5381
5382 if (phy != 1) /* only one PHY on kumeran bus */
5383 return 0;
5384
5385 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5386 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5387 __func__);
5388 return 0;
5389 }
5390
5391 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5392 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5393 reg >> GG82563_PAGE_SHIFT);
5394 } else {
5395 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5396 reg >> GG82563_PAGE_SHIFT);
5397 }
5398 /* Wait another 200us to work around a bug with the ready bit in the MDIC register */
5399 delay(200);
5400 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5401 delay(200);
5402
5403 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5404 return rv;
5405 }
5406
5407 /*
5408 * wm_gmii_i80003_writereg: [mii interface function]
5409 *
5410 * Write a PHY register on the kumeran bus (80003).
5411 * This could be handled by the PHY layer if we didn't have to lock
5412 * the resource ourselves.
5413 */
5414 static void
5415 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5416 {
5417 struct wm_softc *sc = device_private(self);
5418 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5419
5420 if (phy != 1) /* only one PHY on kumeran bus */
5421 return;
5422
5423 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5424 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5425 __func__);
5426 return;
5427 }
5428
5429 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5430 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5431 reg >> GG82563_PAGE_SHIFT);
5432 } else {
5433 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5434 reg >> GG82563_PAGE_SHIFT);
5435 }
5436 /* Wait another 200us to work around a bug with the ready bit in the MDIC register */
5437 delay(200);
5438 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5439 delay(200);
5440
5441 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5442 }
5443
5444 /*
5445 * wm_gmii_bm_readreg: [mii interface function]
5446 *
5447 * Read a PHY register on the BM PHY (82567).
5448 * This could be handled by the PHY layer if we didn't have to lock
5449 * the resource ourselves.
5450 */
5451 static int
5452 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5453 {
5454 struct wm_softc *sc = device_private(self);
5455 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5456 int rv;
5457
5458 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5459 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5460 __func__);
5461 return 0;
5462 }
5463
5464 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
5465 if (phy == 1)
5466 wm_gmii_i82544_writereg(self, phy, 0x1f,
5467 reg);
5468 else
5469 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5470 reg >> GG82563_PAGE_SHIFT);
5471
5472 }
5473
5474 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5475 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5476 return rv;
5477 }
5478
5479 /*
5480 * wm_gmii_bm_writereg: [mii interface function]
5481 *
5482 * Write a PHY register on the BM PHY (82567).
5483 * This could be handled by the PHY layer if we didn't have to lock
5484 * the resource ourselves.
5485 */
5486 static void
5487 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5488 {
5489 struct wm_softc *sc = device_private(self);
5490 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5491
5492 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5493 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5494 __func__);
5495 return;
5496 }
5497
5498 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
5499 if (phy == 1)
5500 wm_gmii_i82544_writereg(self, phy, 0x1f,
5501 reg);
5502 else
5503 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5504 reg >> GG82563_PAGE_SHIFT);
5505
5506 }
5507
5508 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5509 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5510 }
5511
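/*
* wm_access_phy_wakeup_reg_bm:
*
* Read (rd != 0) or write a BM PHY wakeup register on page 800,
* enabling host wakeup access through the page 769 WUC enable
* register around the access, as the inline steps below describe.
*/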
5512 static void
5513 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
5514 {
5515 struct wm_softc *sc = device_private(self);
5516 uint16_t regnum = BM_PHY_REG_NUM(offset);
5517 uint16_t wuce;
5518
5519 /* XXX Gig must be disabled for MDIO accesses to page 800 */
5520 if (sc->sc_type == WM_T_PCH) {
5521 /* XXX the e1000 driver does nothing here... why? */
5522 }
5523
5524 /* Set page 769 */
5525 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5526 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5527
5528 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
5529
5530 wuce &= ~BM_WUC_HOST_WU_BIT;
5531 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
5532 wuce | BM_WUC_ENABLE_BIT);
5533
5534 /* Select page 800 */
5535 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5536 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
5537
5538 /* Write page 800 */
5539 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
5540
5541 if (rd)
5542 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
5543 else
5544 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
5545
5546 /* Set page 769 */
5547 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5548 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5549
5550 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
5551 }
5552
5553 /*
5554 * wm_gmii_hv_readreg: [mii interface function]
5555 *
5556 * Read a PHY register on the HV PHY (82577/82578).
5557 * This could be handled by the PHY layer if we didn't have to lock
5558 * the resource ourselves.
5559 */
5560 static int
5561 wm_gmii_hv_readreg(device_t self, int phy, int reg)
5562 {
5563 struct wm_softc *sc = device_private(self);
5564 uint16_t page = BM_PHY_REG_PAGE(reg);
5565 uint16_t regnum = BM_PHY_REG_NUM(reg);
5566 uint16_t val;
5567 int rv;
5568
5569 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5570 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5571 __func__);
5572 return 0;
5573 }
5574
5575 /* XXX Workaround failure in MDIO access while cable is disconnected */
5576 if (sc->sc_phytype == WMPHY_82577) {
5577 /* XXX must write */
5578 }
5579
5580 /* Page 800 works differently than the rest so it has its own func */
5581 if (page == BM_WUC_PAGE) {
5582 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
5583 return val;
5584 }
5585
5586 /*
5587 * Pages below 768 work differently from the rest and would need
5588 * their own access function (unimplemented; complain and bail)
5589 */
5590 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
5591 printf("gmii_hv_readreg!!!\n");
5592 return 0;
5593 }
5594
5595 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
5596 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5597 page << BME1000_PAGE_SHIFT);
5598 }
5599
5600 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
5601 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5602 return rv;
5603 }
5604
5605 /*
5606 * wm_gmii_hv_writereg: [mii interface function]
5607 *
5608 * Write a PHY register on the HV PHY (82577/82578).
5609 * This could be handled by the PHY layer if we didn't have to lock
5610 * the resource ourselves.
5611 */
5612 static void
5613 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
5614 {
5615 struct wm_softc *sc = device_private(self);
5616 uint16_t page = BM_PHY_REG_PAGE(reg);
5617 uint16_t regnum = BM_PHY_REG_NUM(reg);
5618
5619 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5620 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5621 __func__);
5622 return;
5623 }
5624
5625 /* XXX Workaround failure in MDIO access while cable is disconnected */
5626
5627 /* Page 800 works differently than the rest so it has its own func */
5628 if (page == BM_WUC_PAGE) {
5629 uint16_t tmp;
5630
5631 tmp = val;
5632 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
5633 return;
5634 }
5635
5636 /*
5637 * Pages below 768 work differently from the rest and would need
5638 * their own access function (unimplemented; complain and bail)
5639 */
5640 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
5641 printf("gmii_hv_writereg!!!\n");
5642 return;
5643 }
5644
5645 /*
5646 * XXX Workaround MDIO accesses being disabled after entering IEEE
5647 * Power Down (whenever bit 11 of the PHY control register is set)
5648 */
5649
5650 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
5651 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5652 page << BME1000_PAGE_SHIFT);
5653 }
5654
5655 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
5656 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5657 }
5658
5659 /*
5660 * wm_gmii_statchg: [mii interface function]
5661 *
5662 * Callback from MII layer when media changes.
5663 */
5664 static void
5665 wm_gmii_statchg(device_t self)
5666 {
5667 struct wm_softc *sc = device_private(self);
5668 struct mii_data *mii = &sc->sc_mii;
5669
5670 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5671 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5672 sc->sc_fcrtl &= ~FCRTL_XONE;
5673
5674 /*
5675 * Get flow control negotiation result.
5676 */
5677 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
5678 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
5679 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
5680 mii->mii_media_active &= ~IFM_ETH_FMASK;
5681 }
5682
5683 if (sc->sc_flowflags & IFM_FLOW) {
5684 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
5685 sc->sc_ctrl |= CTRL_TFCE;
5686 sc->sc_fcrtl |= FCRTL_XONE;
5687 }
5688 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
5689 sc->sc_ctrl |= CTRL_RFCE;
5690 }
5691
5692 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5693 DPRINTF(WM_DEBUG_LINK,
5694 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
5695 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5696 } else {
5697 DPRINTF(WM_DEBUG_LINK,
5698 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
5699 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5700 }
5701
5702 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5703 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5704 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
5705 : WMREG_FCRTL, sc->sc_fcrtl);
5706 if (sc->sc_type == WM_T_80003) {
5707 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
5708 case IFM_1000_T:
5709 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5710 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
5711 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5712 break;
5713 default:
5714 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5715 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
5716 sc->sc_tipg = TIPG_10_100_80003_DFLT;
5717 break;
5718 }
5719 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5720 }
5721 }
5722
5723 /*
5724 * wm_kmrn_readreg:
5725 *
5726 * Read a kumeran register
5727 */
5728 static int
5729 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5730 {
5731 int rv;
5732
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5734 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5735 aprint_error_dev(sc->sc_dev,
5736 "%s: failed to get semaphore\n", __func__);
5737 return 0;
5738 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5740 if (wm_get_swfwhw_semaphore(sc)) {
5741 aprint_error_dev(sc->sc_dev,
5742 "%s: failed to get semaphore\n", __func__);
5743 return 0;
5744 }
5745 }
5746
5747 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5748 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5749 KUMCTRLSTA_REN);
5750 delay(2);
5751
5752 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5753
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5757 wm_put_swfwhw_semaphore(sc);
5758
5759 return rv;
5760 }
5761
5762 /*
5763 * wm_kmrn_writereg:
5764 *
5765 * Write a kumeran register
5766 */
5767 static void
5768 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5769 {
5770
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5772 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5773 aprint_error_dev(sc->sc_dev,
5774 "%s: failed to get semaphore\n", __func__);
5775 return;
5776 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5778 if (wm_get_swfwhw_semaphore(sc)) {
5779 aprint_error_dev(sc->sc_dev,
5780 "%s: failed to get semaphore\n", __func__);
5781 return;
5782 }
5783 }
5784
5785 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5786 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5787 (val & KUMCTRLSTA_MASK));
5788
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5792 wm_put_swfwhw_semaphore(sc);
5793 }
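
/*
 * Usage sketch (kept disabled): the Kumeran accessors above are used
 * like ordinary indexed-register helpers; this mirrors the WM_T_80003
 * half-duplex setup performed in wm_gmii_statchg() above.
 */
#if 0
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
	(void)wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
#endif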
5794
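/*
 * wm_is_onboard_nvm_eeprom:
 *
 * Return 1 if the NVM is an on-board EEPROM, or 0 if it is a Flash
 * part.  On 82573/82574/82583, EECD bits 15 and 16 both being set
 * indicate Flash.
 */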
5795 static int
5796 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5797 {
5798 uint32_t eecd = 0;
5799
5800 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
5801 || sc->sc_type == WM_T_82583) {
5802 eecd = CSR_READ(sc, WMREG_EECD);
5803
5804 /* Isolate bits 15 & 16 */
5805 eecd = ((eecd >> 15) & 0x03);
5806
5807 /* If both bits are set, device is Flash type */
5808 if (eecd == 0x03)
5809 return 0;
5810 }
5811 return 1;
5812 }
5813
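/*
 * wm_get_swsm_semaphore:
 *
 * Acquire the software semaphore by setting SWSM.SWESMBI and reading
 * it back; if the bit sticks, we own the semaphore.  Return 0 on
 * success and 1 on timeout.
 */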
5814 static int
5815 wm_get_swsm_semaphore(struct wm_softc *sc)
5816 {
5817 int32_t timeout;
5818 uint32_t swsm;
5819
5820 /* Get the FW semaphore. */
5821 timeout = 1000 + 1; /* XXX */
5822 while (timeout) {
5823 swsm = CSR_READ(sc, WMREG_SWSM);
5824 swsm |= SWSM_SWESMBI;
5825 CSR_WRITE(sc, WMREG_SWSM, swsm);
5826 /* if we managed to set the bit we got the semaphore. */
5827 swsm = CSR_READ(sc, WMREG_SWSM);
5828 if (swsm & SWSM_SWESMBI)
5829 break;
5830
5831 delay(50);
5832 timeout--;
5833 }
5834
5835 if (timeout == 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not acquire SWSM SWESMBI semaphore\n");
5837 /* Release semaphores */
5838 wm_put_swsm_semaphore(sc);
5839 return 1;
5840 }
5841 return 0;
5842 }
5843
5844 static void
5845 wm_put_swsm_semaphore(struct wm_softc *sc)
5846 {
5847 uint32_t swsm;
5848
5849 swsm = CSR_READ(sc, WMREG_SWSM);
5850 swsm &= ~(SWSM_SWESMBI);
5851 CSR_WRITE(sc, WMREG_SWSM, swsm);
5852 }
5853
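/*
 * wm_get_swfw_semaphore:
 *
 * Acquire the software half of the software/firmware semaphore in
 * SW_FW_SYNC for the resources in "mask".  Where the hardware has an
 * EEPROM semaphore, SWSM guards access to SW_FW_SYNC itself.  Return 0
 * on success and 1 on timeout.
 */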
5854 static int
5855 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5856 {
5857 uint32_t swfw_sync;
5858 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5859 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
5861
5862 for (timeout = 0; timeout < 200; timeout++) {
5863 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5864 if (wm_get_swsm_semaphore(sc)) {
5865 aprint_error_dev(sc->sc_dev,
5866 "%s: failed to get semaphore\n",
5867 __func__);
5868 return 1;
5869 }
5870 }
5871 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5872 if ((swfw_sync & (swmask | fwmask)) == 0) {
5873 swfw_sync |= swmask;
5874 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5875 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5876 wm_put_swsm_semaphore(sc);
5877 return 0;
5878 }
5879 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5880 wm_put_swsm_semaphore(sc);
5881 delay(5000);
5882 }
5883 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5884 device_xname(sc->sc_dev), mask, swfw_sync);
5885 return 1;
5886 }
5887
5888 static void
5889 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5890 {
5891 uint32_t swfw_sync;
5892
5893 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5894 while (wm_get_swsm_semaphore(sc) != 0)
5895 continue;
5896 }
5897 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5898 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5899 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5900 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5901 wm_put_swsm_semaphore(sc);
5902 }
5903
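/*
 * wm_get_swfwhw_semaphore:
 *
 * Acquire the software/firmware/hardware semaphore by setting the
 * SWFLAG bit in EXTCNFCTR and checking that it sticks.  Return 0 on
 * success and 1 on timeout.
 */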
5904 static int
5905 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5906 {
5907 uint32_t ext_ctrl;
	int timeout;
5909
5910 for (timeout = 0; timeout < 200; timeout++) {
5911 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5912 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5913 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5914
5915 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5916 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5917 return 0;
5918 delay(5000);
5919 }
5920 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5921 device_xname(sc->sc_dev), ext_ctrl);
5922 return 1;
5923 }
5924
5925 static void
5926 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5927 {
5928 uint32_t ext_ctrl;
5929 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5930 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5931 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5932 }
5933
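/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 * Detect which of the two NVM banks is the valid one.  On chips other
 * than ICH10/PCH, the EECD SEC1VAL bit tells us directly; on ICH10 and
 * PCH we instead read the high byte of each bank's signature word,
 * whose top two bits are 10b for a valid bank.
 */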
5934 static int
5935 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5936 {
5937 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5938 uint8_t bank_high_byte;
5939 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5940
5941 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
5942 /* Value of bit 22 corresponds to the flash bank we're on. */
5943 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5944 } else {
5945 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5946 if ((bank_high_byte & 0xc0) == 0x80)
5947 *bank = 0;
5948 else {
5949 wm_read_ich8_byte(sc, act_offset + bank1_offset,
5950 &bank_high_byte);
5951 if ((bank_high_byte & 0xc0) == 0x80)
5952 *bank = 1;
5953 else {
5954 aprint_error_dev(sc->sc_dev,
5955 "EEPROM not present\n");
5956 return -1;
5957 }
5958 }
5959 }
5960
5961 return 0;
5962 }
5963
5964 /******************************************************************************
5965 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5966 * register.
5967 *
5968 * sc - Struct containing variables accessed by shared code
5969 * offset - offset of word in the EEPROM to read
5970 * data - word read from the EEPROM
5971 * words - number of words to read
5972 *****************************************************************************/
5973 static int
5974 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5975 {
5976 int32_t error = 0;
5977 uint32_t flash_bank = 0;
5978 uint32_t act_offset = 0;
5979 uint32_t bank_offset = 0;
5980 uint16_t word = 0;
5981 uint16_t i = 0;
5982
	/*
	 * We need to know which is the valid flash bank.  In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank.  So it cannot be trusted and needs to be
	 * updated with each read.
	 */
5988 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5989 if (error) {
5990 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5991 __func__);
5992 return error;
5993 }
5994
	/*
	 * Adjust offset appropriately if we're on bank 1 - adjust for
	 * word size.
	 */
5996 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5997
5998 error = wm_get_swfwhw_semaphore(sc);
5999 if (error) {
6000 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6001 __func__);
6002 return error;
6003 }
6004
6005 for (i = 0; i < words; i++) {
6006 /* The NVM part needs a byte offset, hence * 2 */
6007 act_offset = bank_offset + ((offset + i) * 2);
6008 error = wm_read_ich8_word(sc, act_offset, &word);
6009 if (error) {
6010 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6011 __func__);
6012 break;
6013 }
6014 data[i] = word;
6015 }
6016
6017 wm_put_swfwhw_semaphore(sc);
6018 return error;
6019 }
6020
6021 /******************************************************************************
6022 * This function does initial flash setup so that a new read/write/erase cycle
6023 * can be started.
6024 *
6025 * sc - The pointer to the hw structure
6026 ****************************************************************************/
6027 static int32_t
6028 wm_ich8_cycle_init(struct wm_softc *sc)
6029 {
6030 uint16_t hsfsts;
6031 int32_t error = 1;
6032 int32_t i = 0;
6033
6034 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6035
	/* Check the Flash Descriptor Valid bit in HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
6040
	/* Clear FCERR and DAEL in HW status by writing 1s */
6043 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6044
6045 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6046
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against, in order to start a new cycle, or FDONE should
	 * be set by hardware after reset so that it can be used as an
	 * indication of whether a cycle is in progress or has been
	 * completed.  We should also have some software semaphore
	 * mechanism to guard FDONE or the cycle-in-progress bit so that
	 * accesses by two threads are serialized, or so that two
	 * threads don't start a cycle at the same time.
	 */
6057
6058 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6059 /*
6060 * There is no cycle running at present, so we can start a
6061 * cycle
6062 */
6063
6064 /* Begin by setting Flash Cycle Done. */
6065 hsfsts |= HSFSTS_DONE;
6066 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6067 error = 0;
6068 } else {
		/*
		 * Otherwise poll for some time so the current cycle has
		 * a chance to end before giving up.
		 */
6073 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6074 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6075 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6076 error = 0;
6077 break;
6078 }
6079 delay(1);
6080 }
6081 if (error == 0) {
			/*
			 * The previous cycle completed in time, so set
			 * the Flash Cycle Done bit to start a new
			 * cycle.
			 */
6086 hsfsts |= HSFSTS_DONE;
6087 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6088 }
6089 }
6090 return error;
6091 }
6092
6093 /******************************************************************************
6094 * This function starts a flash cycle and waits for its completion
6095 *
6096 * sc - The pointer to the hw structure
6097 ****************************************************************************/
6098 static int32_t
6099 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6100 {
6101 uint16_t hsflctl;
6102 uint16_t hsfsts;
6103 int32_t error = 1;
6104 uint32_t i = 0;
6105
6106 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6107 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6108 hsflctl |= HSFCTL_GO;
6109 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6110
6111 /* wait till FDONE bit is set to 1 */
6112 do {
6113 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6114 if (hsfsts & HSFSTS_DONE)
6115 break;
6116 delay(1);
6117 i++;
6118 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
6120 error = 0;
6121
6122 return error;
6123 }
6124
6125 /******************************************************************************
6126 * Reads a byte or word from the NVM using the ICH8 flash access registers.
6127 *
6128 * sc - The pointer to the hw structure
6129 * index - The index of the byte or word to read.
6130 * size - Size of data to read, 1=byte 2=word
6131 * data - Pointer to the word to store the value read.
6132 *****************************************************************************/
6133 static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t *data)
6136 {
6137 uint16_t hsfsts;
6138 uint16_t hsflctl;
6139 uint32_t flash_linear_address;
6140 uint32_t flash_data = 0;
6141 int32_t error = 1;
6142 int32_t count = 0;
6143
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6146 return error;
6147
6148 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6149 sc->sc_ich8_flash_base;
6150
6151 do {
6152 delay(1);
6153 /* Steps */
6154 error = wm_ich8_cycle_init(sc);
6155 if (error)
6156 break;
6157
6158 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6159 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6160 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6161 & HSFCTL_BCOUNT_MASK;
6162 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6163 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6164
6165 /*
6166 * Write the last 24 bits of index into Flash Linear address
6167 * field in Flash Address
6168 */
6169 /* TODO: TBD maybe check the index against the size of flash */
6170
6171 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6172
6173 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6174
		/*
		 * Check if FCERR is set to 1.  If it is, clear it and
		 * try the whole sequence a few more times; otherwise
		 * read in (shift in) the value from Flash Data0, least
		 * significant byte first.
		 */
6181 if (error == 0) {
6182 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6183 if (size == 1)
6184 *data = (uint8_t)(flash_data & 0x000000FF);
6185 else if (size == 2)
6186 *data = (uint16_t)(flash_data & 0x0000FFFF);
6187 break;
6188 } else {
6189 /*
6190 * If we've gotten here, then things are probably
6191 * completely hosed, but if the error condition is
6192 * detected, it won't hurt to give it another try...
6193 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6194 */
6195 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6196 if (hsfsts & HSFSTS_ERR) {
6197 /* Repeat for some time before giving up. */
6198 continue;
6199 } else if ((hsfsts & HSFSTS_DONE) == 0)
6200 break;
6201 }
6202 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6203
6204 return error;
6205 }
6206
6207 /******************************************************************************
6208 * Reads a single byte from the NVM using the ICH8 flash access registers.
6209 *
6210 * sc - pointer to wm_hw structure
6211 * index - The index of the byte to read.
6212 * data - Pointer to a byte to store the value read.
6213 *****************************************************************************/
6214 static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
6216 {
6217 int32_t status;
6218 uint16_t word = 0;
6219
6220 status = wm_read_ich8_data(sc, index, 1, &word);
6221 if (status == 0)
6222 *data = (uint8_t)word;
6223
6224 return status;
6225 }
6226
6227 /******************************************************************************
6228 * Reads a word from the NVM using the ICH8 flash access registers.
6229 *
6230 * sc - pointer to wm_hw structure
6231 * index - The starting byte index of the word to read.
6232 * data - Pointer to a word to store the value read.
6233 *****************************************************************************/
6234 static int32_t
6235 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6236 {
6237 int32_t status;
6238
6239 status = wm_read_ich8_data(sc, index, 2, data);
6240 return status;
6241 }
6242
6243 static int
6244 wm_check_mng_mode(struct wm_softc *sc)
6245 {
6246 int rv;
6247
6248 switch (sc->sc_type) {
6249 case WM_T_ICH8:
6250 case WM_T_ICH9:
6251 case WM_T_ICH10:
6252 case WM_T_PCH:
6253 rv = wm_check_mng_mode_ich8lan(sc);
6254 break;
6255 case WM_T_82574:
6256 case WM_T_82583:
6257 rv = wm_check_mng_mode_82574(sc);
6258 break;
6259 case WM_T_82571:
6260 case WM_T_82572:
6261 case WM_T_82573:
6262 case WM_T_80003:
6263 rv = wm_check_mng_mode_generic(sc);
6264 break;
6265 default:
		/* nothing to do */
6267 rv = 0;
6268 break;
6269 }
6270
6271 return rv;
6272 }
6273
6274 static int
6275 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6276 {
6277 uint32_t fwsm;
6278
6279 fwsm = CSR_READ(sc, WMREG_FWSM);
6280
6281 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6282 return 1;
6283
6284 return 0;
6285 }
6286
6287 static int
6288 wm_check_mng_mode_82574(struct wm_softc *sc)
6289 {
6290 uint16_t data;
6291
6292 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6293
6294 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6295 return 1;
6296
6297 return 0;
6298 }
6299
6300 static int
6301 wm_check_mng_mode_generic(struct wm_softc *sc)
6302 {
6303 uint32_t fwsm;
6304
6305 fwsm = CSR_READ(sc, WMREG_FWSM);
6306
6307 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6308 return 1;
6309
6310 return 0;
6311 }
6312
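/*
 * wm_check_reset_block:
 *
 * Check whether the firmware currently blocks a PHY reset.  Return 0
 * if a reset is allowed and -1 if it is blocked.
 */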
6313 static int
6314 wm_check_reset_block(struct wm_softc *sc)
6315 {
6316 uint32_t reg;
6317
6318 switch (sc->sc_type) {
6319 case WM_T_ICH8:
6320 case WM_T_ICH9:
6321 case WM_T_ICH10:
6322 case WM_T_PCH:
6323 reg = CSR_READ(sc, WMREG_FWSM);
6324 if ((reg & FWSM_RSPCIPHY) != 0)
6325 return 0;
6326 else
6327 return -1;
6328 break;
6329 case WM_T_82571:
6330 case WM_T_82572:
6331 case WM_T_82573:
6332 case WM_T_82574:
6333 case WM_T_82583:
6334 case WM_T_80003:
6335 reg = CSR_READ(sc, WMREG_MANC);
6336 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6337 return -1;
6338 else
6339 return 0;
6340 break;
6341 default:
6342 /* no problem */
6343 break;
6344 }
6345
6346 return 0;
6347 }
6348
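/*
 * wm_get_hw_control:
 *
 * Tell the firmware that the driver has taken over control of the
 * device by setting the DRV_LOAD bit: in SWSM on the 82573 and in
 * CTRL_EXT on the other chips handled here.
 */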
6349 static void
6350 wm_get_hw_control(struct wm_softc *sc)
6351 {
6352 uint32_t reg;
6353
6354 switch (sc->sc_type) {
6355 case WM_T_82573:
6356 #if 0
6357 case WM_T_82574:
6358 case WM_T_82583:
		/*
		 * FreeBSD's em driver has a function that checks the
		 * management mode for the 82574, but it's never called.
		 * Why?
		 */
6363 #endif
6364 reg = CSR_READ(sc, WMREG_SWSM);
6365 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6366 break;
6367 case WM_T_82571:
6368 case WM_T_82572:
6369 case WM_T_80003:
6370 case WM_T_ICH8:
6371 case WM_T_ICH9:
6372 case WM_T_ICH10:
6373 case WM_T_PCH:
6374 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6375 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
6376 break;
6377 default:
6378 break;
6379 }
6380 }
6381
6382 /* XXX Currently TBI only */
6383 static int
6384 wm_check_for_link(struct wm_softc *sc)
6385 {
6386 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6387 uint32_t rxcw;
6388 uint32_t ctrl;
6389 uint32_t status;
6390 uint32_t sig;
6391
6392 rxcw = CSR_READ(sc, WMREG_RXCW);
6393 ctrl = CSR_READ(sc, WMREG_CTRL);
6394 status = CSR_READ(sc, WMREG_STATUS);
6395
6396 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
6397
6398 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
6399 device_xname(sc->sc_dev), __func__,
6400 ((ctrl & CTRL_SWDPIN(1)) == sig),
6401 ((status & STATUS_LU) != 0),
6402 ((rxcw & RXCW_C) != 0)
6403 ));
6404
6405 /*
6406 * SWDPIN LU RXCW
6407 * 0 0 0
6408 * 0 0 1 (should not happen)
6409 * 0 1 0 (should not happen)
6410 * 0 1 1 (should not happen)
6411 * 1 0 0 Disable autonego and force linkup
6412 * 1 0 1 got /C/ but not linkup yet
6413 * 1 1 0 (linkup)
6414 * 1 1 1 If IFM_AUTO, back to autonego
6415 *
6416 */
6417 if (((ctrl & CTRL_SWDPIN(1)) == sig)
6418 && ((status & STATUS_LU) == 0)
6419 && ((rxcw & RXCW_C) == 0)) {
6420 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
6421 __func__));
6422 sc->sc_tbi_linkup = 0;
6423 /* Disable auto-negotiation in the TXCW register */
6424 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
6425
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: TFCE and RFCE in CTRL are updated by the
		 * hardware automatically, so we should mirror the
		 * register value back into sc->sc_ctrl.
		 */
6432 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
6433 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6434 } else if (((status & STATUS_LU) != 0)
6435 && ((rxcw & RXCW_C) != 0)
6436 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
6437 sc->sc_tbi_linkup = 1;
6438 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
6439 __func__));
6440 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6441 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
6442 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
6443 && ((rxcw & RXCW_C) != 0)) {
6444 DPRINTF(WM_DEBUG_LINK, ("/C/"));
6445 } else {
6446 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
6447 status));
6448 }
6449
6450 return 0;
6451 }
6452
6453 /*
6454 * Workaround for pch's PHYs
6455 * XXX should be moved to new PHY driver?
6456 */
6457 static void
6458 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
6459 {
6460
6461 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
6462
6463 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
6464
6465 /* 82578 */
6466 if (sc->sc_phytype == WMPHY_82578) {
6467 /* PCH rev. < 3 */
6468 if (sc->sc_rev < 3) {
6469 /* XXX 6 bit shift? Why? Is it page2? */
6470 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
6471 0x66c0);
6472 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
6473 0xffff);
6474 }
6475
6476 /* XXX phy rev. < 2 */
6477 }
6478
6479 /* Select page 0 */
6480
6481 /* XXX acquire semaphore */
6482 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
6483 /* XXX release semaphore */
6484
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming
	 * there is link, so that K1 is disabled when the link runs at
	 * 1Gbps.
	 */
6489 wm_k1_gig_workaround_hv(sc, 1);
6490 }
6491
6492 static void
6493 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
6494 {
6495 int k1_enable = sc->sc_nvm_k1_enabled;
6496
6497 /* XXX acquire semaphore */
6498
6499 if (link) {
6500 k1_enable = 0;
6501
6502 /* Link stall fix for link up */
6503 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
6504 } else {
6505 /* Link stall fix for link down */
6506 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
6507 }
6508
6509 wm_configure_k1_ich8lan(sc, k1_enable);
6510
6511 /* XXX release semaphore */
6512 }
6513
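/*
 * wm_configure_k1_ich8lan:
 *
 * Enable or disable the K1 power state on the Kumeran interface.  The
 * MAC speed bits in CTRL/CTRL_EXT are briefly forced (FRCSPD plus
 * SPD_BYPS) around the change, apparently so that the new setting
 * takes effect.
 */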
6514 static void
6515 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
6516 {
6517 uint32_t ctrl, ctrl_ext, tmp;
6518 uint16_t kmrn_reg;
6519
6520 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
6521
6522 if (k1_enable)
6523 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
6524 else
6525 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
6526
6527 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
6528
6529 delay(20);
6530
6531 ctrl = CSR_READ(sc, WMREG_CTRL);
6532 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6533
6534 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
6535 tmp |= CTRL_FRCSPD;
6536
6537 CSR_WRITE(sc, WMREG_CTRL, tmp);
6538 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
6539 delay(20);
6540
6541 CSR_WRITE(sc, WMREG_CTRL, ctrl);
6542 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6543 delay(20);
6544 }
6545