/*	$NetBSD: if_wm.c,v 1.193 2010/01/19 22:07:02 pooka Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.193 2010/01/19 22:07:02 pooka Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
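
/*
 * Illustrative sketch (kept under #if 0, not part of the driver):
 * because the ring sizes above are powers of two, the _MASK macros
 * turn index wraparound into a cheap bitwise AND instead of a modulo.
 * E.g. with 4096 descriptors, WM_NEXTTX(sc, 4095) == (4096 & 4095)
 * == 0, wrapping back to the start of the ring.
 */
#if 0
static inline int
wm_example_next_tx(struct wm_softc *sc, int idx)
{

	/* Advance one descriptor, wrapping at the (power-of-two) size. */
	return ((idx + 1) & WM_NTXDESC_MASK(sc));
}
#endif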

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
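
/*
 * Worked example for the sizing above (assuming the common 9000-byte
 * jumbo MTU plus the Ethernet header, roughly 9014 bytes on the wire):
 * spread over 2048-byte (MCLBYTES) clusters, that is
 * howmany(9014, 2048) == 5 buffers per packet, so 256 descriptors
 * leave room for about 256 / 5 == 51 such packets in flight.
 */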

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
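
/*
 * Usage sketch for the offset macros above: WM_CDTXOFF(7) expands to
 * offsetof(struct wm_control_data_82544, wcd_txdescs[7]), i.e. the byte
 * offset of Tx descriptor 7 within the control-data clump.  Because the
 * clump occupies a single DMA segment, adding that offset to the
 * segment's address (see WM_CDTXADDR() below) yields the device-visible
 * address of the descriptor.
 */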

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
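
/*
 * Sketch of the tail-pointer idiom used by the two macros above
 * (illustrative only; m0 and m1 are hypothetical mbufs): sc_rxtailp
 * always points at the m_next field that the next fragment should be
 * stored through, so appending is O(1) with no empty-chain special case.
 */
#if 0
	WM_RXCHAIN_RESET(sc);		/* sc_rxhead = NULL */
	WM_RXCHAIN_LINK(sc, m0);	/* sc_rxhead = m0, tailp = &m0->m_next */
	WM_RXCHAIN_LINK(sc, m1);	/* m0->m_next = m1, tailp = &m1->m_next */
#endif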

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
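
/*
 * Usage sketch for WM_CDTXSYNC() (illustrative, with hypothetical
 * values): syncing 4 descriptors starting at index WM_NTXDESC(sc) - 2
 * issues two bus_dmamap_sync() calls, one covering the last two
 * descriptors in the ring and one covering the two that wrapped
 * around to index 0.
 */
#if 0
	WM_CDTXSYNC(sc, WM_NTXDESC(sc) - 2, 4,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#endif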

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
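
/*
 * Usage note (a sketch of the intended call sequence, not new logic):
 * after a fresh mbuf cluster has been loaded into rxs_dmamap,
 * WM_INIT_RXDESC() rewrites descriptor x, syncs it, and then bumps the
 * RDT register so the chip knows the descriptor is ready for reuse;
 * wm_add_rxbuf(), declared below, is the typical caller.
 */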

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrsize(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);

CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82578LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82578LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
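
/*
 * Usage sketch (illustrative): the two 32-bit windows written above
 * form an index/data pair, so any CSR can be reached through the I/O
 * BAR.  For example, a reset could be requested via the indirect path
 * instead of a memory-mapped CSR_WRITE():
 */
#if 0
	wm_io_write(sc, WMREG_CTRL, CTRL_RST);
#endif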

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
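
/*
 * Worked example for wm_set_dma_addr() (hypothetical address): given a
 * 64-bit bus_addr_t of 0x0000000123456789, wa_low is set to
 * htole32(0x23456789) and wa_high to htole32(0x00000001); with a 32-bit
 * bus_addr_t the high word is simply forced to zero.  The sizeof()
 * comparison is a compile-time constant, so the branch costs nothing.
 */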

static void
wm_set_spiaddrsize(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrsize(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrsize(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Check whether EEPROM is present or not */
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			aprint_error_dev(sc->sc_dev,
			    "EEPROM PRESENT bit isn't set\n");
			sc->sc_flags |= WM_F_EEPROM_INVALID;
		}
		/* FALLTHROUGH */
	case WM_T_ICH10:
	case WM_T_PCH:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
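		/*
		 * Worked example for the computation above (hypothetical
		 * GFPREG value): with a base field of 0x10 sectors and a
		 * limit field of 0x20 sectors, the NVM region spans
		 * (0x20 + 1 - 0x10) flash sectors; dividing the resulting
		 * byte count by 2 * sizeof(uint16_t) halves it for the two
		 * NVM banks and converts it to 16-bit words, giving the
		 * per-bank size in words.
		 */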
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Check again, because some PCI-e parts fail the
		 * first check due to the link being in sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}
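	/*
	 * For example (hypothetical address): if port 0 of a dual-port
	 * 82546 reads 00:11:22:33:44:56 from the shared EEPROM, the
	 * function-1 instance above reports 00:11:22:33:44:57, keeping
	 * the two interfaces' MAC addresses distinct.
	 */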

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	if (sc->sc_type == WM_T_PCH) {
		uint16_t val;

		/* Save the NVM K1 bit setting */
		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);

		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
			sc->sc_nvm_k1_enabled = 1;
		else
			sc->sc_nvm_k1_enabled = 0;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
	    || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc, wmp->wmp_product);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc, wmp->wmp_product);
	}

	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* Check for jumbo frame */
	switch (sc->sc_type) {
	case WM_T_82573:
		/* XXX limited to 9234 if ASPM is disabled */
		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* XXX limited to 9234 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_PCH:
		/* XXX limited to 4096 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82583:
	case WM_T_ICH8:
		/* No support for jumbo frame */
		break;
	default:
		/* ETHER_MAX_LEN_JUMBO */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	}

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
1674 if (sc->sc_type >= WM_T_82543) {
1675 ifp->if_capabilities |=
1676 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1677 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1678 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1679 IFCAP_CSUM_TCPv6_Tx |
1680 IFCAP_CSUM_UDPv6_Tx;
1681 }
1682
1683 /*
1684 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1685 *
1686 * 82541GI (8086:1076) ... no
1687 * 82572EI (8086:10b9) ... yes
1688 */
1689 if (sc->sc_type >= WM_T_82571) {
1690 ifp->if_capabilities |=
1691 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1692 }
1693
1694 /*
1695 	 * If we're an i82544 or greater (except i82547), we can do
1696 * TCP segmentation offload.
1697 */
1698 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1699 ifp->if_capabilities |= IFCAP_TSOv4;
1700 }
1701
1702 if (sc->sc_type >= WM_T_82571) {
1703 ifp->if_capabilities |= IFCAP_TSOv6;
1704 }
1705
1706 /*
1707 * Attach the interface.
1708 */
1709 if_attach(ifp);
1710 ether_ifattach(ifp, enaddr);
1711 #if NRND > 0
1712 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1713 #endif
1714
1715 #ifdef WM_EVENT_COUNTERS
1716 /* Attach event counters. */
1717 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1718 NULL, xname, "txsstall");
1719 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1720 NULL, xname, "txdstall");
1721 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1722 NULL, xname, "txfifo_stall");
1723 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1724 NULL, xname, "txdw");
1725 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1726 NULL, xname, "txqe");
1727 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1728 NULL, xname, "rxintr");
1729 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1730 NULL, xname, "linkintr");
1731
1732 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1733 NULL, xname, "rxipsum");
1734 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1735 NULL, xname, "rxtusum");
1736 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1737 NULL, xname, "txipsum");
1738 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1739 NULL, xname, "txtusum");
1740 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1741 NULL, xname, "txtusum6");
1742
1743 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1744 NULL, xname, "txtso");
1745 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1746 NULL, xname, "txtso6");
1747 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1748 NULL, xname, "txtsopain");
1749
1750 for (i = 0; i < WM_NTXSEGS; i++) {
1751 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1752 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1753 NULL, xname, wm_txseg_evcnt_names[i]);
1754 }
1755
1756 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1757 NULL, xname, "txdrop");
1758
1759 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1760 NULL, xname, "tu");
1761
1762 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1763 NULL, xname, "tx_xoff");
1764 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1765 NULL, xname, "tx_xon");
1766 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1767 NULL, xname, "rx_xoff");
1768 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1769 NULL, xname, "rx_xon");
1770 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1771 NULL, xname, "rx_macctl");
1772 #endif /* WM_EVENT_COUNTERS */
1773
1774 if (pmf_device_register(self, NULL, NULL))
1775 pmf_class_network_register(self, ifp);
1776 else
1777 aprint_error_dev(self, "couldn't establish power handler\n");
1778
1779 return;
1780
1781 /*
1782 * Free any resources we've allocated during the failed attach
1783 * attempt. Do this in reverse order and fall through.
1784 */
1785 fail_5:
1786 for (i = 0; i < WM_NRXDESC; i++) {
1787 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1788 bus_dmamap_destroy(sc->sc_dmat,
1789 sc->sc_rxsoft[i].rxs_dmamap);
1790 }
1791 fail_4:
1792 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1793 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1794 bus_dmamap_destroy(sc->sc_dmat,
1795 sc->sc_txsoft[i].txs_dmamap);
1796 }
1797 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1798 fail_3:
1799 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1800 fail_2:
1801 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1802 cdata_size);
1803 fail_1:
1804 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1805 fail_0:
1806 return;
1807 }
1808
1809 /*
1810 * wm_tx_offload:
1811 *
1812 * Set up TCP/IP checksumming parameters for the
1813 * specified packet.
1814 */
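/*
 * Illustrative only (not from the original comments): for a plain
 * Ethernet + IPv4 + TCP frame, the context descriptor built below
 * would carry roughly these offsets:
 *
 *	IPCSS = 14			start of the IP header
 *	IPCSO = 14 + 10 = 24		offset of ip_sum
 *	IPCSE = 14 + iphl - 1		last byte of the IP header
 *	TUCSS = 14 + iphl		start of the TCP header
 *	TUCSO = TUCSS + 16		offset of th_sum
 *	TUCSE = 0			checksum to end of packet
 *
 * (10 = offsetof(struct ip, ip_sum), 16 = offsetof(struct tcphdr,
 * th_sum).)  The data descriptors that follow refer back to this
 * one context descriptor.
 */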
1815 static int
1816 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1817 uint8_t *fieldsp)
1818 {
1819 struct mbuf *m0 = txs->txs_mbuf;
1820 struct livengood_tcpip_ctxdesc *t;
1821 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1822 uint32_t ipcse;
1823 struct ether_header *eh;
1824 int offset, iphl;
1825 uint8_t fields;
1826
1827 /*
1828 * XXX It would be nice if the mbuf pkthdr had offset
1829 * fields for the protocol headers.
1830 */
1831
1832 eh = mtod(m0, struct ether_header *);
1833 switch (htons(eh->ether_type)) {
1834 case ETHERTYPE_IP:
1835 case ETHERTYPE_IPV6:
1836 offset = ETHER_HDR_LEN;
1837 break;
1838
1839 case ETHERTYPE_VLAN:
1840 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1841 break;
1842
1843 default:
1844 /*
1845 		 * We don't support this protocol or encapsulation.
1846 */
1847 *fieldsp = 0;
1848 *cmdp = 0;
1849 return (0);
1850 }
1851
1852 if ((m0->m_pkthdr.csum_flags &
1853 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1854 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1855 } else {
1856 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1857 }
1858 ipcse = offset + iphl - 1;
1859
1860 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1861 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1862 seg = 0;
1863 fields = 0;
1864
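	/*
	 * A note on the TSO setup below (an observation, not from the
	 * original comments): the hardware regenerates the IP total
	 * length and TCP checksum for each segment it emits, but it
	 * expects th_sum to already hold the pseudo-header checksum
	 * computed over a zero payload length.  Both branches below
	 * arrange that -- the first by copying the headers out of and
	 * back into the mbuf chain, the second by patching them in
	 * place.
	 */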
1865 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1866 int hlen = offset + iphl;
1867 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1868
1869 if (__predict_false(m0->m_len <
1870 (hlen + sizeof(struct tcphdr)))) {
1871 /*
1872 * TCP/IP headers are not in the first mbuf; we need
1873 * to do this the slow and painful way. Let's just
1874 * hope this doesn't happen very often.
1875 */
1876 struct tcphdr th;
1877
1878 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1879
1880 m_copydata(m0, hlen, sizeof(th), &th);
1881 if (v4) {
1882 struct ip ip;
1883
1884 m_copydata(m0, offset, sizeof(ip), &ip);
1885 ip.ip_len = 0;
1886 m_copyback(m0,
1887 offset + offsetof(struct ip, ip_len),
1888 sizeof(ip.ip_len), &ip.ip_len);
1889 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1890 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1891 } else {
1892 struct ip6_hdr ip6;
1893
1894 m_copydata(m0, offset, sizeof(ip6), &ip6);
1895 ip6.ip6_plen = 0;
1896 m_copyback(m0,
1897 offset + offsetof(struct ip6_hdr, ip6_plen),
1898 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1899 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1900 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1901 }
1902 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1903 sizeof(th.th_sum), &th.th_sum);
1904
1905 hlen += th.th_off << 2;
1906 } else {
1907 /*
1908 * TCP/IP headers are in the first mbuf; we can do
1909 * this the easy way.
1910 */
1911 struct tcphdr *th;
1912
1913 if (v4) {
1914 struct ip *ip =
1915 (void *)(mtod(m0, char *) + offset);
1916 th = (void *)(mtod(m0, char *) + hlen);
1917
1918 ip->ip_len = 0;
1919 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1920 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1921 } else {
1922 struct ip6_hdr *ip6 =
1923 (void *)(mtod(m0, char *) + offset);
1924 th = (void *)(mtod(m0, char *) + hlen);
1925
1926 ip6->ip6_plen = 0;
1927 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1928 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1929 }
1930 hlen += th->th_off << 2;
1931 }
1932
1933 if (v4) {
1934 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1935 cmdlen |= WTX_TCPIP_CMD_IP;
1936 } else {
1937 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1938 ipcse = 0;
1939 }
1940 cmd |= WTX_TCPIP_CMD_TSE;
1941 cmdlen |= WTX_TCPIP_CMD_TSE |
1942 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1943 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1944 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1945 }
1946
1947 /*
1948 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1949 * offload feature, if we load the context descriptor, we
1950 * MUST provide valid values for IPCSS and TUCSS fields.
1951 */
1952
1953 ipcs = WTX_TCPIP_IPCSS(offset) |
1954 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1955 WTX_TCPIP_IPCSE(ipcse);
1956 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1957 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1958 fields |= WTX_IXSM;
1959 }
1960
1961 offset += iphl;
1962
1963 if (m0->m_pkthdr.csum_flags &
1964 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1965 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1966 fields |= WTX_TXSM;
1967 tucs = WTX_TCPIP_TUCSS(offset) |
1968 WTX_TCPIP_TUCSO(offset +
1969 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1970 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1971 } else if ((m0->m_pkthdr.csum_flags &
1972 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1973 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1974 fields |= WTX_TXSM;
1975 tucs = WTX_TCPIP_TUCSS(offset) |
1976 WTX_TCPIP_TUCSO(offset +
1977 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1978 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1979 } else {
1980 /* Just initialize it to a valid TCP context. */
1981 tucs = WTX_TCPIP_TUCSS(offset) |
1982 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1983 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1984 }
1985
1986 /* Fill in the context descriptor. */
1987 t = (struct livengood_tcpip_ctxdesc *)
1988 &sc->sc_txdescs[sc->sc_txnext];
1989 t->tcpip_ipcs = htole32(ipcs);
1990 t->tcpip_tucs = htole32(tucs);
1991 t->tcpip_cmdlen = htole32(cmdlen);
1992 t->tcpip_seg = htole32(seg);
1993 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1994
1995 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1996 txs->txs_ndesc++;
1997
1998 *cmdp = cmd;
1999 *fieldsp = fields;
2000
2001 return (0);
2002 }
2003
2004 static void
2005 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2006 {
2007 struct mbuf *m;
2008 int i;
2009
2010 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2011 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2012 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2013 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2014 m->m_data, m->m_len, m->m_flags);
2015 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2016 i, i == 1 ? "" : "s");
2017 }
2018
2019 /*
2020 * wm_82547_txfifo_stall:
2021 *
2022 * Callout used to wait for the 82547 Tx FIFO to drain,
2023 * reset the FIFO pointers, and restart packet transmission.
2024 */
2025 static void
2026 wm_82547_txfifo_stall(void *arg)
2027 {
2028 struct wm_softc *sc = arg;
2029 int s;
2030
2031 s = splnet();
2032
2033 if (sc->sc_txfifo_stall) {
2034 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2035 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2036 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2037 /*
2038 * Packets have drained. Stop transmitter, reset
2039 * FIFO pointers, restart transmitter, and kick
2040 * the packet queue.
2041 */
2042 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2043 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2044 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2045 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2046 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2047 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2048 CSR_WRITE(sc, WMREG_TCTL, tctl);
2049 CSR_WRITE_FLUSH(sc);
2050
2051 sc->sc_txfifo_head = 0;
2052 sc->sc_txfifo_stall = 0;
2053 wm_start(&sc->sc_ethercom.ec_if);
2054 } else {
2055 /*
2056 * Still waiting for packets to drain; try again in
2057 * another tick.
2058 */
2059 callout_schedule(&sc->sc_txfifo_ch, 1);
2060 }
2061 }
2062
2063 splx(s);
2064 }
2065
2066 /*
2067 * wm_82547_txfifo_bugchk:
2068 *
2069 * Check for bug condition in the 82547 Tx FIFO. We need to
2070 * prevent enqueueing a packet that would wrap around the end
2071  * of the Tx FIFO ring buffer; otherwise the chip will croak.
2072 *
2073 * We do this by checking the amount of space before the end
2074 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2075 * the Tx FIFO, wait for all remaining packets to drain, reset
2076 * the internal FIFO pointers to the beginning, and restart
2077 * transmission on the interface.
2078 */
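/*
 * Worked example, with illustrative numbers: suppose a 16384-byte
 * Tx FIFO with sc_txfifo_head at 15984, so space = 400.  A 1514-byte
 * frame rounds up to roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536,
 * and since 1536 >= WM_82547_PAD_LEN + 400, we stall and let the FIFO
 * drain rather than risk a wrap near the end of the buffer.
 */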
2079 #define WM_FIFO_HDR 0x10
2080 #define WM_82547_PAD_LEN 0x3e0
2081 static int
2082 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2083 {
2084 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2085 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2086
2087 /* Just return if already stalled. */
2088 if (sc->sc_txfifo_stall)
2089 return (1);
2090
2091 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2092 /* Stall only occurs in half-duplex mode. */
2093 goto send_packet;
2094 }
2095
2096 if (len >= WM_82547_PAD_LEN + space) {
2097 sc->sc_txfifo_stall = 1;
2098 callout_schedule(&sc->sc_txfifo_ch, 1);
2099 return (1);
2100 }
2101
2102 send_packet:
2103 sc->sc_txfifo_head += len;
2104 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2105 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2106
2107 return (0);
2108 }
2109
2110 /*
2111 * wm_start: [ifnet interface function]
2112 *
2113 * Start packet transmission on the interface.
2114 */
2115 static void
2116 wm_start(struct ifnet *ifp)
2117 {
2118 struct wm_softc *sc = ifp->if_softc;
2119 struct mbuf *m0;
2120 struct m_tag *mtag;
2121 struct wm_txsoft *txs;
2122 bus_dmamap_t dmamap;
2123 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2124 bus_addr_t curaddr;
2125 bus_size_t seglen, curlen;
2126 uint32_t cksumcmd;
2127 uint8_t cksumfields;
2128
2129 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2130 return;
2131
2132 /*
2133 * Remember the previous number of free descriptors.
2134 */
2135 ofree = sc->sc_txfree;
2136
2137 /*
2138 * Loop through the send queue, setting up transmit descriptors
2139 * until we drain the queue, or use up all available transmit
2140 * descriptors.
2141 */
2142 for (;;) {
2143 /* Grab a packet off the queue. */
2144 IFQ_POLL(&ifp->if_snd, m0);
2145 if (m0 == NULL)
2146 break;
2147
2148 DPRINTF(WM_DEBUG_TX,
2149 ("%s: TX: have packet to transmit: %p\n",
2150 device_xname(sc->sc_dev), m0));
2151
2152 /* Get a work queue entry. */
2153 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2154 wm_txintr(sc);
2155 if (sc->sc_txsfree == 0) {
2156 DPRINTF(WM_DEBUG_TX,
2157 ("%s: TX: no free job descriptors\n",
2158 device_xname(sc->sc_dev)));
2159 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2160 break;
2161 }
2162 }
2163
2164 txs = &sc->sc_txsoft[sc->sc_txsnext];
2165 dmamap = txs->txs_dmamap;
2166
2167 use_tso = (m0->m_pkthdr.csum_flags &
2168 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2169
2170 /*
2171 * So says the Linux driver:
2172 * The controller does a simple calculation to make sure
2173 * there is enough room in the FIFO before initiating the
2174 * DMA for each buffer. The calc is:
2175 * 4 = ceil(buffer len / MSS)
2176 * To make sure we don't overrun the FIFO, adjust the max
2177 * buffer len if the MSS drops.
2178 */
2179 dmamap->dm_maxsegsz =
2180 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2181 ? m0->m_pkthdr.segsz << 2
2182 : WTX_MAX_LEN;
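		/*
		 * Arithmetic sketch (illustrative): with an MSS of
		 * 1448, the cap is 1448 << 2 = 5792 bytes per DMA
		 * segment, taken only if that is still below
		 * WTX_MAX_LEN.
		 */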
2183
2184 /*
2185 * Load the DMA map. If this fails, the packet either
2186 * didn't fit in the allotted number of segments, or we
2187 * were short on resources. For the too-many-segments
2188 * case, we simply report an error and drop the packet,
2189 * since we can't sanely copy a jumbo packet to a single
2190 * buffer.
2191 */
2192 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2193 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2194 if (error) {
2195 if (error == EFBIG) {
2196 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2197 log(LOG_ERR, "%s: Tx packet consumes too many "
2198 "DMA segments, dropping...\n",
2199 device_xname(sc->sc_dev));
2200 IFQ_DEQUEUE(&ifp->if_snd, m0);
2201 wm_dump_mbuf_chain(sc, m0);
2202 m_freem(m0);
2203 continue;
2204 }
2205 /*
2206 * Short on resources, just stop for now.
2207 */
2208 DPRINTF(WM_DEBUG_TX,
2209 ("%s: TX: dmamap load failed: %d\n",
2210 device_xname(sc->sc_dev), error));
2211 break;
2212 }
2213
2214 segs_needed = dmamap->dm_nsegs;
2215 if (use_tso) {
2216 /* For sentinel descriptor; see below. */
2217 segs_needed++;
2218 }
2219
2220 /*
2221 * Ensure we have enough descriptors free to describe
2222 * the packet. Note, we always reserve one descriptor
2223 * at the end of the ring due to the semantics of the
2224 * TDT register, plus one more in the event we need
2225 * to load offload context.
2226 */
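		/*
		 * In other words (illustrative): a packet mapped to
		 * 8 DMA segments needs 8 data descriptors (9 with the
		 * TSO sentinel), and is queued only if at least
		 * segs_needed + 2 descriptors are free -- one reserved
		 * for the TDT semantics and one for a possible context
		 * descriptor.
		 */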
2227 if (segs_needed > sc->sc_txfree - 2) {
2228 /*
2229 * Not enough free descriptors to transmit this
2230 * packet. We haven't committed anything yet,
2231 * so just unload the DMA map, put the packet
2232 			 * back on the queue, and punt. Notify the upper
2233 * layer that there are no more slots left.
2234 */
2235 DPRINTF(WM_DEBUG_TX,
2236 ("%s: TX: need %d (%d) descriptors, have %d\n",
2237 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2238 segs_needed, sc->sc_txfree - 1));
2239 ifp->if_flags |= IFF_OACTIVE;
2240 bus_dmamap_unload(sc->sc_dmat, dmamap);
2241 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2242 break;
2243 }
2244
2245 /*
2246 * Check for 82547 Tx FIFO bug. We need to do this
2247 * once we know we can transmit the packet, since we
2248 * do some internal FIFO space accounting here.
2249 */
2250 if (sc->sc_type == WM_T_82547 &&
2251 wm_82547_txfifo_bugchk(sc, m0)) {
2252 DPRINTF(WM_DEBUG_TX,
2253 ("%s: TX: 82547 Tx FIFO bug detected\n",
2254 device_xname(sc->sc_dev)));
2255 ifp->if_flags |= IFF_OACTIVE;
2256 bus_dmamap_unload(sc->sc_dmat, dmamap);
2257 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2258 break;
2259 }
2260
2261 IFQ_DEQUEUE(&ifp->if_snd, m0);
2262
2263 /*
2264 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2265 */
2266
2267 DPRINTF(WM_DEBUG_TX,
2268 ("%s: TX: packet has %d (%d) DMA segments\n",
2269 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2270
2271 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2272
2273 /*
2274 * Store a pointer to the packet so that we can free it
2275 * later.
2276 *
2277 * Initially, we consider the number of descriptors the
2278 * packet uses the number of DMA segments. This may be
2279 * incremented by 1 if we do checksum offload (a descriptor
2280 * is used to set the checksum context).
2281 */
2282 txs->txs_mbuf = m0;
2283 txs->txs_firstdesc = sc->sc_txnext;
2284 txs->txs_ndesc = segs_needed;
2285
2286 /* Set up offload parameters for this packet. */
2287 if (m0->m_pkthdr.csum_flags &
2288 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2289 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2290 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2291 if (wm_tx_offload(sc, txs, &cksumcmd,
2292 &cksumfields) != 0) {
2293 /* Error message already displayed. */
2294 bus_dmamap_unload(sc->sc_dmat, dmamap);
2295 continue;
2296 }
2297 } else {
2298 cksumcmd = 0;
2299 cksumfields = 0;
2300 }
2301
2302 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2303
2304 /* Sync the DMA map. */
2305 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2306 BUS_DMASYNC_PREWRITE);
2307
2308 /*
2309 * Initialize the transmit descriptor.
2310 */
2311 for (nexttx = sc->sc_txnext, seg = 0;
2312 seg < dmamap->dm_nsegs; seg++) {
2313 for (seglen = dmamap->dm_segs[seg].ds_len,
2314 curaddr = dmamap->dm_segs[seg].ds_addr;
2315 seglen != 0;
2316 curaddr += curlen, seglen -= curlen,
2317 nexttx = WM_NEXTTX(sc, nexttx)) {
2318 curlen = seglen;
2319
2320 /*
2321 * So says the Linux driver:
2322 * Work around for premature descriptor
2323 * write-backs in TSO mode. Append a
2324 * 4-byte sentinel descriptor.
2325 */
2326 if (use_tso &&
2327 seg == dmamap->dm_nsegs - 1 &&
2328 curlen > 8)
2329 curlen -= 4;
2330
2331 wm_set_dma_addr(
2332 &sc->sc_txdescs[nexttx].wtx_addr,
2333 curaddr);
2334 sc->sc_txdescs[nexttx].wtx_cmdlen =
2335 htole32(cksumcmd | curlen);
2336 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2337 0;
2338 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2339 cksumfields;
2340 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2341 lasttx = nexttx;
2342
2343 DPRINTF(WM_DEBUG_TX,
2344 ("%s: TX: desc %d: low 0x%08lx, "
2345 "len 0x%04x\n",
2346 device_xname(sc->sc_dev), nexttx,
2347 curaddr & 0xffffffffUL, (unsigned)curlen));
2348 }
2349 }
2350
2351 KASSERT(lasttx != -1);
2352
2353 /*
2354 * Set up the command byte on the last descriptor of
2355 * the packet. If we're in the interrupt delay window,
2356 * delay the interrupt.
2357 */
2358 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2359 htole32(WTX_CMD_EOP | WTX_CMD_RS);
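		/*
		 * (WTX_CMD_IDE, the interrupt-delay bit mentioned
		 * above, was already folded into cksumcmd earlier, so
		 * it rides on every descriptor of the packet; EOP/RS,
		 * and VLE just below, go only on the last one.)
		 */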
2360
2361 /*
2362 * If VLANs are enabled and the packet has a VLAN tag, set
2363 * up the descriptor to encapsulate the packet for us.
2364 *
2365 * This is only valid on the last descriptor of the packet.
2366 */
2367 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2368 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2369 htole32(WTX_CMD_VLE);
2370 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2371 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2372 }
2373
2374 txs->txs_lastdesc = lasttx;
2375
2376 DPRINTF(WM_DEBUG_TX,
2377 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2378 device_xname(sc->sc_dev),
2379 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2380
2381 /* Sync the descriptors we're using. */
2382 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2383 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2384
2385 /* Give the packet to the chip. */
2386 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2387
2388 DPRINTF(WM_DEBUG_TX,
2389 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2390
2391 DPRINTF(WM_DEBUG_TX,
2392 ("%s: TX: finished transmitting packet, job %d\n",
2393 device_xname(sc->sc_dev), sc->sc_txsnext));
2394
2395 /* Advance the tx pointer. */
2396 sc->sc_txfree -= txs->txs_ndesc;
2397 sc->sc_txnext = nexttx;
2398
2399 sc->sc_txsfree--;
2400 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2401
2402 /* Pass the packet to any BPF listeners. */
2403 if (ifp->if_bpf)
2404 bpf_ops->bpf_mtap(ifp->if_bpf, m0);
2405 }
2406
2407 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2408 /* No more slots; notify upper layer. */
2409 ifp->if_flags |= IFF_OACTIVE;
2410 }
2411
2412 if (sc->sc_txfree != ofree) {
2413 /* Set a watchdog timer in case the chip flakes out. */
2414 ifp->if_timer = 5;
2415 }
2416 }
2417
2418 /*
2419 * wm_watchdog: [ifnet interface function]
2420 *
2421 * Watchdog timer handler.
2422 */
2423 static void
2424 wm_watchdog(struct ifnet *ifp)
2425 {
2426 struct wm_softc *sc = ifp->if_softc;
2427
2428 /*
2429 * Since we're using delayed interrupts, sweep up
2430 * before we report an error.
2431 */
2432 wm_txintr(sc);
2433
2434 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2435 log(LOG_ERR,
2436 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2437 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2438 sc->sc_txnext);
2439 ifp->if_oerrors++;
2440
2441 /* Reset the interface. */
2442 (void) wm_init(ifp);
2443 }
2444
2445 /* Try to get more packets going. */
2446 wm_start(ifp);
2447 }
2448
2449 /*
2450 * wm_ioctl: [ifnet interface function]
2451 *
2452 * Handle control requests from the operator.
2453 */
2454 static int
2455 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2456 {
2457 struct wm_softc *sc = ifp->if_softc;
2458 struct ifreq *ifr = (struct ifreq *) data;
2459 struct ifaddr *ifa = (struct ifaddr *)data;
2460 struct sockaddr_dl *sdl;
2461 int diff, s, error;
2462
2463 s = splnet();
2464
2465 switch (cmd) {
2466 case SIOCSIFFLAGS:
2467 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2468 break;
2469 if (ifp->if_flags & IFF_UP) {
2470 diff = (ifp->if_flags ^ sc->sc_if_flags)
2471 & (IFF_PROMISC | IFF_ALLMULTI);
2472 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2473 /*
2474 				 * If the difference between the old and
2475 				 * new flags is only IFF_PROMISC or
2476 				 * IFF_ALLMULTI, just set the multicast filter
2477 				 * (don't reset, to prevent link down).
2478 */
2479 wm_set_filter(sc);
2480 } else {
2481 /*
2482 * Reset the interface to pick up changes in
2483 * any other flags that affect the hardware
2484 * state.
2485 */
2486 wm_init(ifp);
2487 }
2488 } else {
2489 if (ifp->if_flags & IFF_RUNNING)
2490 wm_stop(ifp, 1);
2491 }
2492 sc->sc_if_flags = ifp->if_flags;
2493 error = 0;
2494 break;
2495 case SIOCSIFMEDIA:
2496 case SIOCGIFMEDIA:
2497 /* Flow control requires full-duplex mode. */
2498 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2499 (ifr->ifr_media & IFM_FDX) == 0)
2500 ifr->ifr_media &= ~IFM_ETH_FMASK;
2501 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2502 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2503 /* We can do both TXPAUSE and RXPAUSE. */
2504 ifr->ifr_media |=
2505 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2506 }
2507 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2508 }
2509 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2510 break;
2511 case SIOCINITIFADDR:
2512 if (ifa->ifa_addr->sa_family == AF_LINK) {
2513 sdl = satosdl(ifp->if_dl->ifa_addr);
2514 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2515 LLADDR(satosdl(ifa->ifa_addr)),
2516 ifp->if_addrlen);
2517 /* unicast address is first multicast entry */
2518 wm_set_filter(sc);
2519 error = 0;
2520 break;
2521 }
2522 		/* Fall through for the rest */
2523 default:
2524 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2525 break;
2526
2527 error = 0;
2528
2529 if (cmd == SIOCSIFCAP)
2530 error = (*ifp->if_init)(ifp);
2531 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2532 ;
2533 else if (ifp->if_flags & IFF_RUNNING) {
2534 /*
2535 * Multicast list has changed; set the hardware filter
2536 * accordingly.
2537 */
2538 wm_set_filter(sc);
2539 }
2540 break;
2541 }
2542
2543 /* Try to get more packets going. */
2544 wm_start(ifp);
2545
2546 splx(s);
2547 return (error);
2548 }
2549
2550 /*
2551 * wm_intr:
2552 *
2553 * Interrupt service routine.
2554 */
2555 static int
2556 wm_intr(void *arg)
2557 {
2558 struct wm_softc *sc = arg;
2559 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2560 uint32_t icr;
2561 int handled = 0;
2562
2563 while (1 /* CONSTCOND */) {
2564 icr = CSR_READ(sc, WMREG_ICR);
2565 if ((icr & sc->sc_icr) == 0)
2566 break;
2567 #if 0 /*NRND > 0*/
2568 if (RND_ENABLED(&sc->rnd_source))
2569 rnd_add_uint32(&sc->rnd_source, icr);
2570 #endif
2571
2572 handled = 1;
2573
2574 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2575 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2576 DPRINTF(WM_DEBUG_RX,
2577 ("%s: RX: got Rx intr 0x%08x\n",
2578 device_xname(sc->sc_dev),
2579 icr & (ICR_RXDMT0|ICR_RXT0)));
2580 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2581 }
2582 #endif
2583 wm_rxintr(sc);
2584
2585 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2586 if (icr & ICR_TXDW) {
2587 DPRINTF(WM_DEBUG_TX,
2588 ("%s: TX: got TXDW interrupt\n",
2589 device_xname(sc->sc_dev)));
2590 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2591 }
2592 #endif
2593 wm_txintr(sc);
2594
2595 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2596 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2597 wm_linkintr(sc, icr);
2598 }
2599
2600 if (icr & ICR_RXO) {
2601 ifp->if_ierrors++;
2602 #if defined(WM_DEBUG)
2603 log(LOG_WARNING, "%s: Receive overrun\n",
2604 device_xname(sc->sc_dev));
2605 #endif /* defined(WM_DEBUG) */
2606 }
2607 }
2608
2609 if (handled) {
2610 /* Try to get more packets going. */
2611 wm_start(ifp);
2612 }
2613
2614 return (handled);
2615 }
2616
2617 /*
2618 * wm_txintr:
2619 *
2620 * Helper; handle transmit interrupts.
2621 */
2622 static void
2623 wm_txintr(struct wm_softc *sc)
2624 {
2625 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2626 struct wm_txsoft *txs;
2627 uint8_t status;
2628 int i;
2629
2630 ifp->if_flags &= ~IFF_OACTIVE;
2631
2632 /*
2633 * Go through the Tx list and free mbufs for those
2634 * frames which have been transmitted.
2635 */
2636 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2637 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2638 txs = &sc->sc_txsoft[i];
2639
2640 DPRINTF(WM_DEBUG_TX,
2641 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2642
2643 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2644 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2645
2646 status =
2647 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2648 if ((status & WTX_ST_DD) == 0) {
2649 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2650 BUS_DMASYNC_PREREAD);
2651 break;
2652 }
2653
2654 DPRINTF(WM_DEBUG_TX,
2655 ("%s: TX: job %d done: descs %d..%d\n",
2656 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2657 txs->txs_lastdesc));
2658
2659 /*
2660 * XXX We should probably be using the statistics
2661 * XXX registers, but I don't know if they exist
2662 * XXX on chips before the i82544.
2663 */
2664
2665 #ifdef WM_EVENT_COUNTERS
2666 if (status & WTX_ST_TU)
2667 WM_EVCNT_INCR(&sc->sc_ev_tu);
2668 #endif /* WM_EVENT_COUNTERS */
2669
2670 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2671 ifp->if_oerrors++;
2672 if (status & WTX_ST_LC)
2673 log(LOG_WARNING, "%s: late collision\n",
2674 device_xname(sc->sc_dev));
2675 else if (status & WTX_ST_EC) {
2676 ifp->if_collisions += 16;
2677 log(LOG_WARNING, "%s: excessive collisions\n",
2678 device_xname(sc->sc_dev));
2679 }
2680 } else
2681 ifp->if_opackets++;
2682
2683 sc->sc_txfree += txs->txs_ndesc;
2684 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2685 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2686 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2687 m_freem(txs->txs_mbuf);
2688 txs->txs_mbuf = NULL;
2689 }
2690
2691 /* Update the dirty transmit buffer pointer. */
2692 sc->sc_txsdirty = i;
2693 DPRINTF(WM_DEBUG_TX,
2694 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2695
2696 /*
2697 * If there are no more pending transmissions, cancel the watchdog
2698 * timer.
2699 */
2700 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2701 ifp->if_timer = 0;
2702 }
2703
2704 /*
2705 * wm_rxintr:
2706 *
2707 * Helper; handle receive interrupts.
2708 */
2709 static void
2710 wm_rxintr(struct wm_softc *sc)
2711 {
2712 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2713 struct wm_rxsoft *rxs;
2714 struct mbuf *m;
2715 int i, len;
2716 uint8_t status, errors;
2717 uint16_t vlantag;
2718
2719 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2720 rxs = &sc->sc_rxsoft[i];
2721
2722 DPRINTF(WM_DEBUG_RX,
2723 ("%s: RX: checking descriptor %d\n",
2724 device_xname(sc->sc_dev), i));
2725
2726 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2727
2728 status = sc->sc_rxdescs[i].wrx_status;
2729 errors = sc->sc_rxdescs[i].wrx_errors;
2730 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2731 vlantag = sc->sc_rxdescs[i].wrx_special;
2732
2733 if ((status & WRX_ST_DD) == 0) {
2734 /*
2735 * We have processed all of the receive descriptors.
2736 */
2737 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2738 break;
2739 }
2740
2741 if (__predict_false(sc->sc_rxdiscard)) {
2742 DPRINTF(WM_DEBUG_RX,
2743 ("%s: RX: discarding contents of descriptor %d\n",
2744 device_xname(sc->sc_dev), i));
2745 WM_INIT_RXDESC(sc, i);
2746 if (status & WRX_ST_EOP) {
2747 /* Reset our state. */
2748 DPRINTF(WM_DEBUG_RX,
2749 ("%s: RX: resetting rxdiscard -> 0\n",
2750 device_xname(sc->sc_dev)));
2751 sc->sc_rxdiscard = 0;
2752 }
2753 continue;
2754 }
2755
2756 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2757 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2758
2759 m = rxs->rxs_mbuf;
2760
2761 /*
2762 * Add a new receive buffer to the ring, unless of
2763 * course the length is zero. Treat the latter as a
2764 * failed mapping.
2765 */
2766 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2767 /*
2768 * Failed, throw away what we've done so
2769 * far, and discard the rest of the packet.
2770 */
2771 ifp->if_ierrors++;
2772 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2773 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2774 WM_INIT_RXDESC(sc, i);
2775 if ((status & WRX_ST_EOP) == 0)
2776 sc->sc_rxdiscard = 1;
2777 if (sc->sc_rxhead != NULL)
2778 m_freem(sc->sc_rxhead);
2779 WM_RXCHAIN_RESET(sc);
2780 DPRINTF(WM_DEBUG_RX,
2781 ("%s: RX: Rx buffer allocation failed, "
2782 "dropping packet%s\n", device_xname(sc->sc_dev),
2783 sc->sc_rxdiscard ? " (discard)" : ""));
2784 continue;
2785 }
2786
2787 m->m_len = len;
2788 sc->sc_rxlen += len;
2789 DPRINTF(WM_DEBUG_RX,
2790 ("%s: RX: buffer at %p len %d\n",
2791 device_xname(sc->sc_dev), m->m_data, len));
2792
2793 /*
2794 * If this is not the end of the packet, keep
2795 * looking.
2796 */
2797 if ((status & WRX_ST_EOP) == 0) {
2798 WM_RXCHAIN_LINK(sc, m);
2799 DPRINTF(WM_DEBUG_RX,
2800 ("%s: RX: not yet EOP, rxlen -> %d\n",
2801 device_xname(sc->sc_dev), sc->sc_rxlen));
2802 continue;
2803 }
2804
2805 /*
2806 * Okay, we have the entire packet now. The chip is
2807 * configured to include the FCS (not all chips can
2808 * be configured to strip it), so we need to trim it.
2809 * May need to adjust length of previous mbuf in the
2810 * chain if the current mbuf is too short.
2811 */
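		/*
		 * E.g. (illustrative) if the 4-byte FCS straddles two
		 * mbufs so that the final mbuf holds just 1 byte of it,
		 * we set that mbuf's m_len to 0 and trim the remaining
		 * 3 bytes from the tail of the previous mbuf.
		 */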
2812 if (m->m_len < ETHER_CRC_LEN) {
2813 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2814 m->m_len = 0;
2815 } else {
2816 m->m_len -= ETHER_CRC_LEN;
2817 }
2818 len = sc->sc_rxlen - ETHER_CRC_LEN;
2819
2820 WM_RXCHAIN_LINK(sc, m);
2821
2822 *sc->sc_rxtailp = NULL;
2823 m = sc->sc_rxhead;
2824
2825 WM_RXCHAIN_RESET(sc);
2826
2827 DPRINTF(WM_DEBUG_RX,
2828 ("%s: RX: have entire packet, len -> %d\n",
2829 device_xname(sc->sc_dev), len));
2830
2831 /*
2832 * If an error occurred, update stats and drop the packet.
2833 */
2834 if (errors &
2835 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2836 ifp->if_ierrors++;
2837 if (errors & WRX_ER_SE)
2838 log(LOG_WARNING, "%s: symbol error\n",
2839 device_xname(sc->sc_dev));
2840 else if (errors & WRX_ER_SEQ)
2841 log(LOG_WARNING, "%s: receive sequence error\n",
2842 device_xname(sc->sc_dev));
2843 else if (errors & WRX_ER_CE)
2844 log(LOG_WARNING, "%s: CRC error\n",
2845 device_xname(sc->sc_dev));
2846 m_freem(m);
2847 continue;
2848 }
2849
2850 /*
2851 * No errors. Receive the packet.
2852 */
2853 m->m_pkthdr.rcvif = ifp;
2854 m->m_pkthdr.len = len;
2855
2856 /*
2857 * If VLANs are enabled, VLAN packets have been unwrapped
2858 * for us. Associate the tag with the packet.
2859 */
2860 if ((status & WRX_ST_VP) != 0) {
2861 VLAN_INPUT_TAG(ifp, m,
2862 le16toh(vlantag),
2863 continue);
2864 }
2865
2866 /*
2867 * Set up checksum info for this packet.
2868 */
2869 if ((status & WRX_ST_IXSM) == 0) {
2870 if (status & WRX_ST_IPCS) {
2871 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2872 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2873 if (errors & WRX_ER_IPE)
2874 m->m_pkthdr.csum_flags |=
2875 M_CSUM_IPv4_BAD;
2876 }
2877 if (status & WRX_ST_TCPCS) {
2878 /*
2879 * Note: we don't know if this was TCP or UDP,
2880 * so we just set both bits, and expect the
2881 * upper layers to deal.
2882 */
2883 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2884 m->m_pkthdr.csum_flags |=
2885 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2886 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2887 if (errors & WRX_ER_TCPE)
2888 m->m_pkthdr.csum_flags |=
2889 M_CSUM_TCP_UDP_BAD;
2890 }
2891 }
2892
2893 ifp->if_ipackets++;
2894
2895 /* Pass this up to any BPF listeners. */
2896 if (ifp->if_bpf)
2897 bpf_ops->bpf_mtap(ifp->if_bpf, m);
2898
2899 /* Pass it on. */
2900 (*ifp->if_input)(ifp, m);
2901 }
2902
2903 /* Update the receive pointer. */
2904 sc->sc_rxptr = i;
2905
2906 DPRINTF(WM_DEBUG_RX,
2907 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2908 }
2909
2910 /*
2911 * wm_linkintr_gmii:
2912 *
2913 * Helper; handle link interrupts for GMII.
2914 */
2915 static void
2916 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
2917 {
2918
2919 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2920 __func__));
2921
2922 if (icr & ICR_LSC) {
2923 DPRINTF(WM_DEBUG_LINK,
2924 ("%s: LINK: LSC -> mii_tick\n",
2925 device_xname(sc->sc_dev)));
2926 mii_tick(&sc->sc_mii);
2927 if (sc->sc_type == WM_T_82543) {
2928 int miistatus, active;
2929
2930 /*
2931 * With 82543, we need to force speed and
2932 * duplex on the MAC equal to what the PHY
2933 * speed and duplex configuration is.
2934 */
2935 miistatus = sc->sc_mii.mii_media_status;
2936
2937 if (miistatus & IFM_ACTIVE) {
2938 active = sc->sc_mii.mii_media_active;
2939 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
2940 switch (IFM_SUBTYPE(active)) {
2941 case IFM_10_T:
2942 sc->sc_ctrl |= CTRL_SPEED_10;
2943 break;
2944 case IFM_100_TX:
2945 sc->sc_ctrl |= CTRL_SPEED_100;
2946 break;
2947 case IFM_1000_T:
2948 sc->sc_ctrl |= CTRL_SPEED_1000;
2949 break;
2950 default:
2951 /*
2952 * fiber?
2953 					 * Should not enter here.
2954 */
2955 printf("unknown media (%x)\n",
2956 active);
2957 break;
2958 }
2959 if (active & IFM_FDX)
2960 sc->sc_ctrl |= CTRL_FD;
2961 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2962 }
2963 } else if (sc->sc_type == WM_T_PCH) {
2964 wm_k1_gig_workaround_hv(sc,
2965 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
2966 }
2967
2968 if ((sc->sc_phytype == WMPHY_82578)
2969 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
2970 == IFM_1000_T)) {
2971
2972 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
2973 				printf("XXX link stall workaround\n");
2974 delay(200*1000); /* XXX too big */
2975
2976 /* Link stall fix for link up */
2977 wm_gmii_hv_writereg(sc->sc_dev, 1,
2978 HV_MUX_DATA_CTRL,
2979 HV_MUX_DATA_CTRL_GEN_TO_MAC
2980 | HV_MUX_DATA_CTRL_FORCE_SPEED);
2981 wm_gmii_hv_writereg(sc->sc_dev, 1,
2982 HV_MUX_DATA_CTRL,
2983 HV_MUX_DATA_CTRL_GEN_TO_MAC);
2984 }
2985 }
2986 } else if (icr & ICR_RXSEQ) {
2987 DPRINTF(WM_DEBUG_LINK,
2988 ("%s: LINK Receive sequence error\n",
2989 device_xname(sc->sc_dev)));
2990 }
2991 }
2992
2993 /*
2994 * wm_linkintr_tbi:
2995 *
2996 * Helper; handle link interrupts for TBI mode.
2997 */
2998 static void
2999 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3000 {
3001 uint32_t status;
3002
3003 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3004 __func__));
3005
3006 status = CSR_READ(sc, WMREG_STATUS);
3007 if (icr & ICR_LSC) {
3008 if (status & STATUS_LU) {
3009 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3010 device_xname(sc->sc_dev),
3011 (status & STATUS_FD) ? "FDX" : "HDX"));
3012 /*
3013 * NOTE: CTRL will update TFCE and RFCE automatically,
3014 * so we should update sc->sc_ctrl
3015 */
3016
3017 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3018 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3019 sc->sc_fcrtl &= ~FCRTL_XONE;
3020 if (status & STATUS_FD)
3021 sc->sc_tctl |=
3022 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3023 else
3024 sc->sc_tctl |=
3025 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3026 if (sc->sc_ctrl & CTRL_TFCE)
3027 sc->sc_fcrtl |= FCRTL_XONE;
3028 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3029 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3030 WMREG_OLD_FCRTL : WMREG_FCRTL,
3031 sc->sc_fcrtl);
3032 sc->sc_tbi_linkup = 1;
3033 } else {
3034 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3035 device_xname(sc->sc_dev)));
3036 sc->sc_tbi_linkup = 0;
3037 }
3038 wm_tbi_set_linkled(sc);
3039 } else if (icr & ICR_RXCFG) {
3040 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3041 device_xname(sc->sc_dev)));
3042 sc->sc_tbi_nrxcfg++;
3043 wm_check_for_link(sc);
3044 } else if (icr & ICR_RXSEQ) {
3045 DPRINTF(WM_DEBUG_LINK,
3046 ("%s: LINK: Receive sequence error\n",
3047 device_xname(sc->sc_dev)));
3048 }
3049 }
3050
3051 /*
3052 * wm_linkintr:
3053 *
3054 * Helper; handle link interrupts.
3055 */
3056 static void
3057 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3058 {
3059
3060 if (sc->sc_flags & WM_F_HAS_MII)
3061 wm_linkintr_gmii(sc, icr);
3062 else
3063 wm_linkintr_tbi(sc, icr);
3064 }
3065
3066 /*
3067 * wm_tick:
3068 *
3069 * One second timer, used to check link status, sweep up
3070 * completed transmit jobs, etc.
3071 */
3072 static void
3073 wm_tick(void *arg)
3074 {
3075 struct wm_softc *sc = arg;
3076 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3077 int s;
3078
3079 s = splnet();
3080
3081 if (sc->sc_type >= WM_T_82542_2_1) {
3082 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3083 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3084 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3085 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3086 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3087 }
3088
3089 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3090 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3091
3092 if (sc->sc_flags & WM_F_HAS_MII)
3093 mii_tick(&sc->sc_mii);
3094 else
3095 wm_tbi_check_link(sc);
3096
3097 splx(s);
3098
3099 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3100 }
3101
3102 /*
3103 * wm_reset:
3104 *
3105  * Reset the chip (all supported types, not just the i82542).
3106 */
3107 static void
3108 wm_reset(struct wm_softc *sc)
3109 {
3110 int phy_reset = 0;
3111 uint32_t reg, func, mask;
3112 int i;
3113
3114 /*
3115 * Allocate on-chip memory according to the MTU size.
3116 * The Packet Buffer Allocation register must be written
3117 * before the chip is reset.
3118 */
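	/*
	 * Illustrative, assuming the PBA_* constants are in 1 kB
	 * units (as PBA_BYTE_SHIFT suggests): on an 82547 with a
	 * standard MTU, sc_pba = PBA_30K leaves (PBA_40K - PBA_30K)
	 * = 10 kB of the 40 kB packet buffer for the Tx FIFO, based
	 * at sc_pba << PBA_ADDR_SHIFT.
	 */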
3119 switch (sc->sc_type) {
3120 case WM_T_82547:
3121 case WM_T_82547_2:
3122 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3123 PBA_22K : PBA_30K;
3124 sc->sc_txfifo_head = 0;
3125 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3126 sc->sc_txfifo_size =
3127 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3128 sc->sc_txfifo_stall = 0;
3129 break;
3130 case WM_T_82571:
3131 case WM_T_82572:
3132 case WM_T_80003:
3133 sc->sc_pba = PBA_32K;
3134 break;
3135 case WM_T_82573:
3136 sc->sc_pba = PBA_12K;
3137 break;
3138 case WM_T_82574:
3139 case WM_T_82583:
3140 sc->sc_pba = PBA_20K;
3141 break;
3142 case WM_T_ICH8:
3143 sc->sc_pba = PBA_8K;
3144 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3145 break;
3146 case WM_T_ICH9:
3147 case WM_T_ICH10:
3148 case WM_T_PCH:
3149 sc->sc_pba = PBA_10K;
3150 break;
3151 default:
3152 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3153 PBA_40K : PBA_48K;
3154 break;
3155 }
3156 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3157
3158 if (sc->sc_flags & WM_F_PCIE) {
3159 int timeout = 800;
3160
3161 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3162 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3163
3164 while (timeout--) {
3165 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3166 break;
3167 delay(100);
3168 }
3169 }
3170
3171 /* clear interrupt */
3172 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3173
3174 /* Stop the transmit and receive processes. */
3175 CSR_WRITE(sc, WMREG_RCTL, 0);
3176 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3177
3178 /* set_tbi_sbp_82543() */
3179
3180 delay(10*1000);
3181
3182 /* Must acquire the MDIO ownership before MAC reset */
3183 	switch (sc->sc_type) {
3184 case WM_T_82573:
3185 case WM_T_82574:
3186 case WM_T_82583:
3187 i = 0;
3188 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3189 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3190 do {
3191 CSR_WRITE(sc, WMREG_EXTCNFCTR,
3192 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3193 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3194 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3195 break;
3196 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3197 delay(2*1000);
3198 i++;
3199 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3200 break;
3201 default:
3202 break;
3203 }
3204
3205 /*
3206 * 82541 Errata 29? & 82547 Errata 28?
3207 * See also the description about PHY_RST bit in CTRL register
3208 * in 8254x_GBe_SDM.pdf.
3209 */
3210 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3211 CSR_WRITE(sc, WMREG_CTRL,
3212 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3213 delay(5000);
3214 }
3215
3216 switch (sc->sc_type) {
3217 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3218 case WM_T_82541:
3219 case WM_T_82541_2:
3220 case WM_T_82547:
3221 case WM_T_82547_2:
3222 /*
3223 * On some chipsets, a reset through a memory-mapped write
3224 * cycle can cause the chip to reset before completing the
3225 		 * write cycle. This causes a major headache that can be
3226 * avoided by issuing the reset via indirect register writes
3227 * through I/O space.
3228 *
3229 * So, if we successfully mapped the I/O BAR at attach time,
3230 * use that. Otherwise, try our luck with a memory-mapped
3231 * reset.
3232 */
3233 if (sc->sc_flags & WM_F_IOH_VALID)
3234 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3235 else
3236 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3237 break;
3238 case WM_T_82545_3:
3239 case WM_T_82546_3:
3240 /* Use the shadow control register on these chips. */
3241 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3242 break;
3243 case WM_T_80003:
3244 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
3245 mask = func ? SWFW_PHY1_SM : SWFW_PHY0_SM;
3246 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3247 wm_get_swfw_semaphore(sc, mask);
3248 CSR_WRITE(sc, WMREG_CTRL, reg);
3249 wm_put_swfw_semaphore(sc, mask);
3250 break;
3251 case WM_T_ICH8:
3252 case WM_T_ICH9:
3253 case WM_T_ICH10:
3254 case WM_T_PCH:
3255 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3256 if (wm_check_reset_block(sc) == 0) {
3257 if (sc->sc_type >= WM_T_PCH) {
3258 uint32_t status;
3259
3260 status = CSR_READ(sc, WMREG_STATUS);
3261 CSR_WRITE(sc, WMREG_STATUS,
3262 status & ~STATUS_PHYRA);
3263 }
3264
3265 reg |= CTRL_PHY_RESET;
3266 phy_reset = 1;
3267 }
3268 wm_get_swfwhw_semaphore(sc);
3269 CSR_WRITE(sc, WMREG_CTRL, reg);
3270 delay(20*1000);
3271 wm_put_swfwhw_semaphore(sc);
3272 break;
3273 case WM_T_82542_2_0:
3274 case WM_T_82542_2_1:
3275 case WM_T_82543:
3276 case WM_T_82540:
3277 case WM_T_82545:
3278 case WM_T_82546:
3279 case WM_T_82571:
3280 case WM_T_82572:
3281 case WM_T_82573:
3282 case WM_T_82574:
3283 case WM_T_82583:
3284 default:
3285 /* Everything else can safely use the documented method. */
3286 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3287 break;
3288 }
3289
3290 if (phy_reset != 0)
3291 wm_get_cfg_done(sc);
3292
3293 /* reload EEPROM */
3294 	switch (sc->sc_type) {
3295 case WM_T_82542_2_0:
3296 case WM_T_82542_2_1:
3297 case WM_T_82543:
3298 case WM_T_82544:
3299 delay(10);
3300 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3301 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3302 delay(2000);
3303 break;
3304 case WM_T_82540:
3305 case WM_T_82545:
3306 case WM_T_82545_3:
3307 case WM_T_82546:
3308 case WM_T_82546_3:
3309 delay(5*1000);
3310 /* XXX Disable HW ARPs on ASF enabled adapters */
3311 break;
3312 case WM_T_82541:
3313 case WM_T_82541_2:
3314 case WM_T_82547:
3315 case WM_T_82547_2:
3316 delay(20000);
3317 /* XXX Disable HW ARPs on ASF enabled adapters */
3318 break;
3319 case WM_T_82571:
3320 case WM_T_82572:
3321 case WM_T_82573:
3322 case WM_T_82574:
3323 case WM_T_82583:
3324 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3325 delay(10);
3326 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3327 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3328 }
3329 /* check EECD_EE_AUTORD */
3330 wm_get_auto_rd_done(sc);
3331 /*
3332 * Phy configuration from NVM just starts after EECD_AUTO_RD
3333 * is set.
3334 */
3335 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3336 || (sc->sc_type == WM_T_82583))
3337 delay(25*1000);
3338 break;
3339 case WM_T_80003:
3340 case WM_T_ICH8:
3341 case WM_T_ICH9:
3342 /* check EECD_EE_AUTORD */
3343 wm_get_auto_rd_done(sc);
3344 break;
3345 case WM_T_ICH10:
3346 case WM_T_PCH:
3347 wm_lan_init_done(sc);
3348 break;
3349 default:
3350 panic("%s: unknown type\n", __func__);
3351 }
3352
3353 /* reload sc_ctrl */
3354 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3355
3356 /* dummy read from WUC */
3357 if (sc->sc_type == WM_T_PCH)
3358 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3359 /*
3360 * For PCH, this write will make sure that any noise will be detected
3361 * as a CRC error and be dropped rather than show up as a bad packet
3362 	 * to the DMA engine.
3363 */
3364 if (sc->sc_type == WM_T_PCH)
3365 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3366
3367 #if 0
3368 for (i = 0; i < 1000; i++) {
3369 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3370 return;
3371 }
3372 delay(20);
3373 }
3374
3375 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3376 log(LOG_ERR, "%s: reset failed to complete\n",
3377 device_xname(sc->sc_dev));
3378 #endif
3379 }
3380
3381 /*
3382 * wm_init: [ifnet interface function]
3383 *
3384 * Initialize the interface. Must be called at splnet().
3385 */
3386 static int
3387 wm_init(struct ifnet *ifp)
3388 {
3389 struct wm_softc *sc = ifp->if_softc;
3390 struct wm_rxsoft *rxs;
3391 int i, error = 0;
3392 uint32_t reg;
3393
3394 /*
3395 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3396 	 * There is a small but measurable benefit to avoiding the adjustment
3397 * of the descriptor so that the headers are aligned, for normal mtu,
3398 * on such platforms. One possibility is that the DMA itself is
3399 * slightly more efficient if the front of the entire packet (instead
3400 * of the front of the headers) is aligned.
3401 *
3402 * Note we must always set align_tweak to 0 if we are using
3403 * jumbo frames.
3404 */
3405 #ifdef __NO_STRICT_ALIGNMENT
3406 sc->sc_align_tweak = 0;
3407 #else
3408 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3409 sc->sc_align_tweak = 0;
3410 else
3411 sc->sc_align_tweak = 2;
3412 #endif /* __NO_STRICT_ALIGNMENT */
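	/*
	 * (Why 2: with align_tweak = 2 the 14-byte Ethernet header
	 * ends on a 16-byte boundary, so the IP header that follows
	 * is 4-byte aligned -- the usual trick on strict-alignment
	 * machines.)
	 */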
3413
3414 /* Cancel any pending I/O. */
3415 wm_stop(ifp, 0);
3416
3417 /* update statistics before reset */
3418 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3419 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3420
3421 /* Reset the chip to a known state. */
3422 wm_reset(sc);
3423
3424 switch (sc->sc_type) {
3425 case WM_T_82571:
3426 case WM_T_82572:
3427 case WM_T_82573:
3428 case WM_T_82574:
3429 case WM_T_82583:
3430 case WM_T_80003:
3431 case WM_T_ICH8:
3432 case WM_T_ICH9:
3433 case WM_T_ICH10:
3434 case WM_T_PCH:
3435 if (wm_check_mng_mode(sc) != 0)
3436 wm_get_hw_control(sc);
3437 break;
3438 default:
3439 break;
3440 }
3441
3442 /* Reset the PHY. */
3443 if (sc->sc_flags & WM_F_HAS_MII)
3444 wm_gmii_reset(sc);
3445
3446 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3447 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3448 if (sc->sc_type == WM_T_PCH)
3449 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3450
3451 /* Initialize the transmit descriptor ring. */
3452 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3453 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3454 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3455 sc->sc_txfree = WM_NTXDESC(sc);
3456 sc->sc_txnext = 0;
3457
3458 if (sc->sc_type < WM_T_82543) {
3459 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3460 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3461 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3462 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3463 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3464 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3465 } else {
3466 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3467 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3468 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3469 CSR_WRITE(sc, WMREG_TDH, 0);
3470 CSR_WRITE(sc, WMREG_TDT, 0);
3471 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3472 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3473
3474 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3475 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3476 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3477 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3478 }
3479 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3480 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3481
3482 /* Initialize the transmit job descriptors. */
3483 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3484 sc->sc_txsoft[i].txs_mbuf = NULL;
3485 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3486 sc->sc_txsnext = 0;
3487 sc->sc_txsdirty = 0;
3488
3489 /*
3490 * Initialize the receive descriptor and receive job
3491 * descriptor rings.
3492 */
3493 if (sc->sc_type < WM_T_82543) {
3494 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3495 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3496 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3497 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3498 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3499 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3500
3501 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3502 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3503 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3504 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3505 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3506 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3507 } else {
3508 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3509 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3510 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3511 CSR_WRITE(sc, WMREG_RDH, 0);
3512 CSR_WRITE(sc, WMREG_RDT, 0);
3513 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3514 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3515 }
3516 for (i = 0; i < WM_NRXDESC; i++) {
3517 rxs = &sc->sc_rxsoft[i];
3518 if (rxs->rxs_mbuf == NULL) {
3519 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3520 log(LOG_ERR, "%s: unable to allocate or map rx "
3521 "buffer %d, error = %d\n",
3522 device_xname(sc->sc_dev), i, error);
3523 /*
3524 * XXX Should attempt to run with fewer receive
3525 * XXX buffers instead of just failing.
3526 */
3527 wm_rxdrain(sc);
3528 goto out;
3529 }
3530 } else
3531 WM_INIT_RXDESC(sc, i);
3532 }
3533 sc->sc_rxptr = 0;
3534 sc->sc_rxdiscard = 0;
3535 WM_RXCHAIN_RESET(sc);
3536
3537 /*
3538 * Clear out the VLAN table -- we don't use it (yet).
3539 */
3540 CSR_WRITE(sc, WMREG_VET, 0);
3541 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3542 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3543
3544 /*
3545 * Set up flow-control parameters.
3546 *
3547 * XXX Values could probably stand some tuning.
3548 */
3549 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3550 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3551 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3552 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3553 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3554 }
3555
3556 sc->sc_fcrtl = FCRTL_DFLT;
3557 if (sc->sc_type < WM_T_82543) {
3558 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3559 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3560 } else {
3561 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3562 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3563 }
3564
3565 if (sc->sc_type == WM_T_80003)
3566 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3567 else
3568 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3569
3570 /* Deal with VLAN enables. */
3571 if (VLAN_ATTACHED(&sc->sc_ethercom))
3572 sc->sc_ctrl |= CTRL_VME;
3573 else
3574 sc->sc_ctrl &= ~CTRL_VME;
3575
3576 /* Write the control registers. */
3577 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3578
3579 if (sc->sc_flags & WM_F_HAS_MII) {
3580 int val;
3581
3582 switch (sc->sc_type) {
3583 case WM_T_80003:
3584 case WM_T_ICH8:
3585 case WM_T_ICH9:
3586 case WM_T_ICH10:
3587 case WM_T_PCH:
3588 /*
3589 * Set the mac to wait the maximum time between each
3590 * iteration and increase the max iterations when
3591 * polling the phy; this fixes erroneous timeouts at
3592 * 10Mbps.
3593 */
3594 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3595 0xFFFF);
3596 val = wm_kmrn_readreg(sc,
3597 KUMCTRLSTA_OFFSET_INB_PARAM);
3598 val |= 0x3F;
3599 wm_kmrn_writereg(sc,
3600 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3601 break;
3602 default:
3603 break;
3604 }
3605
3606 if (sc->sc_type == WM_T_80003) {
3607 val = CSR_READ(sc, WMREG_CTRL_EXT);
3608 val &= ~CTRL_EXT_LINK_MODE_MASK;
3609 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3610
3611 /* Bypass RX and TX FIFO's */
3612 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3613 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3614 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3615
3616 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3617 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3618 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3619 }
3620 }
3621 #if 0
3622 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3623 #endif
3624
3625 /*
3626 * Set up checksum offload parameters.
3627 */
3628 reg = CSR_READ(sc, WMREG_RXCSUM);
3629 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3630 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3631 reg |= RXCSUM_IPOFL;
3632 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3633 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3634 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3635 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3636 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3637
3638 /* Reset TBI's RXCFG count */
3639 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3640
3641 /*
3642 * Set up the interrupt registers.
3643 */
3644 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3645 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3646 ICR_RXO | ICR_RXT0;
3647 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3648 sc->sc_icr |= ICR_RXCFG;
3649 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3650
3651 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3652 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
3653 reg = CSR_READ(sc, WMREG_KABGTXD);
3654 reg |= KABGTXD_BGSQLBIAS;
3655 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3656 }
3657
3658 /* Set up the inter-packet gap. */
3659 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3660
3661 if (sc->sc_type >= WM_T_82543) {
3662 /*
3663 * Set up the interrupt throttling register (units of 256ns)
3664 * Note that a footnote in Intel's documentation says this
3665 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode. Empirically, this also appears to be true
		 * for the 1024ns units of the other
3668 * interrupt-related timer registers -- so, really, we ought
3669 * to divide this value by 4 when the link speed is low.
3670 *
3671 * XXX implement this division at link speed change!
3672 */
3673
3674 /*
3675 * For N interrupts/sec, set this value to:
3676 * 1000000000 / (N * 256). Note that we set the
3677 * absolute and packet timer values to this value
3678 * divided by 4 to get "simple timer" behavior.
3679 */
3680
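		/* E.g. for sc_itr = 1500: 1000000000 / (1500 * 256) ~= 2604. */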
3681 sc->sc_itr = 1500; /* 2604 ints/sec */
3682 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3683 }
3684
3685 /* Set the VLAN ethernetype. */
3686 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3687
3688 /*
3689 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
3691 * we resolve the media type.
3692 */
3693 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3694 | TCTL_CT(TX_COLLISION_THRESHOLD)
3695 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3696 if (sc->sc_type >= WM_T_82571)
3697 sc->sc_tctl |= TCTL_MULR;
3698 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3699
3700 if (sc->sc_type == WM_T_80003) {
3701 reg = CSR_READ(sc, WMREG_TCTL_EXT);
3702 reg &= ~TCTL_EXT_GCEX_MASK;
3703 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3704 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3705 }
3706
3707 /* Set the media. */
3708 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3709 goto out;
3710
3711 /*
3712 * Set up the receive control register; we actually program
3713 * the register when we set the receive filter. Use multicast
3714 * address offset type 0.
3715 *
3716 * Only the i82544 has the ability to strip the incoming
3717 * CRC, so we don't enable that feature.
3718 */
3719 sc->sc_mchash_type = 0;
3720 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3721 | RCTL_MO(sc->sc_mchash_type);
3722
3723 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
3724 && (ifp->if_mtu > ETHERMTU))
3725 sc->sc_rctl |= RCTL_LPE;
3726
3727 if (MCLBYTES == 2048) {
3728 sc->sc_rctl |= RCTL_2k;
3729 } else {
3730 if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
3732 case 4096:
3733 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3734 break;
3735 case 8192:
3736 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3737 break;
3738 case 16384:
3739 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3740 break;
3741 default:
3742 panic("wm_init: MCLBYTES %d unsupported",
3743 MCLBYTES);
3744 break;
3745 }
		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
3747 }
3748
3749 /* Set the receive filter. */
3750 wm_set_filter(sc);
3751
3752 /* Start the one second link check clock. */
3753 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3754
3755 /* ...all done! */
3756 ifp->if_flags |= IFF_RUNNING;
3757 ifp->if_flags &= ~IFF_OACTIVE;
3758
3759 out:
3760 if (error)
3761 log(LOG_ERR, "%s: interface not running\n",
3762 device_xname(sc->sc_dev));
3763 return (error);
3764 }
3765
3766 /*
3767 * wm_rxdrain:
3768 *
3769 * Drain the receive queue.
3770 */
3771 static void
3772 wm_rxdrain(struct wm_softc *sc)
3773 {
3774 struct wm_rxsoft *rxs;
3775 int i;
3776
3777 for (i = 0; i < WM_NRXDESC; i++) {
3778 rxs = &sc->sc_rxsoft[i];
3779 if (rxs->rxs_mbuf != NULL) {
3780 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3781 m_freem(rxs->rxs_mbuf);
3782 rxs->rxs_mbuf = NULL;
3783 }
3784 }
3785 }
3786
3787 /*
3788 * wm_stop: [ifnet interface function]
3789 *
3790 * Stop transmission on the interface.
3791 */
3792 static void
3793 wm_stop(struct ifnet *ifp, int disable)
3794 {
3795 struct wm_softc *sc = ifp->if_softc;
3796 struct wm_txsoft *txs;
3797 int i;
3798
3799 /* Stop the one second clock. */
3800 callout_stop(&sc->sc_tick_ch);
3801
3802 /* Stop the 82547 Tx FIFO stall check timer. */
3803 if (sc->sc_type == WM_T_82547)
3804 callout_stop(&sc->sc_txfifo_ch);
3805
3806 if (sc->sc_flags & WM_F_HAS_MII) {
3807 /* Down the MII. */
3808 mii_down(&sc->sc_mii);
3809 } else {
3810 #if 0
3811 /* Should we clear PHY's status properly? */
3812 wm_reset(sc);
3813 #endif
3814 }
3815
3816 /* Stop the transmit and receive processes. */
3817 CSR_WRITE(sc, WMREG_TCTL, 0);
3818 CSR_WRITE(sc, WMREG_RCTL, 0);
3819
3820 /*
3821 * Clear the interrupt mask to ensure the device cannot assert its
3822 * interrupt line.
3823 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3824 * any currently pending or shared interrupt.
3825 */
3826 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3827 sc->sc_icr = 0;
3828
3829 /* Release any queued transmit buffers. */
3830 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3831 txs = &sc->sc_txsoft[i];
3832 if (txs->txs_mbuf != NULL) {
3833 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3834 m_freem(txs->txs_mbuf);
3835 txs->txs_mbuf = NULL;
3836 }
3837 }
3838
3839 /* Mark the interface as down and cancel the watchdog timer. */
3840 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3841 ifp->if_timer = 0;
3842
3843 if (disable)
3844 wm_rxdrain(sc);
3845 }
3846
3847 void
3848 wm_get_auto_rd_done(struct wm_softc *sc)
3849 {
3850 int i;
3851
3852 /* wait for eeprom to reload */
3853 switch (sc->sc_type) {
3854 case WM_T_82571:
3855 case WM_T_82572:
3856 case WM_T_82573:
3857 case WM_T_82574:
3858 case WM_T_82583:
3859 case WM_T_80003:
3860 case WM_T_ICH8:
3861 case WM_T_ICH9:
3862 for (i = 0; i < 10; i++) {
3863 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3864 break;
3865 delay(1000);
3866 }
3867 if (i == 10) {
3868 log(LOG_ERR, "%s: auto read from eeprom failed to "
3869 "complete\n", device_xname(sc->sc_dev));
3870 }
3871 break;
3872 default:
3873 break;
3874 }
3875 }
3876
3877 void
3878 wm_lan_init_done(struct wm_softc *sc)
3879 {
3880 uint32_t reg = 0;
3881 int i;
3882
	/* Wait for LAN init (after an EEPROM reload) to complete */
3884 switch (sc->sc_type) {
3885 case WM_T_ICH10:
3886 case WM_T_PCH:
3887 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3888 reg = CSR_READ(sc, WMREG_STATUS);
3889 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3890 break;
3891 delay(100);
3892 }
3893 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3894 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3895 "complete\n", device_xname(sc->sc_dev), __func__);
3896 }
3897 break;
3898 default:
3899 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3900 __func__);
3901 break;
3902 }
3903
3904 reg &= ~STATUS_LAN_INIT_DONE;
3905 CSR_WRITE(sc, WMREG_STATUS, reg);
3906 }
3907
3908 void
3909 wm_get_cfg_done(struct wm_softc *sc)
3910 {
3911 int func = 0;
3912 int mask;
3913 uint32_t reg;
3914 int i;
3915
3916 /* wait for eeprom to reload */
3917 switch (sc->sc_type) {
3918 case WM_T_82542_2_0:
3919 case WM_T_82542_2_1:
3920 /* null */
3921 break;
3922 case WM_T_82543:
3923 case WM_T_82544:
3924 case WM_T_82540:
3925 case WM_T_82545:
3926 case WM_T_82545_3:
3927 case WM_T_82546:
3928 case WM_T_82546_3:
3929 case WM_T_82541:
3930 case WM_T_82541_2:
3931 case WM_T_82547:
3932 case WM_T_82547_2:
3933 case WM_T_82573:
3934 case WM_T_82574:
3935 case WM_T_82583:
3936 /* generic */
3937 delay(10*1000);
3938 break;
3939 case WM_T_80003:
3940 case WM_T_82571:
3941 case WM_T_82572:
3942 if (sc->sc_type == WM_T_80003)
3943 func = (CSR_READ(sc, WMREG_STATUS)
3944 >> STATUS_FUNCID_SHIFT) & 1;
3945 else
3946 func = 0; /* XXX Is it true for 82571? */
3947 mask = (func == 1) ? EEMNGCTL_CFGDONE_1 : EEMNGCTL_CFGDONE_0;
3948 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3949 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3950 break;
3951 delay(1000);
3952 }
3953 if (i >= WM_PHY_CFG_TIMEOUT) {
3954 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3955 device_xname(sc->sc_dev), __func__));
3956 }
3957 break;
3958 case WM_T_ICH8:
3959 case WM_T_ICH9:
3960 case WM_T_ICH10:
3961 case WM_T_PCH:
3962 if (sc->sc_type >= WM_T_PCH) {
3963 reg = CSR_READ(sc, WMREG_STATUS);
3964 if ((reg & STATUS_PHYRA) != 0)
3965 CSR_WRITE(sc, WMREG_STATUS,
3966 reg & ~STATUS_PHYRA);
3967 }
3968 delay(10*1000);
3969 break;
3970 default:
3971 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3972 __func__);
3973 break;
3974 }
3975 }
3976
3977 /*
3978 * wm_acquire_eeprom:
3979 *
3980 * Perform the EEPROM handshake required on some chips.
3981 */
3982 static int
3983 wm_acquire_eeprom(struct wm_softc *sc)
3984 {
3985 uint32_t reg;
3986 int x;
3987 int ret = 0;
3988
	/* Flash-based parts need no handshake; nothing to acquire. */
3990 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3991 return 0;
3992
3993 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3994 ret = wm_get_swfwhw_semaphore(sc);
3995 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3996 /* this will also do wm_get_swsm_semaphore() if needed */
3997 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3998 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3999 ret = wm_get_swsm_semaphore(sc);
4000 }
4001
4002 if (ret) {
4003 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4004 __func__);
4005 return 1;
4006 }
4007
4008 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4009 reg = CSR_READ(sc, WMREG_EECD);
4010
4011 /* Request EEPROM access. */
4012 reg |= EECD_EE_REQ;
4013 CSR_WRITE(sc, WMREG_EECD, reg);
4014
		/* ...and wait for it to be granted. */
4016 for (x = 0; x < 1000; x++) {
4017 reg = CSR_READ(sc, WMREG_EECD);
4018 if (reg & EECD_EE_GNT)
4019 break;
4020 delay(5);
4021 }
4022 if ((reg & EECD_EE_GNT) == 0) {
4023 aprint_error_dev(sc->sc_dev,
4024 "could not acquire EEPROM GNT\n");
4025 reg &= ~EECD_EE_REQ;
4026 CSR_WRITE(sc, WMREG_EECD, reg);
4027 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4028 wm_put_swfwhw_semaphore(sc);
4029 if (sc->sc_flags & WM_F_SWFW_SYNC)
4030 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4031 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4032 wm_put_swsm_semaphore(sc);
4033 return (1);
4034 }
4035 }
4036
4037 return (0);
4038 }
4039
4040 /*
4041 * wm_release_eeprom:
4042 *
4043 * Release the EEPROM mutex.
4044 */
4045 static void
4046 wm_release_eeprom(struct wm_softc *sc)
4047 {
4048 uint32_t reg;
4049
	/* Flash-based parts need no handshake; nothing to release. */
4051 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4052 return;
4053
4054 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4055 reg = CSR_READ(sc, WMREG_EECD);
4056 reg &= ~EECD_EE_REQ;
4057 CSR_WRITE(sc, WMREG_EECD, reg);
4058 }
4059
4060 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4061 wm_put_swfwhw_semaphore(sc);
4062 if (sc->sc_flags & WM_F_SWFW_SYNC)
4063 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4064 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4065 wm_put_swsm_semaphore(sc);
4066 }
4067
4068 /*
4069 * wm_eeprom_sendbits:
4070 *
4071 * Send a series of bits to the EEPROM.
4072 */
4073 static void
4074 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4075 {
4076 uint32_t reg;
4077 int x;
4078
4079 reg = CSR_READ(sc, WMREG_EECD);
4080
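	/*
	 * Clock the bits out MSB first: present each bit on DI, then
	 * pulse SK high and back low.
	 */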
4081 for (x = nbits; x > 0; x--) {
4082 if (bits & (1U << (x - 1)))
4083 reg |= EECD_DI;
4084 else
4085 reg &= ~EECD_DI;
4086 CSR_WRITE(sc, WMREG_EECD, reg);
4087 delay(2);
4088 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4089 delay(2);
4090 CSR_WRITE(sc, WMREG_EECD, reg);
4091 delay(2);
4092 }
4093 }
4094
4095 /*
4096 * wm_eeprom_recvbits:
4097 *
4098 * Receive a series of bits from the EEPROM.
4099 */
4100 static void
4101 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4102 {
4103 uint32_t reg, val;
4104 int x;
4105
4106 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4107
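	/*
	 * Clock the bits in MSB first: raise SK, sample DO while the
	 * clock is high, then drop SK again.
	 */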
4108 val = 0;
4109 for (x = nbits; x > 0; x--) {
4110 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4111 delay(2);
4112 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4113 val |= (1U << (x - 1));
4114 CSR_WRITE(sc, WMREG_EECD, reg);
4115 delay(2);
4116 }
4117 *valp = val;
4118 }
4119
4120 /*
4121 * wm_read_eeprom_uwire:
4122 *
4123 * Read a word from the EEPROM using the MicroWire protocol.
4124 */
4125 static int
4126 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4127 {
4128 uint32_t reg, val;
4129 int i;
4130
4131 for (i = 0; i < wordcnt; i++) {
4132 /* Clear SK and DI. */
4133 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4134 CSR_WRITE(sc, WMREG_EECD, reg);
4135
4136 /* Set CHIP SELECT. */
4137 reg |= EECD_CS;
4138 CSR_WRITE(sc, WMREG_EECD, reg);
4139 delay(2);
4140
4141 /* Shift in the READ command. */
4142 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4143
4144 /* Shift in address. */
4145 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4146
4147 /* Shift out the data. */
4148 wm_eeprom_recvbits(sc, &val, 16);
4149 data[i] = val & 0xffff;
4150
4151 /* Clear CHIP SELECT. */
4152 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4153 CSR_WRITE(sc, WMREG_EECD, reg);
4154 delay(2);
4155 }
4156
4157 return (0);
4158 }
4159
4160 /*
4161 * wm_spi_eeprom_ready:
4162 *
4163 * Wait for a SPI EEPROM to be ready for commands.
4164 */
4165 static int
4166 wm_spi_eeprom_ready(struct wm_softc *sc)
4167 {
4168 uint32_t val;
4169 int usec;
4170
4171 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4172 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4173 wm_eeprom_recvbits(sc, &val, 8);
4174 if ((val & SPI_SR_RDY) == 0)
4175 break;
4176 }
4177 if (usec >= SPI_MAX_RETRIES) {
4178 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4179 return (1);
4180 }
4181 return (0);
4182 }
4183
4184 /*
4185 * wm_read_eeprom_spi:
4186 *
 *	Read a word from the EEPROM using the SPI protocol.
4188 */
4189 static int
4190 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4191 {
4192 uint32_t reg, val;
4193 int i;
4194 uint8_t opc;
4195
4196 /* Clear SK and CS. */
4197 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4198 CSR_WRITE(sc, WMREG_EECD, reg);
4199 delay(2);
4200
4201 if (wm_spi_eeprom_ready(sc))
4202 return (1);
4203
4204 /* Toggle CS to flush commands. */
4205 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4206 delay(2);
4207 CSR_WRITE(sc, WMREG_EECD, reg);
4208 delay(2);
4209
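	/*
	 * Parts with only 8 address bits carry the ninth (A8) address
	 * bit in the opcode itself.
	 */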
4210 opc = SPI_OPC_READ;
4211 if (sc->sc_ee_addrbits == 8 && word >= 128)
4212 opc |= SPI_OPC_A8;
4213
4214 wm_eeprom_sendbits(sc, opc, 8);
4215 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4216
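	/* SPI parts shift data out MSB first; byte-swap each word. */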
4217 for (i = 0; i < wordcnt; i++) {
4218 wm_eeprom_recvbits(sc, &val, 16);
4219 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4220 }
4221
4222 /* Raise CS and clear SK. */
4223 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4224 CSR_WRITE(sc, WMREG_EECD, reg);
4225 delay(2);
4226
4227 return (0);
4228 }
4229
4230 #define EEPROM_CHECKSUM 0xBABA
4231 #define EEPROM_SIZE 0x0040
4232
4233 /*
4234 * wm_validate_eeprom_checksum
4235 *
 * The checksum is defined as the sum of the first 64 (16 bit) words,
 * which must equal EEPROM_CHECKSUM (0xBABA).
4237 */
4238 static int
4239 wm_validate_eeprom_checksum(struct wm_softc *sc)
4240 {
4241 uint16_t checksum;
4242 uint16_t eeprom_data;
4243 int i;
4244
4245 checksum = 0;
4246
4247 for (i = 0; i < EEPROM_SIZE; i++) {
4248 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4249 return 1;
4250 checksum += eeprom_data;
4251 }
4252
4253 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4254 return 1;
4255
4256 return 0;
4257 }
4258
4259 /*
4260 * wm_read_eeprom:
4261 *
4262 * Read data from the serial EEPROM.
4263 */
4264 static int
4265 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4266 {
4267 int rv;
4268
4269 if (sc->sc_flags & WM_F_EEPROM_INVALID)
4270 return 1;
4271
4272 if (wm_acquire_eeprom(sc))
4273 return 1;
4274
4275 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4276 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4277 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4278 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4279 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4280 else if (sc->sc_flags & WM_F_EEPROM_SPI)
4281 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4282 else
4283 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4284
4285 wm_release_eeprom(sc);
4286 return rv;
4287 }
4288
4289 static int
4290 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4291 uint16_t *data)
4292 {
4293 int i, eerd = 0;
4294 int error = 0;
4295
4296 for (i = 0; i < wordcnt; i++) {
4297 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4298
4299 CSR_WRITE(sc, WMREG_EERD, eerd);
4300 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4301 if (error != 0)
4302 break;
4303
4304 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4305 }
4306
4307 return error;
4308 }
4309
4310 static int
4311 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4312 {
4313 uint32_t attempts = 100000;
4314 uint32_t i, reg = 0;
4315 int32_t done = -1;
4316
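	/* Poll for up to 100000 * 5us = 500ms. */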
4317 for (i = 0; i < attempts; i++) {
4318 reg = CSR_READ(sc, rw);
4319
4320 if (reg & EERD_DONE) {
4321 done = 0;
4322 break;
4323 }
4324 delay(5);
4325 }
4326
4327 return done;
4328 }
4329
4330 /*
4331 * wm_add_rxbuf:
4332 *
 *	Add a receive buffer to the indicated descriptor.
4334 */
4335 static int
4336 wm_add_rxbuf(struct wm_softc *sc, int idx)
4337 {
4338 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4339 struct mbuf *m;
4340 int error;
4341
4342 MGETHDR(m, M_DONTWAIT, MT_DATA);
4343 if (m == NULL)
4344 return (ENOBUFS);
4345
4346 MCLGET(m, M_DONTWAIT);
4347 if ((m->m_flags & M_EXT) == 0) {
4348 m_freem(m);
4349 return (ENOBUFS);
4350 }
4351
4352 if (rxs->rxs_mbuf != NULL)
4353 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4354
4355 rxs->rxs_mbuf = m;
4356
4357 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4358 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4359 BUS_DMA_READ|BUS_DMA_NOWAIT);
4360 if (error) {
4361 /* XXX XXX XXX */
4362 aprint_error_dev(sc->sc_dev,
4363 "unable to load rx DMA map %d, error = %d\n",
4364 idx, error);
4365 panic("wm_add_rxbuf");
4366 }
4367
4368 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4369 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4370
4371 WM_INIT_RXDESC(sc, idx);
4372
4373 return (0);
4374 }
4375
4376 /*
4377 * wm_set_ral:
4378 *
 *	Set an entry in the receive address list.
4380 */
4381 static void
4382 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4383 {
4384 uint32_t ral_lo, ral_hi;
4385
4386 if (enaddr != NULL) {
4387 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4388 (enaddr[3] << 24);
4389 ral_hi = enaddr[4] | (enaddr[5] << 8);
4390 ral_hi |= RAL_AV;
4391 } else {
4392 ral_lo = 0;
4393 ral_hi = 0;
4394 }
4395
4396 if (sc->sc_type >= WM_T_82544) {
4397 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4398 ral_lo);
4399 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4400 ral_hi);
4401 } else {
4402 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4403 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4404 }
4405 }
4406
4407 /*
4408 * wm_mchash:
4409 *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter (1024-bit on ICH and PCH chips).
4412 */
4413 static uint32_t
4414 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4415 {
4416 static const int lo_shift[4] = { 4, 3, 2, 0 };
4417 static const int hi_shift[4] = { 4, 5, 6, 8 };
4418 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4419 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4420 uint32_t hash;
4421
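	/*
	 * The hash is formed from bits of the last two bytes of the
	 * address; sc_mchash_type selects which bit offsets are used.
	 */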
4422 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4423 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4424 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4425 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4426 return (hash & 0x3ff);
4427 }
4428 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4429 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4430
4431 return (hash & 0xfff);
4432 }
4433
4434 /*
4435 * wm_set_filter:
4436 *
4437 * Set up the receive filter.
4438 */
4439 static void
4440 wm_set_filter(struct wm_softc *sc)
4441 {
4442 struct ethercom *ec = &sc->sc_ethercom;
4443 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4444 struct ether_multi *enm;
4445 struct ether_multistep step;
4446 bus_addr_t mta_reg;
4447 uint32_t hash, reg, bit;
4448 int i, size;
4449
4450 if (sc->sc_type >= WM_T_82544)
4451 mta_reg = WMREG_CORDOVA_MTA;
4452 else
4453 mta_reg = WMREG_MTA;
4454
4455 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4456
4457 if (ifp->if_flags & IFF_BROADCAST)
4458 sc->sc_rctl |= RCTL_BAM;
4459 if (ifp->if_flags & IFF_PROMISC) {
4460 sc->sc_rctl |= RCTL_UPE;
4461 goto allmulti;
4462 }
4463
4464 /*
4465 * Set the station address in the first RAL slot, and
4466 * clear the remaining slots.
4467 */
4468 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4469 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4470 size = WM_ICH8_RAL_TABSIZE;
4471 else
4472 size = WM_RAL_TABSIZE;
4473 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4474 for (i = 1; i < size; i++)
4475 wm_set_ral(sc, NULL, i);
4476
4477 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4478 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4479 size = WM_ICH8_MC_TABSIZE;
4480 else
4481 size = WM_MC_TABSIZE;
4482 /* Clear out the multicast table. */
4483 for (i = 0; i < size; i++)
4484 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4485
4486 ETHER_FIRST_MULTI(step, ec, enm);
4487 while (enm != NULL) {
4488 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4489 /*
4490 * We must listen to a range of multicast addresses.
4491 * For now, just accept all multicasts, rather than
4492 * trying to set only those filter bits needed to match
4493 * the range. (At this time, the only use of address
4494 * ranges is for IP multicast routing, for which the
4495 * range is big enough to require all bits set.)
4496 */
4497 goto allmulti;
4498 }
4499
4500 hash = wm_mchash(sc, enm->enm_addrlo);
4501
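		/*
		 * The upper bits of the hash select a 32-bit word of the
		 * multicast table; the low 5 bits select a bit within it.
		 */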
4502 reg = (hash >> 5);
4503 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4504 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4505 reg &= 0x1f;
4506 else
4507 reg &= 0x7f;
4508 bit = hash & 0x1f;
4509
4510 hash = CSR_READ(sc, mta_reg + (reg << 2));
4511 hash |= 1U << bit;
4512
4513 /* XXX Hardware bug?? */
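		/*
		 * (The workaround below suggests that writing certain
		 * MTA words on the 82544 can disturb the neighbouring
		 * word unless it is rewritten.)
		 */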
4514 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4515 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4516 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4517 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4518 } else
4519 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4520
4521 ETHER_NEXT_MULTI(step, enm);
4522 }
4523
4524 ifp->if_flags &= ~IFF_ALLMULTI;
4525 goto setit;
4526
4527 allmulti:
4528 ifp->if_flags |= IFF_ALLMULTI;
4529 sc->sc_rctl |= RCTL_MPE;
4530
4531 setit:
4532 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4533 }
4534
4535 /*
4536 * wm_tbi_mediainit:
4537 *
4538 * Initialize media for use on 1000BASE-X devices.
4539 */
4540 static void
4541 wm_tbi_mediainit(struct wm_softc *sc)
4542 {
4543 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4544 const char *sep = "";
4545
4546 if (sc->sc_type < WM_T_82543)
4547 sc->sc_tipg = TIPG_WM_DFLT;
4548 else
4549 sc->sc_tipg = TIPG_LG_DFLT;
4550
4551 sc->sc_tbi_anegticks = 5;
4552
4553 /* Initialize our media structures */
4554 sc->sc_mii.mii_ifp = ifp;
4555
4556 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4557 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4558 wm_tbi_mediastatus);
4559
4560 /*
4561 * SWD Pins:
4562 *
4563 * 0 = Link LED (output)
4564 * 1 = Loss Of Signal (input)
4565 */
4566 sc->sc_ctrl |= CTRL_SWDPIO(0);
4567 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4568
4569 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4570
4571 #define ADD(ss, mm, dd) \
4572 do { \
4573 aprint_normal("%s%s", sep, ss); \
4574 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
4575 sep = ", "; \
4576 } while (/*CONSTCOND*/0)
4577
4578 aprint_normal_dev(sc->sc_dev, "");
4579 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4580 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4581 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4582 aprint_normal("\n");
4583
4584 #undef ADD
4585
4586 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4587 }
4588
4589 /*
4590 * wm_tbi_mediastatus: [ifmedia interface function]
4591 *
4592 * Get the current interface media status on a 1000BASE-X device.
4593 */
4594 static void
4595 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4596 {
4597 struct wm_softc *sc = ifp->if_softc;
4598 uint32_t ctrl, status;
4599
4600 ifmr->ifm_status = IFM_AVALID;
4601 ifmr->ifm_active = IFM_ETHER;
4602
4603 status = CSR_READ(sc, WMREG_STATUS);
4604 if ((status & STATUS_LU) == 0) {
4605 ifmr->ifm_active |= IFM_NONE;
4606 return;
4607 }
4608
4609 ifmr->ifm_status |= IFM_ACTIVE;
4610 ifmr->ifm_active |= IFM_1000_SX;
4611 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4612 ifmr->ifm_active |= IFM_FDX;
4613 ctrl = CSR_READ(sc, WMREG_CTRL);
4614 if (ctrl & CTRL_RFCE)
4615 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4616 if (ctrl & CTRL_TFCE)
4617 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4618 }
4619
4620 /*
4621 * wm_tbi_mediachange: [ifmedia interface function]
4622 *
4623 * Set hardware to newly-selected media on a 1000BASE-X device.
4624 */
4625 static int
4626 wm_tbi_mediachange(struct ifnet *ifp)
4627 {
4628 struct wm_softc *sc = ifp->if_softc;
4629 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4630 uint32_t status;
4631 int i;
4632
4633 sc->sc_txcw = 0;
4634 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4635 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4636 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4637 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4638 sc->sc_txcw |= TXCW_ANE;
4639 } else {
4640 /*
4641 * If autonegotiation is turned off, force link up and turn on
4642 * full duplex
4643 */
4644 sc->sc_txcw &= ~TXCW_ANE;
4645 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4646 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4647 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4648 delay(1000);
4649 }
4650
4651 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4652 device_xname(sc->sc_dev),sc->sc_txcw));
4653 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4654 delay(10000);
4655
4656 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4657 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4658
4659 /*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal, and clear if they don't.
4662 */
4663 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4664 /* Have signal; wait for the link to come up. */
4665
4666 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4667 /*
4668 * Reset the link, and let autonegotiation do its thing
4669 */
4670 sc->sc_ctrl |= CTRL_LRST;
4671 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4672 delay(1000);
4673 sc->sc_ctrl &= ~CTRL_LRST;
4674 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4675 delay(1000);
4676 }
4677
4678 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4679 delay(10000);
4680 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4681 break;
4682 }
4683
4684 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4685 device_xname(sc->sc_dev),i));
4686
4687 status = CSR_READ(sc, WMREG_STATUS);
4688 DPRINTF(WM_DEBUG_LINK,
4689 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4690 device_xname(sc->sc_dev),status, STATUS_LU));
4691 if (status & STATUS_LU) {
4692 /* Link is up. */
4693 DPRINTF(WM_DEBUG_LINK,
4694 ("%s: LINK: set media -> link up %s\n",
4695 device_xname(sc->sc_dev),
4696 (status & STATUS_FD) ? "FDX" : "HDX"));
4697
4698 /*
4699 * NOTE: CTRL will update TFCE and RFCE automatically,
4700 * so we should update sc->sc_ctrl
4701 */
4702 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4703 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4704 sc->sc_fcrtl &= ~FCRTL_XONE;
4705 if (status & STATUS_FD)
4706 sc->sc_tctl |=
4707 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4708 else
4709 sc->sc_tctl |=
4710 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4711 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4712 sc->sc_fcrtl |= FCRTL_XONE;
4713 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4714 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4715 WMREG_OLD_FCRTL : WMREG_FCRTL,
4716 sc->sc_fcrtl);
4717 sc->sc_tbi_linkup = 1;
4718 } else {
4719 if (i == WM_LINKUP_TIMEOUT)
4720 wm_check_for_link(sc);
4721 /* Link is down. */
4722 DPRINTF(WM_DEBUG_LINK,
4723 ("%s: LINK: set media -> link down\n",
4724 device_xname(sc->sc_dev)));
4725 sc->sc_tbi_linkup = 0;
4726 }
4727 } else {
4728 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4729 device_xname(sc->sc_dev)));
4730 sc->sc_tbi_linkup = 0;
4731 }
4732
4733 wm_tbi_set_linkled(sc);
4734
4735 return (0);
4736 }
4737
4738 /*
4739 * wm_tbi_set_linkled:
4740 *
4741 * Update the link LED on 1000BASE-X devices.
4742 */
4743 static void
4744 wm_tbi_set_linkled(struct wm_softc *sc)
4745 {
4746
4747 if (sc->sc_tbi_linkup)
4748 sc->sc_ctrl |= CTRL_SWDPIN(0);
4749 else
4750 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4751
4752 /* 82540 or newer devices are active low */
4753 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4754
4755 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4756 }
4757
4758 /*
4759 * wm_tbi_check_link:
4760 *
4761 * Check the link on 1000BASE-X devices.
4762 */
4763 static void
4764 wm_tbi_check_link(struct wm_softc *sc)
4765 {
4766 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4767 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4768 uint32_t rxcw, ctrl, status;
4769
4770 status = CSR_READ(sc, WMREG_STATUS);
4771
4772 rxcw = CSR_READ(sc, WMREG_RXCW);
4773 ctrl = CSR_READ(sc, WMREG_CTRL);
4774
4775 /* set link status */
4776 if ((status & STATUS_LU) == 0) {
4777 DPRINTF(WM_DEBUG_LINK,
4778 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4779 sc->sc_tbi_linkup = 0;
4780 } else if (sc->sc_tbi_linkup == 0) {
4781 DPRINTF(WM_DEBUG_LINK,
4782 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4783 (status & STATUS_FD) ? "FDX" : "HDX"));
4784 sc->sc_tbi_linkup = 1;
4785 }
4786
4787 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4788 && ((status & STATUS_LU) == 0)) {
4789 sc->sc_tbi_linkup = 0;
4790 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4791 /* RXCFG storm! */
4792 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4793 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4794 wm_init(ifp);
4795 wm_start(ifp);
4796 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4797 /* If the timer expired, retry autonegotiation */
4798 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4799 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4800 sc->sc_tbi_ticks = 0;
4801 /*
4802 * Reset the link, and let autonegotiation do
4803 * its thing
4804 */
4805 sc->sc_ctrl |= CTRL_LRST;
4806 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4807 delay(1000);
4808 sc->sc_ctrl &= ~CTRL_LRST;
4809 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4810 delay(1000);
4811 CSR_WRITE(sc, WMREG_TXCW,
4812 sc->sc_txcw & ~TXCW_ANE);
4813 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4814 }
4815 }
4816 }
4817
4818 wm_tbi_set_linkled(sc);
4819 }
4820
4821 /*
4822 * wm_gmii_reset:
4823 *
4824 * Reset the PHY.
4825 */
4826 static void
4827 wm_gmii_reset(struct wm_softc *sc)
4828 {
4829 uint32_t reg;
4830 int func = 0; /* XXX gcc */
4831 int rv;
4832
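	/*
	 * The overall sequence: take the PHY semaphore, reset the PHY
	 * in a type-specific way, release the semaphore, wait for the
	 * configuration to load, then apply per-family extra setup.
	 */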
4833 /* get phy semaphore */
4834 switch (sc->sc_type) {
4835 case WM_T_82571:
4836 case WM_T_82572:
4837 case WM_T_82573:
4838 case WM_T_82574:
4839 case WM_T_82583:
4840 /* XXX should get sw semaphore, too */
4841 rv = wm_get_swsm_semaphore(sc);
4842 break;
4843 case WM_T_80003:
4844 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4845 rv = wm_get_swfw_semaphore(sc,
4846 func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4847 break;
4848 case WM_T_ICH8:
4849 case WM_T_ICH9:
4850 case WM_T_ICH10:
4851 case WM_T_PCH:
4852 rv = wm_get_swfwhw_semaphore(sc);
4853 break;
4854 default:
		/* nothing to do */
4856 rv = 0;
4857 break;
4858 }
4859 if (rv != 0) {
4860 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4861 __func__);
4862 return;
4863 }
4864
4865 switch (sc->sc_type) {
4866 case WM_T_82542_2_0:
4867 case WM_T_82542_2_1:
4868 /* null */
4869 break;
4870 case WM_T_82543:
4871 /*
4872 * With 82543, we need to force speed and duplex on the MAC
4873 * equal to what the PHY speed and duplex configuration is.
4874 * In addition, we need to perform a hardware reset on the PHY
4875 * to take it out of reset.
4876 */
4877 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4878 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4879
4880 /* The PHY reset pin is active-low. */
4881 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4882 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4883 CTRL_EXT_SWDPIN(4));
4884 reg |= CTRL_EXT_SWDPIO(4);
4885
4886 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4887 delay(10*1000);
4888
4889 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4890 delay(150);
4891 #if 0
4892 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4893 #endif
4894 delay(20*1000); /* XXX extra delay to get PHY ID? */
4895 break;
4896 case WM_T_82544: /* reset 10000us */
4897 case WM_T_82540:
4898 case WM_T_82545:
4899 case WM_T_82545_3:
4900 case WM_T_82546:
4901 case WM_T_82546_3:
4902 case WM_T_82541:
4903 case WM_T_82541_2:
4904 case WM_T_82547:
4905 case WM_T_82547_2:
4906 case WM_T_82571: /* reset 100us */
4907 case WM_T_82572:
4908 case WM_T_82573:
4909 case WM_T_82574:
4910 case WM_T_82583:
4911 case WM_T_80003:
4912 /* generic reset */
4913 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4914 delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
4915 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4916 delay(150);
4917
4918 if ((sc->sc_type == WM_T_82541)
4919 || (sc->sc_type == WM_T_82541_2)
4920 || (sc->sc_type == WM_T_82547)
4921 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for the IGP PHY are done in igp_reset() */
4923 /* XXX add code to set LED after phy reset */
4924 }
4925 break;
4926 case WM_T_ICH8:
4927 case WM_T_ICH9:
4928 case WM_T_ICH10:
4929 case WM_T_PCH:
4930 /* generic reset */
4931 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4932 delay(100);
4933 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4934 delay(150);
4935 break;
4936 default:
4937 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4938 __func__);
4939 break;
4940 }
4941
4942 /* release PHY semaphore */
4943 switch (sc->sc_type) {
4944 case WM_T_82571:
4945 case WM_T_82572:
4946 case WM_T_82573:
4947 case WM_T_82574:
4948 case WM_T_82583:
		/* XXX should put sw semaphore, too */
4950 wm_put_swsm_semaphore(sc);
4951 break;
4952 case WM_T_80003:
4953 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4954 break;
4955 case WM_T_ICH8:
4956 case WM_T_ICH9:
4957 case WM_T_ICH10:
4958 case WM_T_PCH:
4959 wm_put_swfwhw_semaphore(sc);
4960 break;
4961 default:
		/* nothing to do */
4963 rv = 0;
4964 break;
4965 }
4966
4967 /* get_cfg_done */
4968 wm_get_cfg_done(sc);
4969
4970 /* extra setup */
4971 switch (sc->sc_type) {
4972 case WM_T_82542_2_0:
4973 case WM_T_82542_2_1:
4974 case WM_T_82543:
4975 case WM_T_82544:
4976 case WM_T_82540:
4977 case WM_T_82545:
4978 case WM_T_82545_3:
4979 case WM_T_82546:
4980 case WM_T_82546_3:
4981 case WM_T_82541_2:
4982 case WM_T_82547_2:
4983 case WM_T_82571:
4984 case WM_T_82572:
4985 case WM_T_82573:
4986 case WM_T_82574:
4987 case WM_T_82583:
4988 case WM_T_80003:
4989 /* null */
4990 break;
4991 case WM_T_82541:
4992 case WM_T_82547:
		/* XXX Configure the activity LED after PHY reset */
4994 break;
4995 case WM_T_ICH8:
4996 case WM_T_ICH9:
4997 case WM_T_ICH10:
4998 case WM_T_PCH:
		/* Allow time for h/w to get to a quiescent state after reset */
5000 delay(10*1000);
5001
5002 if (sc->sc_type == WM_T_PCH) {
5003 wm_hv_phy_workaround_ich8lan(sc);
5004
5005 /*
5006 * dummy read to clear the phy wakeup bit after lcd
5007 * reset
5008 */
5009 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5010 }
5011
5012 /*
		 * XXX Configure the LCD with the extended configuration region
5014 * in NVM
5015 */
5016
5017 /* Configure the LCD with the OEM bits in NVM */
5018 if (sc->sc_type == WM_T_PCH) {
5019 /*
5020 * Disable LPLU.
5021 * XXX It seems that 82567 has LPLU, too.
5022 */
5023 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5025 reg |= HV_OEM_BITS_ANEGNOW;
5026 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5027 }
5028 break;
5029 default:
5030 panic("%s: unknown type\n", __func__);
5031 break;
5032 }
5033 }
5034
5035 /*
5036 * wm_gmii_mediainit:
5037 *
5038 * Initialize media for use on 1000BASE-T devices.
5039 */
5040 static void
5041 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5042 {
5043 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5044
5045 /* We have MII. */
5046 sc->sc_flags |= WM_F_HAS_MII;
5047
5048 if (sc->sc_type == WM_T_80003)
5049 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5050 else
5051 sc->sc_tipg = TIPG_1000T_DFLT;
5052
5053 /*
5054 * Let the chip set speed/duplex on its own based on
5055 * signals from the PHY.
5056 * XXXbouyer - I'm not sure this is right for the 80003,
5057 * the em driver only sets CTRL_SLU here - but it seems to work.
5058 */
5059 sc->sc_ctrl |= CTRL_SLU;
5060 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5061
5062 /* Initialize our media structures and probe the GMII. */
5063 sc->sc_mii.mii_ifp = ifp;
5064
5065 switch (prodid) {
5066 case PCI_PRODUCT_INTEL_PCH_M_LM:
5067 case PCI_PRODUCT_INTEL_PCH_M_LC:
5068 /* 82577 */
5069 sc->sc_phytype = WMPHY_82577;
5070 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5071 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5072 break;
5073 case PCI_PRODUCT_INTEL_PCH_D_DM:
5074 case PCI_PRODUCT_INTEL_PCH_D_DC:
5075 /* 82578 */
5076 sc->sc_phytype = WMPHY_82578;
5077 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5078 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5079 break;
5080 case PCI_PRODUCT_INTEL_82801I_BM:
5081 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5082 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5083 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5084 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5085 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5086 /* 82567 */
5087 sc->sc_phytype = WMPHY_BM;
5088 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5089 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5090 break;
5091 default:
5092 if (sc->sc_type >= WM_T_80003) {
5093 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5094 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5095 } else if (sc->sc_type >= WM_T_82544) {
5096 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5097 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5098 } else {
5099 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5100 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5101 }
5102 break;
5104 }
5105 sc->sc_mii.mii_statchg = wm_gmii_statchg;
5106
5107 wm_gmii_reset(sc);
5108
5109 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5110 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5111 wm_gmii_mediastatus);
5112
5113 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5114 MII_OFFSET_ANY, MIIF_DOPAUSE);
5115
5116 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5117 /* if failed, retry with *_bm_* */
5118 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5119 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5120
5121 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5122 MII_OFFSET_ANY, MIIF_DOPAUSE);
5123 }
5124 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5125 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5126 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5127 sc->sc_phytype = WMPHY_NONE;
5128 } else {
5129 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
5130 }
5131 }
5132
5133 /*
5134 * wm_gmii_mediastatus: [ifmedia interface function]
5135 *
5136 * Get the current interface media status on a 1000BASE-T device.
5137 */
5138 static void
5139 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5140 {
5141 struct wm_softc *sc = ifp->if_softc;
5142
5143 ether_mediastatus(ifp, ifmr);
5144 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
5145 sc->sc_flowflags;
5146 }
5147
5148 /*
5149 * wm_gmii_mediachange: [ifmedia interface function]
5150 *
5151 * Set hardware to newly-selected media on a 1000BASE-T device.
5152 */
5153 static int
5154 wm_gmii_mediachange(struct ifnet *ifp)
5155 {
5156 struct wm_softc *sc = ifp->if_softc;
5157 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5158 int rc;
5159
5160 if ((ifp->if_flags & IFF_UP) == 0)
5161 return 0;
5162
5163 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5164 sc->sc_ctrl |= CTRL_SLU;
5165 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5166 || (sc->sc_type > WM_T_82543)) {
5167 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5168 } else {
5169 sc->sc_ctrl &= ~CTRL_ASDE;
5170 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5171 if (ife->ifm_media & IFM_FDX)
5172 sc->sc_ctrl |= CTRL_FD;
		switch (IFM_SUBTYPE(ife->ifm_media)) {
5174 case IFM_10_T:
5175 sc->sc_ctrl |= CTRL_SPEED_10;
5176 break;
5177 case IFM_100_TX:
5178 sc->sc_ctrl |= CTRL_SPEED_100;
5179 break;
5180 case IFM_1000_T:
5181 sc->sc_ctrl |= CTRL_SPEED_1000;
5182 break;
5183 default:
5184 panic("wm_gmii_mediachange: bad media 0x%x",
5185 ife->ifm_media);
5186 }
5187 }
5188 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5189 if (sc->sc_type <= WM_T_82543)
5190 wm_gmii_reset(sc);
5191
5192 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5193 return 0;
5194 return rc;
5195 }
5196
5197 #define MDI_IO CTRL_SWDPIN(2)
5198 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
5199 #define MDI_CLK CTRL_SWDPIN(3)
5200
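/*
 * On the i82543, MII management frames are bit-banged through
 * software-definable pins: MDI_IO carries the data and MDI_CLK
 * supplies the clock.
 */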
5201 static void
5202 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5203 {
5204 uint32_t i, v;
5205
5206 v = CSR_READ(sc, WMREG_CTRL);
5207 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5208 v |= MDI_DIR | CTRL_SWDPIO(3);
5209
5210 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5211 if (data & i)
5212 v |= MDI_IO;
5213 else
5214 v &= ~MDI_IO;
5215 CSR_WRITE(sc, WMREG_CTRL, v);
5216 delay(10);
5217 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5218 delay(10);
5219 CSR_WRITE(sc, WMREG_CTRL, v);
5220 delay(10);
5221 }
5222 }
5223
5224 static uint32_t
5225 i82543_mii_recvbits(struct wm_softc *sc)
5226 {
5227 uint32_t v, i, data = 0;
5228
5229 v = CSR_READ(sc, WMREG_CTRL);
5230 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5231 v |= CTRL_SWDPIO(3);
5232
5233 CSR_WRITE(sc, WMREG_CTRL, v);
5234 delay(10);
5235 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5236 delay(10);
5237 CSR_WRITE(sc, WMREG_CTRL, v);
5238 delay(10);
5239
5240 for (i = 0; i < 16; i++) {
5241 data <<= 1;
5242 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5243 delay(10);
5244 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5245 data |= 1;
5246 CSR_WRITE(sc, WMREG_CTRL, v);
5247 delay(10);
5248 }
5249
5250 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5251 delay(10);
5252 CSR_WRITE(sc, WMREG_CTRL, v);
5253 delay(10);
5254
5255 return (data);
5256 }
5257
5258 #undef MDI_IO
5259 #undef MDI_DIR
5260 #undef MDI_CLK
5261
5262 /*
5263 * wm_gmii_i82543_readreg: [mii interface function]
5264 *
5265 * Read a PHY register on the GMII (i82543 version).
5266 */
5267 static int
5268 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5269 {
5270 struct wm_softc *sc = device_private(self);
5271 int rv;
5272
5273 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5274 i82543_mii_sendbits(sc, reg | (phy << 5) |
5275 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5276 rv = i82543_mii_recvbits(sc) & 0xffff;
5277
5278 DPRINTF(WM_DEBUG_GMII,
5279 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5280 device_xname(sc->sc_dev), phy, reg, rv));
5281
5282 return (rv);
5283 }
5284
5285 /*
5286 * wm_gmii_i82543_writereg: [mii interface function]
5287 *
5288 * Write a PHY register on the GMII (i82543 version).
5289 */
5290 static void
5291 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5292 {
5293 struct wm_softc *sc = device_private(self);
5294
5295 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5296 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5297 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5298 (MII_COMMAND_START << 30), 32);
5299 }
5300
5301 /*
5302 * wm_gmii_i82544_readreg: [mii interface function]
5303 *
5304 * Read a PHY register on the GMII.
5305 */
5306 static int
5307 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5308 {
5309 struct wm_softc *sc = device_private(self);
5310 uint32_t mdic = 0;
5311 int i, rv;
5312
5313 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5314 MDIC_REGADD(reg));
5315
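	/* Poll the ready bit for up to 320 * 10us = 3.2ms. */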
5316 for (i = 0; i < 320; i++) {
5317 mdic = CSR_READ(sc, WMREG_MDIC);
5318 if (mdic & MDIC_READY)
5319 break;
5320 delay(10);
5321 }
5322
5323 if ((mdic & MDIC_READY) == 0) {
5324 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5325 device_xname(sc->sc_dev), phy, reg);
5326 rv = 0;
5327 } else if (mdic & MDIC_E) {
5328 #if 0 /* This is normal if no PHY is present. */
5329 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5330 device_xname(sc->sc_dev), phy, reg);
5331 #endif
5332 rv = 0;
5333 } else {
5334 rv = MDIC_DATA(mdic);
5335 if (rv == 0xffff)
5336 rv = 0;
5337 }
5338
5339 return (rv);
5340 }
5341
5342 /*
5343 * wm_gmii_i82544_writereg: [mii interface function]
5344 *
5345 * Write a PHY register on the GMII.
5346 */
5347 static void
5348 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5349 {
5350 struct wm_softc *sc = device_private(self);
5351 uint32_t mdic = 0;
5352 int i;
5353
5354 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5355 MDIC_REGADD(reg) | MDIC_DATA(val));
5356
5357 for (i = 0; i < 320; i++) {
5358 mdic = CSR_READ(sc, WMREG_MDIC);
5359 if (mdic & MDIC_READY)
5360 break;
5361 delay(10);
5362 }
5363
5364 if ((mdic & MDIC_READY) == 0)
5365 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5366 device_xname(sc->sc_dev), phy, reg);
5367 else if (mdic & MDIC_E)
5368 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5369 device_xname(sc->sc_dev), phy, reg);
5370 }
5371
5372 /*
5373 * wm_gmii_i80003_readreg: [mii interface function]
5374 *
5375 * Read a PHY register on the kumeran
5376 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5378 */
5379 static int
5380 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5381 {
5382 struct wm_softc *sc = device_private(self);
5383 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5384 int rv;
5385
5386 if (phy != 1) /* only one PHY on kumeran bus */
5387 return 0;
5388
5389 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5390 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5391 __func__);
5392 return 0;
5393 }
5394
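	/*
	 * GG82563 registers are paged: select the page first, then
	 * access the offset within it.  High "alternate" pages use a
	 * different page-select register.
	 */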
5395 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5396 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5397 reg >> GG82563_PAGE_SHIFT);
5398 } else {
5399 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5400 reg >> GG82563_PAGE_SHIFT);
5401 }
	/* Wait an extra 200us to work around a ready-bit bug in the MDIC register */
5403 delay(200);
5404 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5405 delay(200);
5406
5407 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5408 return (rv);
5409 }
5410
5411 /*
5412 * wm_gmii_i80003_writereg: [mii interface function]
5413 *
5414 * Write a PHY register on the kumeran.
5415 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5417 */
5418 static void
5419 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5420 {
5421 struct wm_softc *sc = device_private(self);
5422 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5423
5424 if (phy != 1) /* only one PHY on kumeran bus */
5425 return;
5426
5427 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5428 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5429 __func__);
5430 return;
5431 }
5432
5433 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5434 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5435 reg >> GG82563_PAGE_SHIFT);
5436 } else {
5437 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5438 reg >> GG82563_PAGE_SHIFT);
5439 }
	/* Wait an extra 200us to work around a ready-bit bug in the MDIC register */
5441 delay(200);
5442 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5443 delay(200);
5444
5445 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5446 }
5447
5448 /*
5449 * wm_gmii_bm_readreg: [mii interface function]
5450 *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5454 */
5455 static int
5456 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5457 {
5458 struct wm_softc *sc = device_private(self);
5459 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5460 int rv;
5461
5462 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5463 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5464 __func__);
5465 return 0;
5466 }
5467
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
5477
5478 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5479 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5480 return (rv);
5481 }
5482
5483 /*
5484 * wm_gmii_bm_writereg: [mii interface function]
5485 *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5489 */
5490 static void
5491 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5492 {
5493 struct wm_softc *sc = device_private(self);
5494 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5495
5496 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5497 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5498 __func__);
5499 return;
5500 }
5501
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
5511
5512 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5513 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5514 }
5515
5516 static void
5517 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
5518 {
5519 struct wm_softc *sc = device_private(self);
5520 uint16_t regnum = BM_PHY_REG_NUM(offset);
5521 uint16_t wuce;
5522
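	/*
	 * The wakeup registers on page 800 are only reachable after
	 * host wakeup access is enabled through the enable register
	 * on page 769; that register is restored on the way out.
	 */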
5523 /* XXX Gig must be disabled for MDIO accesses to page 800 */
5524 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
5526 }
5527
5528 /* Set page 769 */
5529 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5530 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5531
5532 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
5533
5534 wuce &= ~BM_WUC_HOST_WU_BIT;
5535 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
5536 wuce | BM_WUC_ENABLE_BIT);
5537
5538 /* Select page 800 */
5539 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5540 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
5541
5542 /* Write page 800 */
5543 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
5544
5545 if (rd)
5546 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
5547 else
5548 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
5549
5550 /* Set page 769 */
5551 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5552 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5553
5554 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
5555 }
5556
5557 /*
5558 * wm_gmii_hv_readreg: [mii interface function]
5559 *
 *	Read a PHY register on the HV PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5563 */
5564 static int
5565 wm_gmii_hv_readreg(device_t self, int phy, int reg)
5566 {
5567 struct wm_softc *sc = device_private(self);
5568 uint16_t page = BM_PHY_REG_PAGE(reg);
5569 uint16_t regnum = BM_PHY_REG_NUM(reg);
5570 uint16_t val;
5571 int rv;
5572
5573 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5574 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5575 __func__);
5576 return 0;
5577 }
5578
5579 /* XXX Workaround failure in MDIO access while cable is disconnected */
5580 if (sc->sc_phytype == WMPHY_82577) {
5581 /* XXX must write */
5582 }
5583
5584 /* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return val;
	}
5589
5590 /*
	 * Pages lower than 768 work differently than the rest and
	 * would need their own handler; they are not supported yet.
5593 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return 0;
	}
5598
5599 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
5600 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5601 page << BME1000_PAGE_SHIFT);
5602 }
5603
5604 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
5605 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5606 return (rv);
5607 }
5608
5609 /*
5610 * wm_gmii_hv_writereg: [mii interface function]
5611 *
 *	Write a PHY register on the HV PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5615 */
5616 static void
5617 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
5618 {
5619 struct wm_softc *sc = device_private(self);
5620 uint16_t page = BM_PHY_REG_PAGE(reg);
5621 uint16_t regnum = BM_PHY_REG_NUM(reg);
5622
5623 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5624 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5625 __func__);
5626 return;
5627 }
5628
5629 /* XXX Workaround failure in MDIO access while cable is disconnected */
5630
5631 /* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
5639
5640 /*
	 * Pages lower than 768 work differently than the rest and
	 * would need their own handler; they are not supported yet.
5643 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
5648
5649 /*
5650 * XXX Workaround MDIO accesses being disabled after entering IEEE
5651 * Power Down (whenever bit 11 of the PHY control register is set)
5652 */
5653
5654 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
5655 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5656 page << BME1000_PAGE_SHIFT);
5657 }
5658
5659 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
5660 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5661 }
5662
5663 /*
5664 * wm_gmii_statchg: [mii interface function]
5665 *
5666 * Callback from MII layer when media changes.
5667 */
5668 static void
5669 wm_gmii_statchg(device_t self)
5670 {
5671 struct wm_softc *sc = device_private(self);
5672 struct mii_data *mii = &sc->sc_mii;
5673
5674 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5675 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5676 sc->sc_fcrtl &= ~FCRTL_XONE;
5677
5678 /*
5679 * Get flow control negotiation result.
5680 */
5681 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
5682 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
5683 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
5684 mii->mii_media_active &= ~IFM_ETH_FMASK;
5685 }
5686
5687 if (sc->sc_flowflags & IFM_FLOW) {
5688 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
5689 sc->sc_ctrl |= CTRL_TFCE;
5690 sc->sc_fcrtl |= FCRTL_XONE;
5691 }
5692 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
5693 sc->sc_ctrl |= CTRL_RFCE;
5694 }
5695
5696 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5697 DPRINTF(WM_DEBUG_LINK,
5698 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
5699 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5700 } else {
5701 DPRINTF(WM_DEBUG_LINK,
5702 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
5703 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5704 }
5705
5706 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5707 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5708 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
5709 : WMREG_FCRTL, sc->sc_fcrtl);
5710 if (sc->sc_type == WM_T_80003) {
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
5712 case IFM_1000_T:
5713 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5714 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
5715 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5716 break;
5717 default:
5718 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5719 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
5720 sc->sc_tipg = TIPG_10_100_80003_DFLT;
5721 break;
5722 }
5723 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5724 }
5725 }
5726
5727 /*
5728 * wm_kmrn_readreg:
5729 *
5730 * Read a kumeran register
5731 */
5732 static int
5733 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5734 {
5735 int rv;
5736
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5738 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5739 aprint_error_dev(sc->sc_dev,
5740 "%s: failed to get semaphore\n", __func__);
5741 return 0;
5742 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5744 if (wm_get_swfwhw_semaphore(sc)) {
5745 aprint_error_dev(sc->sc_dev,
5746 "%s: failed to get semaphore\n", __func__);
5747 return 0;
5748 }
5749 }
5750
5751 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5752 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5753 KUMCTRLSTA_REN);
5754 delay(2);
5755
5756 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5757
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
		wm_put_swfwhw_semaphore(sc);
5762
5763 return (rv);
5764 }
5765
5766 /*
5767 * wm_kmrn_writereg:
5768 *
5769 * Write a kumeran register
5770 */
5771 static void
5772 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5773 {
5774
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5776 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5777 aprint_error_dev(sc->sc_dev,
5778 "%s: failed to get semaphore\n", __func__);
5779 return;
5780 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5782 if (wm_get_swfwhw_semaphore(sc)) {
5783 aprint_error_dev(sc->sc_dev,
5784 "%s: failed to get semaphore\n", __func__);
5785 return;
5786 }
5787 }
5788
5789 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5790 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5791 (val & KUMCTRLSTA_MASK));
5792
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
		wm_put_swfwhw_semaphore(sc);
5797 }
5798
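/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Check whether the NVM is an EEPROM.  On 82573/82574/82583 the
 *	device uses Flash when EECD bits 15 and 16 are both set; return
 *	0 in that case, and 1 (EEPROM) otherwise.
 */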
5799 static int
5800 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5801 {
5802 uint32_t eecd = 0;
5803
5804 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
5805 || sc->sc_type == WM_T_82583) {
5806 eecd = CSR_READ(sc, WMREG_EECD);
5807
5808 /* Isolate bits 15 & 16 */
5809 eecd = ((eecd >> 15) & 0x03);
5810
5811 /* If both bits are set, device is Flash type */
5812 if (eecd == 0x03)
5813 return 0;
5814 }
5815 return 1;
5816 }
5817
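/*
 * wm_get_swsm_semaphore:
 *
 *	Try to get the software/firmware semaphore by setting
 *	SWSM.SWESMBI and checking that it reads back as set, retrying
 *	with a small delay until the timeout expires.  Return 0 on
 *	success, 1 on failure.
 */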
5818 static int
5819 wm_get_swsm_semaphore(struct wm_softc *sc)
5820 {
5821 int32_t timeout;
5822 uint32_t swsm;
5823
5824 /* Get the FW semaphore. */
5825 timeout = 1000 + 1; /* XXX */
5826 while (timeout) {
5827 swsm = CSR_READ(sc, WMREG_SWSM);
5828 swsm |= SWSM_SWESMBI;
5829 CSR_WRITE(sc, WMREG_SWSM, swsm);
5830 /* if we managed to set the bit we got the semaphore. */
5831 swsm = CSR_READ(sc, WMREG_SWSM);
5832 if (swsm & SWSM_SWESMBI)
5833 break;
5834
5835 delay(50);
5836 timeout--;
5837 }
5838
5839 if (timeout == 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not acquire SWSM semaphore\n");
5841 /* Release semaphores */
5842 wm_put_swsm_semaphore(sc);
5843 return 1;
5844 }
5845 return 0;
5846 }
5847
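/*
 * wm_put_swsm_semaphore:
 *
 *	Release the software/firmware semaphore by clearing SWSM.SWESMBI.
 */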
5848 static void
5849 wm_put_swsm_semaphore(struct wm_softc *sc)
5850 {
5851 uint32_t swsm;
5852
5853 swsm = CSR_READ(sc, WMREG_SWSM);
5854 swsm &= ~(SWSM_SWESMBI);
5855 CSR_WRITE(sc, WMREG_SWSM, swsm);
5856 }
5857
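/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software half of the SW_FW_SYNC semaphore for the
 *	resources given in `mask', taking the SWSM semaphore around each
 *	access when the hardware requires it.  Retries for roughly a
 *	second; returns 0 on success, 1 on failure.
 */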
5858 static int
5859 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5860 {
	uint32_t swfw_sync;
	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
5867 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5868 if (wm_get_swsm_semaphore(sc)) {
5869 aprint_error_dev(sc->sc_dev,
5870 "%s: failed to get semaphore\n",
5871 __func__);
5872 return 1;
5873 }
5874 }
5875 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5876 if ((swfw_sync & (swmask | fwmask)) == 0) {
5877 swfw_sync |= swmask;
5878 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5879 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5880 wm_put_swsm_semaphore(sc);
5881 return 0;
5882 }
5883 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5884 wm_put_swsm_semaphore(sc);
5885 delay(5000);
5886 }
5887 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5888 device_xname(sc->sc_dev), mask, swfw_sync);
5889 return 1;
5890 }
5891
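/*
 * wm_put_swfw_semaphore:
 *
 *	Release the software half of the SW_FW_SYNC semaphore for the
 *	resources given in `mask'.
 */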
5892 static void
5893 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5894 {
5895 uint32_t swfw_sync;
5896
5897 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5898 while (wm_get_swsm_semaphore(sc) != 0)
5899 continue;
5900 }
5901 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5902 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5903 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5904 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5905 wm_put_swsm_semaphore(sc);
5906 }
5907
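/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag (EXTCNFCTR.SWFLAG) used on ICH/PCH
 *	hardware to arbitrate PHY and NVM accesses with the firmware.
 *	Retries for roughly a second; returns 0 on success, 1 on failure.
 */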
5908 static int
5909 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5910 {
	uint32_t ext_ctrl;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
5915 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5916 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5917 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5918
5919 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5920 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5921 return 0;
5922 delay(5000);
5923 }
5924 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5925 device_xname(sc->sc_dev), ext_ctrl);
5926 return 1;
5927 }
5928
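/*
 * wm_put_swfwhw_semaphore:
 *
 *	Release the software flag (EXTCNFCTR.SWFLAG).
 */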
5929 static void
5930 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5931 {
5932 uint32_t ext_ctrl;
5933 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5934 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5935 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5936 }
5937
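/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Determine which NVM bank (0 or 1) holds valid data.  Before
 *	ICH10 this comes from EECD_SEC1VAL; on ICH10/PCH the signature
 *	byte of each bank is examined instead.  Returns 0 on success,
 *	-1 if neither bank carries a valid signature.
 */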
5938 static int
5939 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5940 {
5941 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5942 uint8_t bank_high_byte;
5943 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5944
5945 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
5946 /* Value of bit 22 corresponds to the flash bank we're on. */
5947 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5948 } else {
5949 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5950 if ((bank_high_byte & 0xc0) == 0x80)
5951 *bank = 0;
5952 else {
5953 wm_read_ich8_byte(sc, act_offset + bank1_offset,
5954 &bank_high_byte);
5955 if ((bank_high_byte & 0xc0) == 0x80)
5956 *bank = 1;
5957 else {
5958 aprint_error_dev(sc->sc_dev,
5959 "EEPROM not present\n");
5960 return -1;
5961 }
5962 }
5963 }
5964
5965 return 0;
5966 }
5967
5968 /******************************************************************************
5969 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5970 * register.
5971 *
5972 * sc - Struct containing variables accessed by shared code
5973 * offset - offset of word in the EEPROM to read
5974 * data - word read from the EEPROM
5975 * words - number of words to read
5976 *****************************************************************************/
5977 static int
5978 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5979 {
5980 int32_t error = 0;
5981 uint32_t flash_bank = 0;
5982 uint32_t act_offset = 0;
5983 uint32_t bank_offset = 0;
5984 uint16_t word = 0;
5985 uint16_t i = 0;
5986
5987 /* We need to know which is the valid flash bank. In the event
5988 * that we didn't allocate eeprom_shadow_ram, we may not be
5989 * managing flash_bank. So it cannot be trusted and needs
5990 * to be updated with each read.
5991 */
5992 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5993 if (error) {
5994 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5995 __func__);
5996 return error;
5997 }
5998
5999 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
6000 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6001
6002 error = wm_get_swfwhw_semaphore(sc);
6003 if (error) {
6004 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6005 __func__);
6006 return error;
6007 }
6008
6009 for (i = 0; i < words; i++) {
6010 /* The NVM part needs a byte offset, hence * 2 */
6011 act_offset = bank_offset + ((offset + i) * 2);
6012 error = wm_read_ich8_word(sc, act_offset, &word);
6013 if (error) {
6014 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6015 __func__);
6016 break;
6017 }
6018 data[i] = word;
6019 }
6020
6021 wm_put_swfwhw_semaphore(sc);
6022 return error;
6023 }
6024
6025 /******************************************************************************
6026 * This function does initial flash setup so that a new read/write/erase cycle
6027 * can be started.
6028 *
6029 * sc - The pointer to the hw structure
6030 ****************************************************************************/
6031 static int32_t
6032 wm_ich8_cycle_init(struct wm_softc *sc)
6033 {
6034 uint16_t hsfsts;
6035 int32_t error = 1;
6036 int32_t i = 0;
6037
6038 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6039
	/* Check the Flash Descriptor Valid bit in HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
6044
	/* Clear FCERR and DAEL in HW status by writing 1s */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6048
6049 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6050
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or FDONE should be
	 * set to 1 by a hardware reset so that it can be used to tell
	 * whether a cycle is in progress or has completed.  We should
	 * also have some software semaphore mechanism guarding FDONE or
	 * the cycle-in-progress bit, so that accesses to those bits by
	 * two threads are serialized and two threads don't start a cycle
	 * at the same time.
	 */
6059
6060 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start a
		 * cycle.  Begin by setting Flash Cycle Done.
		 */
6063 hsfsts |= HSFSTS_DONE;
6064 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6065 error = 0;
6066 } else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
6069 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6070 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6071 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6072 error = 0;
6073 break;
6074 }
6075 delay(1);
6076 }
6077 if (error == 0) {
			/*
			 * The previous cycle ended within the timeout, so
			 * now set the Flash Cycle Done.
			 */
6080 hsfsts |= HSFSTS_DONE;
6081 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6082 }
6083 }
6084 return error;
6085 }
6086
6087 /******************************************************************************
6088 * This function starts a flash cycle and waits for its completion
6089 *
6090 * sc - The pointer to the hw structure
6091 ****************************************************************************/
6092 static int32_t
6093 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6094 {
6095 uint16_t hsflctl;
6096 uint16_t hsfsts;
6097 int32_t error = 1;
6098 uint32_t i = 0;
6099
6100 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6101 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6102 hsflctl |= HSFCTL_GO;
6103 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6104
6105 /* wait till FDONE bit is set to 1 */
6106 do {
6107 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6108 if (hsfsts & HSFSTS_DONE)
6109 break;
6110 delay(1);
6111 i++;
6112 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
		error = 0;
6116 return error;
6117 }
6118
6119 /******************************************************************************
6120 * Reads a byte or word from the NVM using the ICH8 flash access registers.
6121 *
6122 * sc - The pointer to the hw structure
6123 * index - The index of the byte or word to read.
6124 * size - Size of data to read, 1=byte 2=word
6125 * data - Pointer to the word to store the value read.
6126 *****************************************************************************/
6127 static int32_t
6128 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6129 uint32_t size, uint16_t* data)
6130 {
6131 uint16_t hsfsts;
6132 uint16_t hsflctl;
6133 uint32_t flash_linear_address;
6134 uint32_t flash_data = 0;
6135 int32_t error = 1;
6136 int32_t count = 0;
6137
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6140 return error;
6141
6142 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6143 sc->sc_ich8_flash_base;
6144
6145 do {
6146 delay(1);
6147 /* Steps */
6148 error = wm_ich8_cycle_init(sc);
6149 if (error)
6150 break;
6151
6152 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* A BCOUNT of 0 or 1 selects a 1- or 2-byte transfer. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) &
		    HSFCTL_BCOUNT_MASK;
6155 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6156 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6157
		/*
		 * Write the last 24 bits of index into the Flash Linear
		 * Address field in Flash Address.
		 * TODO: maybe check the index against the size of flash.
		 */
6161
6162 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6163
6164 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6165
		/*
		 * Check if FCERR is set to 1.  If it is, clear it and try
		 * the whole sequence a few more times; otherwise read in
		 * (shift in) Flash Data0, least significant byte first.
		 */
6169 if (error == 0) {
6170 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6171 if (size == 1) {
6172 *data = (uint8_t)(flash_data & 0x000000FF);
6173 } else if (size == 2) {
6174 *data = (uint16_t)(flash_data & 0x0000FFFF);
6175 }
6176 break;
6177 } else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another
			 * try... ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
6182 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6183 if (hsfsts & HSFSTS_ERR) {
6184 /* Repeat for some time before giving up. */
6185 continue;
6186 } else if ((hsfsts & HSFSTS_DONE) == 0) {
6187 break;
6188 }
6189 }
6190 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6191
6192 return error;
6193 }
6194
6195 /******************************************************************************
6196 * Reads a single byte from the NVM using the ICH8 flash access registers.
6197 *
6198 * sc - pointer to wm_hw structure
6199 * index - The index of the byte to read.
6200 * data - Pointer to a byte to store the value read.
6201 *****************************************************************************/
6202 static int32_t
6203 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6204 {
6205 int32_t status;
6206 uint16_t word = 0;
6207
6208 status = wm_read_ich8_data(sc, index, 1, &word);
6209 if (status == 0) {
6210 *data = (uint8_t)word;
6211 }
6212
6213 return status;
6214 }
6215
6216 /******************************************************************************
6217 * Reads a word from the NVM using the ICH8 flash access registers.
6218 *
6219 * sc - pointer to wm_hw structure
6220 * index - The starting byte index of the word to read.
6221 * data - Pointer to a word to store the value read.
6222 *****************************************************************************/
6223 static int32_t
6224 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6225 {
6226 int32_t status;
6227
6228 status = wm_read_ich8_data(sc, index, 2, data);
6229 return status;
6230 }
6231
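/*
 * wm_check_mng_mode:
 *
 *	Return non-zero if manageability (the management engine) is
 *	enabled, using the chip-specific check for the adapter type.
 */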
6232 static int
6233 wm_check_mng_mode(struct wm_softc *sc)
6234 {
6235 int rv;
6236
6237 switch (sc->sc_type) {
6238 case WM_T_ICH8:
6239 case WM_T_ICH9:
6240 case WM_T_ICH10:
6241 case WM_T_PCH:
6242 rv = wm_check_mng_mode_ich8lan(sc);
6243 break;
6244 case WM_T_82574:
6245 case WM_T_82583:
6246 rv = wm_check_mng_mode_82574(sc);
6247 break;
6248 case WM_T_82571:
6249 case WM_T_82572:
6250 case WM_T_82573:
6251 case WM_T_80003:
6252 rv = wm_check_mng_mode_generic(sc);
6253 break;
6254 default:
		/* nothing to do */
6256 rv = 0;
6257 break;
6258 }
6259
6260 return rv;
6261 }
6262
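/*
 * wm_check_mng_mode_ich8lan:
 *
 *	Return 1 if FWSM reports the ICH IAMT management mode.
 */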
6263 static int
6264 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6265 {
6266 uint32_t fwsm;
6267
6268 fwsm = CSR_READ(sc, WMREG_FWSM);
6269
6270 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6271 return 1;
6272
6273 return 0;
6274 }
6275
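/*
 * wm_check_mng_mode_82574:
 *
 *	Return 1 if the 82574/82583 NVM config word 2 enables a
 *	management mode.
 */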
6276 static int
6277 wm_check_mng_mode_82574(struct wm_softc *sc)
6278 {
6279 uint16_t data;
6280
6281 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6282
6283 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6284 return 1;
6285
6286 return 0;
6287 }
6288
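/*
 * wm_check_mng_mode_generic:
 *
 *	Return 1 if FWSM reports the IAMT management mode.
 */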
6289 static int
6290 wm_check_mng_mode_generic(struct wm_softc *sc)
6291 {
6292 uint32_t fwsm;
6293
6294 fwsm = CSR_READ(sc, WMREG_FWSM);
6295
6296 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6297 return 1;
6298
6299 return 0;
6300 }
6301
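/*
 * wm_check_reset_block:
 *
 *	Return 0 if a PHY reset is currently allowed, -1 if the firmware
 *	has blocked it (via FWSM on ICH/PCH, via MANC on the PCIe types).
 */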
6302 static int
6303 wm_check_reset_block(struct wm_softc *sc)
6304 {
6305 uint32_t reg;
6306
6307 switch (sc->sc_type) {
6308 case WM_T_ICH8:
6309 case WM_T_ICH9:
6310 case WM_T_ICH10:
6311 case WM_T_PCH:
6312 reg = CSR_READ(sc, WMREG_FWSM);
6313 if ((reg & FWSM_RSPCIPHY) != 0)
6314 return 0;
6315 else
6316 return -1;
6317 break;
6318 case WM_T_82571:
6319 case WM_T_82572:
6320 case WM_T_82573:
6321 case WM_T_82574:
6322 case WM_T_82583:
6323 case WM_T_80003:
6324 reg = CSR_READ(sc, WMREG_MANC);
6325 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6326 return -1;
6327 else
6328 return 0;
6329 break;
6330 default:
6331 /* no problem */
6332 break;
6333 }
6334
6335 return 0;
6336 }
6337
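/*
 * wm_get_hw_control:
 *
 *	Let the firmware know that the driver has taken over the device:
 *	set SWSM.DRV_LOAD on the 82573, CTRL_EXT.DRV_LOAD on the other
 *	types handled here.
 */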
6338 static void
6339 wm_get_hw_control(struct wm_softc *sc)
6340 {
6341 uint32_t reg;
6342
6343 switch (sc->sc_type) {
6344 case WM_T_82573:
6345 #if 0
6346 case WM_T_82574:
6347 case WM_T_82583:
	/*
	 * FreeBSD's em driver has a function for the 82574 that checks
	 * the management mode, but it's not used.  Why?
	 */
6352 #endif
6353 reg = CSR_READ(sc, WMREG_SWSM);
6354 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6355 break;
6356 case WM_T_82571:
6357 case WM_T_82572:
6358 case WM_T_80003:
6359 case WM_T_ICH8:
6360 case WM_T_ICH9:
6361 case WM_T_ICH10:
6362 case WM_T_PCH:
6363 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6364 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
6365 break;
6366 default:
6367 break;
6368 }
6369 }
6370
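/*
 * wm_check_for_link:
 *
 *	Check for link on TBI (fiber) media: force link-up and
 *	full-duplex when the signal is present but autonegotiation has
 *	not completed, and go back to autonegotiation when the link
 *	partner is sending /C/ ordered sets again.
 */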
6371 /* XXX Currently TBI only */
6372 static int
6373 wm_check_for_link(struct wm_softc *sc)
6374 {
6375 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6376 uint32_t rxcw;
6377 uint32_t ctrl;
6378 uint32_t status;
6379 uint32_t sig;
6380
6381 rxcw = CSR_READ(sc, WMREG_RXCW);
6382 ctrl = CSR_READ(sc, WMREG_CTRL);
6383 status = CSR_READ(sc, WMREG_STATUS);
6384
6385 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
6386
6387 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
6388 device_xname(sc->sc_dev), __func__,
6389 ((ctrl & CTRL_SWDPIN(1)) == sig),
6390 ((status & STATUS_LU) != 0),
6391 ((rxcw & RXCW_C) != 0)
6392 ));
6393
6394 /*
6395 * SWDPIN LU RXCW
6396 * 0 0 0
6397 * 0 0 1 (should not happen)
6398 * 0 1 0 (should not happen)
6399 * 0 1 1 (should not happen)
6400 * 1 0 0 Disable autonego and force linkup
6401 * 1 0 1 got /C/ but not linkup yet
6402 * 1 1 0 (linkup)
6403 * 1 1 1 If IFM_AUTO, back to autonego
6404 *
6405 */
6406 if (((ctrl & CTRL_SWDPIN(1)) == sig)
6407 && ((status & STATUS_LU) == 0)
6408 && ((rxcw & RXCW_C) == 0)) {
6409 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
6410 __func__));
6411 sc->sc_tbi_linkup = 0;
6412 /* Disable auto-negotiation in the TXCW register */
6413 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
6414
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware updates CTRL's TFCE and RFCE bits
		 * automatically, so we should update sc->sc_ctrl to match.
		 */
6421 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
6422 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
6424 && ((rxcw & RXCW_C) != 0)
6425 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
6426 sc->sc_tbi_linkup = 1;
6427 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
6428 __func__));
6429 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6430 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
6431 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
6432 && ((rxcw & RXCW_C) != 0)) {
6433 DPRINTF(WM_DEBUG_LINK, ("/C/"));
6434 } else {
6435 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
6436 status));
6437 }
6438
6439 return 0;
6440 }
6441
6442 /*
6443 * Workaround for pch's PHYs
6444 * XXX should be moved to new PHY driver?
6445 */
6446 static void
6447 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
6448 {
6449
	/* XXX (PCH rev. 2) && (82577 && (phy rev 2 or 3)): not implemented */

	/*
	 * XXX (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1):
	 * not implemented
	 */
6453
6454 /* 82578 */
6455 if (sc->sc_phytype == WMPHY_82578) {
6456 /* PCH rev. < 3 */
6457 if (sc->sc_rev < 3) {
6458 /* XXX 6 bit shift? Why? Is it page2? */
6459 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
6460 0x66c0);
6461 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
6462 0xffff);
6463 }
6464
6465 /* XXX phy rev. < 2 */
6466 }
6467
6468 /* Select page 0 */
6469
6470 /* XXX acquire semaphore */
6471 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
6472 /* XXX release semaphore */
6473
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled when the link runs at 1Gbps.
	 */
6478 wm_k1_gig_workaround_hv(sc, 1);
6479 }
6480
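/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1 (power state) workaround for 82577/82578 PHYs: write the link
 *	stall fix to IGP3_KMRN_DIAG and keep K1 disabled while link is
 *	up; when link is down, fall back to the NVM-configured setting.
 */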
6481 static void
6482 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
6483 {
6484 int k1_enable = sc->sc_nvm_k1_enabled;
6485
6486 /* XXX acquire semaphore */
6487
6488 if (link) {
6489 k1_enable = 0;
6490
6491 /* Link stall fix for link up */
6492 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
6493 } else {
6494 /* Link stall fix for link down */
6495 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
6496 }
6497
6498 wm_configure_k1_ich8lan(sc, k1_enable);
6499
6500 /* XXX release semaphore */
6501 }
6502
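/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable K1 through the Kumeran K1_CONFIG register,
 *	then briefly force the MAC speed (with speed bypass set) so the
 *	new setting takes effect.
 */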
6503 static void
6504 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
6505 {
6506 uint32_t ctrl, ctrl_ext, tmp;
6507 uint16_t kmrn_reg;
6508
6509 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
6510
6511 if (k1_enable)
6512 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
6513 else
6514 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
6515
6516 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
6517
6518 delay(20);
6519
6520 ctrl = CSR_READ(sc, WMREG_CTRL);
6521 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6522
6523 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
6524 tmp |= CTRL_FRCSPD;
6525
6526 CSR_WRITE(sc, WMREG_CTRL, tmp);
6527 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
6528 delay(20);
6529
6530 CSR_WRITE(sc, WMREG_CTRL, ctrl);
6531 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6532 delay(20);
6533 }
6534