/*	$NetBSD: if_wm.c,v 1.192 2010/01/14 18:56:02 msaitoh Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Rework how parameters are loaded from the EEPROM.
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.192 2010/01/14 18:56:02 msaitoh Exp $");
80
81 #include "bpfilter.h"
82 #include "rnd.h"
83
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/callout.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/kernel.h>
90 #include <sys/socket.h>
91 #include <sys/ioctl.h>
92 #include <sys/errno.h>
93 #include <sys/device.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96
97 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
98
99 #if NRND > 0
100 #include <sys/rnd.h>
101 #endif
102
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111
112 #include <netinet/in.h> /* XXX for struct ip */
113 #include <netinet/in_systm.h> /* XXX for struct ip */
114 #include <netinet/ip.h> /* XXX for struct ip */
115 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
116 #include <netinet/tcp.h> /* XXX for struct tcphdr */
117
118 #include <sys/bus.h>
119 #include <sys/intr.h>
120 #include <machine/endian.h>
121
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 #include <dev/mii/mii_bitbang.h>
125 #include <dev/mii/ikphyreg.h>
126 #include <dev/mii/igphyreg.h>
127 #include <dev/mii/inbmphyreg.h>
128
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132
133 #include <dev/pci/if_wmreg.h>
134 #include <dev/pci/if_wmvar.h>
135
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

/*
 * Conditional debug printf.  Wrapped in do { } while (0) so that
 * DPRINTF(x, y); always forms exactly one statement: the previous
 * bare "if (wm_debug & (x)) printf y" form had a dangling-else
 * hazard when used inside an unbraced if/else in the caller.
 */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	do { } while (0)	/* nothing */
#endif /* WM_DEBUG */
147
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256	/* max DMA segments per packet */
#define	WM_IFQUEUELEN		256	/* if_snd queue length */
#define	WM_TXQUEUELEN_MAX	64	/* Tx jobs managed at once */
#define	WM_TXQUEUELEN_MAX_82547	16	/* ... but fewer on the i82547 */
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)	/* sc_txnum is a power of 2 */
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)	/* GC threshold for Tx jobs */
#define	WM_NTXDESC_82542	256	/* ring size on < 82544 (errata) */
#define	WM_NTXDESC_82544	4096	/* ring size on >= 82544 */
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)	/* sc_ntxdesc is a power of 2 */
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))	/* next Tx desc, with wrap */
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))	/* next Tx job, with wrap */

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256	/* Rx ring size (power of 2) */
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)	/* next Rx desc, with wrap */
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)	/* previous Rx desc, with wrap */
186
187 /*
188 * Control structures are DMA'd to the i82542 chip. We allocate them in
189 * a single clump that maps to a single DMA segment to make several things
190 * easier.
191 */
/*
 * All descriptors for one device live in this single structure so
 * they map to one DMA segment; the chip DMAs these directly, so the
 * field layout and ordering must not change.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};
204
/*
 * Smaller variant of the control-data clump for chips limited to a
 * 256-entry Tx ring (< 82544, per errata).  Same layout rules apply:
 * Rx descriptors first, Tx descriptors at the end.
 */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

/*
 * Byte offsets of descriptors within the control-data clump; used to
 * compute DMA addresses and sync ranges.  Offsets are taken from the
 * 82544 layout, which the 82542 layout prefixes exactly.
 */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
213
214 /*
215 * Software state for transmit jobs.
216 */
/*
 * Software state for transmit jobs.  One of these tracks each packet
 * handed to the hardware: its mbuf chain, its DMA map, and the span
 * of hardware descriptors it occupies in the Tx ring.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
224
225 /*
226 * Software state for receive buffers. Each descriptor gets a
227 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
228 * more than one buffer, we chain them together.
229 */
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
234
235 #define WM_LINKUP_TIMEOUT 50
236
237 /*
238 * Software state per device.
239 */
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* PCI device tag */

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Rx buffer offset (0 or 2) used by WM_INIT_RXDESC for alignment */
	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* NOTE(review): presumably nonzero while
					 * dropping the rest of a packet — confirm
					 * against wm_rxintr() */
	int sc_rxlen;			/* bytes accumulated in current Rx chain
					 * (reset by WM_RXCHAIN_RESET) */
	struct mbuf *sc_rxhead;		/* head of Rx mbuf chain being built */
	struct mbuf *sc_rxtail;		/* tail mbuf of that chain */
	struct mbuf **sc_rxtailp;	/* points at chain's terminating m_next */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;		/* ICH8 flash: base of NVM region */
	int sc_ich8_flash_bank_size;	/* ICH8 flash: bank size */
	int sc_nvm_k1_enabled;		/* NOTE(review): looks like cached K1
					 * power-state setting from NVM — verify
					 * against wm_k1_gig_workaround_hv() */
};
374
/*
 * Reset the Rx reassembly chain: empty the sc_rxhead list, point the
 * tail pointer back at the head, and zero the accumulated length.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf (m) to the Rx reassembly chain and advance the tail
 * pointer to its m_next.  Note: (m) is evaluated more than once.
 */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
387
/*
 * Event-counter helpers; compile to nothing when WM_EVENT_COUNTERS
 * is not configured.
 */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

/*
 * Memory-mapped CSR accessors.  CSR_WRITE_FLUSH posts pending writes
 * by reading back the STATUS register.
 */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

/* ICH8 NVM flash window accessors (32- and 16-bit). */
#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
412
/*
 * DMA (bus) addresses of individual Tx/Rx descriptors within the
 * control-data clump, plus 32-bit high/low halves for programming
 * the chip's 64-bit base-address register pairs.  The _HI half is 0
 * when bus addresses are only 32 bits wide.
 */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
425
/*
 * Sync (n) Tx descriptors starting at index (x) for the DMA ops in
 * (ops).  Because the ring wraps, a span crossing the end is split
 * into two bus_dmamap_sync() calls: tail of the ring first, then the
 * remainder from index 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* Sync a single Rx descriptor at index (x) for the DMA ops in (ops). */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
452
/*
 * (Re)initialize Rx descriptor (x) to point at its buffer, sync it,
 * and hand it back to the chip by advancing the RDT register.  The
 * descriptor must be synced before RDT is written, since the write
 * makes it visible to hardware.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
486
487 static void wm_start(struct ifnet *);
488 static void wm_watchdog(struct ifnet *);
489 static int wm_ioctl(struct ifnet *, u_long, void *);
490 static int wm_init(struct ifnet *);
491 static void wm_stop(struct ifnet *, int);
492
493 static void wm_reset(struct wm_softc *);
494 static void wm_rxdrain(struct wm_softc *);
495 static int wm_add_rxbuf(struct wm_softc *, int);
496 static int wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
497 static int wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
498 static int wm_validate_eeprom_checksum(struct wm_softc *);
499 static void wm_tick(void *);
500
501 static void wm_set_filter(struct wm_softc *);
502
503 static int wm_intr(void *);
504 static void wm_txintr(struct wm_softc *);
505 static void wm_rxintr(struct wm_softc *);
506 static void wm_linkintr(struct wm_softc *, uint32_t);
507
508 static void wm_tbi_mediainit(struct wm_softc *);
509 static int wm_tbi_mediachange(struct ifnet *);
510 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
511
512 static void wm_tbi_set_linkled(struct wm_softc *);
513 static void wm_tbi_check_link(struct wm_softc *);
514
515 static void wm_gmii_reset(struct wm_softc *);
516
517 static int wm_gmii_i82543_readreg(device_t, int, int);
518 static void wm_gmii_i82543_writereg(device_t, int, int, int);
519
520 static int wm_gmii_i82544_readreg(device_t, int, int);
521 static void wm_gmii_i82544_writereg(device_t, int, int, int);
522
523 static int wm_gmii_i80003_readreg(device_t, int, int);
524 static void wm_gmii_i80003_writereg(device_t, int, int, int);
525 static int wm_gmii_bm_readreg(device_t, int, int);
526 static void wm_gmii_bm_writereg(device_t, int, int, int);
527 static int wm_gmii_hv_readreg(device_t, int, int);
528 static void wm_gmii_hv_writereg(device_t, int, int, int);
529
530 static void wm_gmii_statchg(device_t);
531
532 static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
533 static int wm_gmii_mediachange(struct ifnet *);
534 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
535
536 static int wm_kmrn_readreg(struct wm_softc *, int);
537 static void wm_kmrn_writereg(struct wm_softc *, int, int);
538
539 static void wm_set_spiaddrsize(struct wm_softc *);
540 static int wm_match(device_t, cfdata_t, void *);
541 static void wm_attach(device_t, device_t, void *);
542 static int wm_is_onboard_nvm_eeprom(struct wm_softc *);
543 static void wm_get_auto_rd_done(struct wm_softc *);
544 static void wm_lan_init_done(struct wm_softc *);
545 static void wm_get_cfg_done(struct wm_softc *);
546 static int wm_get_swsm_semaphore(struct wm_softc *);
547 static void wm_put_swsm_semaphore(struct wm_softc *);
548 static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
549 static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
550 static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
551 static int wm_get_swfwhw_semaphore(struct wm_softc *);
552 static void wm_put_swfwhw_semaphore(struct wm_softc *);
553
554 static int wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
555 static int32_t wm_ich8_cycle_init(struct wm_softc *);
556 static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
557 static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t,
558 uint32_t, uint16_t *);
559 static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
560 static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
561 static void wm_82547_txfifo_stall(void *);
562 static int wm_check_mng_mode(struct wm_softc *);
563 static int wm_check_mng_mode_ich8lan(struct wm_softc *);
564 static int wm_check_mng_mode_82574(struct wm_softc *);
565 static int wm_check_mng_mode_generic(struct wm_softc *);
566 static int wm_check_reset_block(struct wm_softc *);
567 static void wm_get_hw_control(struct wm_softc *);
568 static int wm_check_for_link(struct wm_softc *);
569 static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
570 static void wm_k1_gig_workaround_hv(struct wm_softc *, int);
571 static void wm_configure_k1_ich8lan(struct wm_softc *, int);
572
573 CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
574 wm_match, wm_attach, NULL, NULL);
575
576 /*
577 * Devices supported by this driver.
578 */
/*
 * Table of supported PCI devices.  wm_match()/wm_attach() look the
 * probed vendor/product pair up here to get the marketing name, the
 * MAC generation (wm_chip_type), and media flags.  Terminated by an
 * all-zero sentinel entry.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;	/* PCI vendor ID */
	pci_product_id_t	wmp_product;	/* PCI product ID */
	const char		*wmp_name;	/* device name */
	wm_chip_type		wmp_type;	/* MAC generation */
	int			wmp_flags;	/* media type flags */
#define	WMP_F_1000X		0x01		/* 1000BASE-X (fiber) */
#define	WMP_F_1000T		0x02		/* 1000BASE-T (copper) */
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0	/* SERDES variants not yet supported */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0	/* SERDES variants not yet supported */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0	/* SERDES variants not yet supported */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0	/* SERDES variants not yet supported */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0	/* SERDES variants not yet supported */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0	/* SERDES variants not yet supported */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	/* ICH8 integrated MACs */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	/* ICH9 integrated MACs */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	/* ICH10 integrated MACs */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	/* PCH (82578) integrated MACs */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82578LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82578LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },	/* table terminator */
};
892
#ifdef WM_EVENT_COUNTERS
/*
 * Storage for the generated per-segment Tx event counter names
 * ("txseg0", "txseg1", ...); filled in by wm_attach().  The
 * "txsegXXX" template leaves room for up to three digits plus NUL.
 */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
896
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register through the I/O-mapped indirect access
 *	window (i82544 and later): write the register offset to I/O
 *	offset 0, then read the register value back from I/O offset 4.
 *	The two accesses must stay in this order.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
906
/*
 * wm_io_write:
 *
 *	Write a device register through the I/O-mapped indirect access
 *	window (i82544 and later): the register offset goes to I/O
 *	offset 0, the value to I/O offset 4.  The two writes must stay
 *	in this order; only used for chip-bug workarounds.
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
914
915 static inline void
916 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
917 {
918 wa->wa_low = htole32(v & 0xffffffffU);
919 if (sizeof(bus_addr_t) == 8)
920 wa->wa_high = htole32((uint64_t) v >> 32);
921 else
922 wa->wa_high = 0;
923 }
924
925 static void
926 wm_set_spiaddrsize(struct wm_softc *sc)
927 {
928 uint32_t reg;
929
930 sc->sc_flags |= WM_F_EEPROM_SPI;
931 reg = CSR_READ(sc, WMREG_EECD);
932 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
933 }
934
935 static const struct wm_product *
936 wm_lookup(const struct pci_attach_args *pa)
937 {
938 const struct wm_product *wmp;
939
940 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
941 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
942 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
943 return (wmp);
944 }
945 return (NULL);
946 }
947
948 static int
949 wm_match(device_t parent, cfdata_t cf, void *aux)
950 {
951 struct pci_attach_args *pa = aux;
952
953 if (wm_lookup(pa) != NULL)
954 return (1);
955
956 return (0);
957 }
958
/*
 * wm_attach:
 *
 *	Autoconfiguration attach routine.  Maps the device registers
 *	(and, where available, the I/O-indirect window and ICH FLASH),
 *	establishes the interrupt, determines the bus type and speed,
 *	allocates and loads the Tx/Rx control-data DMA area and the
 *	per-packet DMA maps, probes the EEPROM/FLASH for the station
 *	address and configuration words, selects TBI vs. GMII media,
 *	and finally attaches the network interface, event counters,
 *	and power handler.
 */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		/* match() said yes, so the table lookup cannot fail. */
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/*
		 * First we have to find the I/O BAR.
		 *
		 * NOTE(review): this scan steps by 4 and so can land
		 * on the high half of a 64-bit memory BAR; presumably
		 * harmless for these devices — confirm.
		 */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI Bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
					wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
					       PCI_CAP_PCIX,
					       &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
		    (bus_size_t) 0x100000000ULL,
		    &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
		    (void **)&sc->sc_control_data,
		    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
		    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
		    sc->sc_control_data, cdata_size, NULL,
		    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0,
			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Take over hardware control from the management engine where
	 * manageability mode is enabled on the newer parts.
	 */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrsize(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrsize(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Check whether EEPROM is present or not */
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			aprint_error_dev(sc->sc_dev,
			    "EEPROM PRESENT bit isn't set\n");
			sc->sc_flags |= WM_F_EEPROM_INVALID;
		}
		/* FALLTHROUGH */
	case WM_T_ICH10:
	case WM_T_PCH:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
						ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
			((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
			(reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum. If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Read twice again because some PCI-e parts fail the
		 * first check due to the link being in sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		/* EEPROM words are little-endian byte pairs. */
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	if (sc->sc_type == WM_T_PCH) {
		uint16_t val;

		/*
		 * Save the NVM K1 bit setting.
		 *
		 * NOTE(review): the wm_read_eeprom() return value is
		 * ignored here; on failure 'val' would be used
		 * uninitialized — confirm the read cannot fail on PCH.
		 */
		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);

		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
			sc->sc_nvm_k1_enabled = 1;
		else
			sc->sc_nvm_k1_enabled = 0;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
	    || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc, wmp->wmp_product);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc, wmp->wmp_product);
	}

	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* Check for jumbo frame */
	switch (sc->sc_type) {
	case WM_T_82573:
		/* XXX limited to 9234 if ASPM is disabled */
		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* XXX limited to 9234 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_PCH:
		/* XXX limited to 4096 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82583:
	case WM_T_ICH8:
		/* No support for jumbo frame */
		break;
	default:
		/* ETHER_MAX_LEN_JUMBO */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	}

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're a i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, xname, "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, xname, "txtsopain");

	/* Generate the per-segment counter names into static storage. */
	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
1811
1812 /*
1813 * wm_tx_offload:
1814 *
1815 * Set up TCP/IP checksumming parameters for the
1816 * specified packet.
1817 */
1818 static int
1819 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1820 uint8_t *fieldsp)
1821 {
1822 struct mbuf *m0 = txs->txs_mbuf;
1823 struct livengood_tcpip_ctxdesc *t;
1824 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1825 uint32_t ipcse;
1826 struct ether_header *eh;
1827 int offset, iphl;
1828 uint8_t fields;
1829
1830 /*
1831 * XXX It would be nice if the mbuf pkthdr had offset
1832 * fields for the protocol headers.
1833 */
1834
1835 eh = mtod(m0, struct ether_header *);
1836 switch (htons(eh->ether_type)) {
1837 case ETHERTYPE_IP:
1838 case ETHERTYPE_IPV6:
1839 offset = ETHER_HDR_LEN;
1840 break;
1841
1842 case ETHERTYPE_VLAN:
1843 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1844 break;
1845
1846 default:
1847 /*
1848 * Don't support this protocol or encapsulation.
1849 */
1850 *fieldsp = 0;
1851 *cmdp = 0;
1852 return (0);
1853 }
1854
1855 if ((m0->m_pkthdr.csum_flags &
1856 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1857 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1858 } else {
1859 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1860 }
1861 ipcse = offset + iphl - 1;
1862
1863 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1864 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1865 seg = 0;
1866 fields = 0;
1867
1868 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1869 int hlen = offset + iphl;
1870 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1871
1872 if (__predict_false(m0->m_len <
1873 (hlen + sizeof(struct tcphdr)))) {
1874 /*
1875 * TCP/IP headers are not in the first mbuf; we need
1876 * to do this the slow and painful way. Let's just
1877 * hope this doesn't happen very often.
1878 */
1879 struct tcphdr th;
1880
1881 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1882
1883 m_copydata(m0, hlen, sizeof(th), &th);
1884 if (v4) {
1885 struct ip ip;
1886
1887 m_copydata(m0, offset, sizeof(ip), &ip);
1888 ip.ip_len = 0;
1889 m_copyback(m0,
1890 offset + offsetof(struct ip, ip_len),
1891 sizeof(ip.ip_len), &ip.ip_len);
1892 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1893 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1894 } else {
1895 struct ip6_hdr ip6;
1896
1897 m_copydata(m0, offset, sizeof(ip6), &ip6);
1898 ip6.ip6_plen = 0;
1899 m_copyback(m0,
1900 offset + offsetof(struct ip6_hdr, ip6_plen),
1901 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1902 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1903 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1904 }
1905 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1906 sizeof(th.th_sum), &th.th_sum);
1907
1908 hlen += th.th_off << 2;
1909 } else {
1910 /*
1911 * TCP/IP headers are in the first mbuf; we can do
1912 * this the easy way.
1913 */
1914 struct tcphdr *th;
1915
1916 if (v4) {
1917 struct ip *ip =
1918 (void *)(mtod(m0, char *) + offset);
1919 th = (void *)(mtod(m0, char *) + hlen);
1920
1921 ip->ip_len = 0;
1922 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1923 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1924 } else {
1925 struct ip6_hdr *ip6 =
1926 (void *)(mtod(m0, char *) + offset);
1927 th = (void *)(mtod(m0, char *) + hlen);
1928
1929 ip6->ip6_plen = 0;
1930 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1931 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1932 }
1933 hlen += th->th_off << 2;
1934 }
1935
1936 if (v4) {
1937 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1938 cmdlen |= WTX_TCPIP_CMD_IP;
1939 } else {
1940 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1941 ipcse = 0;
1942 }
1943 cmd |= WTX_TCPIP_CMD_TSE;
1944 cmdlen |= WTX_TCPIP_CMD_TSE |
1945 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1946 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1947 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1948 }
1949
1950 /*
1951 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1952 * offload feature, if we load the context descriptor, we
1953 * MUST provide valid values for IPCSS and TUCSS fields.
1954 */
1955
1956 ipcs = WTX_TCPIP_IPCSS(offset) |
1957 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1958 WTX_TCPIP_IPCSE(ipcse);
1959 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1960 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1961 fields |= WTX_IXSM;
1962 }
1963
1964 offset += iphl;
1965
1966 if (m0->m_pkthdr.csum_flags &
1967 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1968 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1969 fields |= WTX_TXSM;
1970 tucs = WTX_TCPIP_TUCSS(offset) |
1971 WTX_TCPIP_TUCSO(offset +
1972 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1973 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1974 } else if ((m0->m_pkthdr.csum_flags &
1975 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1976 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1977 fields |= WTX_TXSM;
1978 tucs = WTX_TCPIP_TUCSS(offset) |
1979 WTX_TCPIP_TUCSO(offset +
1980 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1981 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1982 } else {
1983 /* Just initialize it to a valid TCP context. */
1984 tucs = WTX_TCPIP_TUCSS(offset) |
1985 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1986 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1987 }
1988
1989 /* Fill in the context descriptor. */
1990 t = (struct livengood_tcpip_ctxdesc *)
1991 &sc->sc_txdescs[sc->sc_txnext];
1992 t->tcpip_ipcs = htole32(ipcs);
1993 t->tcpip_tucs = htole32(tucs);
1994 t->tcpip_cmdlen = htole32(cmdlen);
1995 t->tcpip_seg = htole32(seg);
1996 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1997
1998 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1999 txs->txs_ndesc++;
2000
2001 *cmdp = cmd;
2002 *fieldsp = fields;
2003
2004 return (0);
2005 }
2006
2007 static void
2008 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2009 {
2010 struct mbuf *m;
2011 int i;
2012
2013 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2014 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2015 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2016 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2017 m->m_data, m->m_len, m->m_flags);
2018 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2019 i, i == 1 ? "" : "s");
2020 }
2021
2022 /*
2023 * wm_82547_txfifo_stall:
2024 *
2025 * Callout used to wait for the 82547 Tx FIFO to drain,
2026 * reset the FIFO pointers, and restart packet transmission.
2027 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;	/* callout argument is our softc */
	int s;

	/* Block network interrupts while we touch Tx state. */
	s = splnet();

	if (sc->sc_txfifo_stall) {
		/*
		 * The FIFO is considered drained when the descriptor ring
		 * is empty (tail == head) and the Tx FIFO read/write
		 * pointers and their saved counterparts all agree.
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}
2068
2069 /*
2070 * wm_82547_txfifo_bugchk:
2071 *
2072 * Check for bug condition in the 82547 Tx FIFO. We need to
2073 * prevent enqueueing a packet that would wrap around the end
 * of the Tx FIFO ring buffer, otherwise the chip will croak.
2075 *
2076 * We do this by checking the amount of space before the end
2077 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2078 * the Tx FIFO, wait for all remaining packets to drain, reset
2079 * the internal FIFO pointers to the beginning, and restart
2080 * transmission on the interface.
2081 */
2082 #define WM_FIFO_HDR 0x10
2083 #define WM_82547_PAD_LEN 0x3e0
2084 static int
2085 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2086 {
2087 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2088 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2089
2090 /* Just return if already stalled. */
2091 if (sc->sc_txfifo_stall)
2092 return (1);
2093
2094 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2095 /* Stall only occurs in half-duplex mode. */
2096 goto send_packet;
2097 }
2098
2099 if (len >= WM_82547_PAD_LEN + space) {
2100 sc->sc_txfifo_stall = 1;
2101 callout_schedule(&sc->sc_txfifo_ch, 1);
2102 return (1);
2103 }
2104
2105 send_packet:
2106 sc->sc_txfifo_head += len;
2107 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2108 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2109
2110 return (0);
2111 }
2112
2113 /*
2114 * wm_start: [ifnet interface function]
2115 *
2116 * Start packet transmission on the interface.
2117 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	/* Bail unless the interface is running and not blocked. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			/* Low on jobs; reap completed transmissions first. */
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    device_xname(sc->sc_dev)));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
					  &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso &&
				    seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low 0x%08lx, "
				     "len 0x%04x\n",
				    device_xname(sc->sc_dev), nexttx,
				    curaddr & 0xffffffffUL, (unsigned)curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		}

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
2422
2423 /*
2424 * wm_watchdog: [ifnet interface function]
2425 *
2426 * Watchdog timer handler.
2427 */
2428 static void
2429 wm_watchdog(struct ifnet *ifp)
2430 {
2431 struct wm_softc *sc = ifp->if_softc;
2432
2433 /*
2434 * Since we're using delayed interrupts, sweep up
2435 * before we report an error.
2436 */
2437 wm_txintr(sc);
2438
2439 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2440 log(LOG_ERR,
2441 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2442 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2443 sc->sc_txnext);
2444 ifp->if_oerrors++;
2445
2446 /* Reset the interface. */
2447 (void) wm_init(ifp);
2448 }
2449
2450 /* Try to get more packets going. */
2451 wm_start(ifp);
2452 }
2453
2454 /*
2455 * wm_ioctl: [ifnet interface function]
2456 *
2457 * Handle control requests from the operator.
2458 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int diff, s, error;

	/* Serialize against the interrupt path. */
	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			diff = (ifp->if_flags ^ sc->sc_if_flags)
			    & (IFF_PROMISC | IFF_ALLMULTI);
			if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
				/*
				 * If the difference between last flag and
				 * new flag is only IFF_PROMISC or
				 * IFF_ALLMULTI, set multicast filter only
				 * (don't reset to prevent link down).
				 */
				wm_set_filter(sc);
			} else {
				/*
				 * Reset the interface to pick up changes in
				 * any other flags that affect the hardware
				 * state.
				 */
				wm_init(ifp);
			}
		} else {
			/* Interface going down; stop it if it was running. */
			if (ifp->if_flags & IFF_RUNNING)
				wm_stop(ifp, 1);
		}
		/* Remember the flags so we can compute diff next time. */
		sc->sc_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCINITIFADDR:
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			/* Update the link-layer address in the if_dl. */
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)),
			    ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			break;
		}
		/* Fall through for rest */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		/* ENETRESET: the hardware state needs refreshing. */
		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}
2554
2555 /*
2556 * wm_intr:
2557 *
2558 * Interrupt service routine.
2559 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	/*
	 * Loop until reading ICR yields no cause bits we care about.
	 * NOTE(review): reading ICR appears to acknowledge the causes
	 * (read-to-clear); confirm against the chip documentation.
	 */
	while (1 /* CONSTCOND */) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx processing is done unconditionally for each pass. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		/* Likewise Tx completion reaping. */
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Receiver overrun: count it as an input error. */
			ifp->if_ierrors++;
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
2621
2622 /*
2623 * wm_txintr:
2624 *
2625 * Helper; handle transmit interrupts.
2626 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* We may be about to free up Tx resources; clear the flag. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		/* Pull the descriptor writeback in from the device. */
		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			/* Not done yet; give the descriptor back and stop. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		/* Classify the completion: collision errors vs. success. */
		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		} else
			ifp->if_opackets++;

		/* Return the descriptors and the mbuf chain. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}
2708
2709 /*
2710 * wm_rxintr:
2711 *
2712 * Helper; handle receive interrupts.
2713 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	/* Walk the ring from the last processed descriptor. */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Snapshot the descriptor fields after the sync. */
		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			/* Mid-packet discard: drop until EOP is seen. */
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero.  Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Account this fragment toward the in-progress packet. */
		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can
		 * be configured to strip it), so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 */
		if (m->m_len < ETHER_CRC_LEN) {
			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
			m->m_len = 0;
		} else {
			m->m_len -= ETHER_CRC_LEN;
		}
		len = sc->sc_rxlen - ETHER_CRC_LEN;

		WM_RXCHAIN_LINK(sc, m);

		/* Detach the completed chain and reset for the next one. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
2916
2917 /*
2918 * wm_linkintr_gmii:
2919 *
2920 * Helper; handle link interrupts for GMII.
2921 */
2922 static void
2923 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
2924 {
2925
2926 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2927 __func__));
2928
2929 if (icr & ICR_LSC) {
2930 DPRINTF(WM_DEBUG_LINK,
2931 ("%s: LINK: LSC -> mii_tick\n",
2932 device_xname(sc->sc_dev)));
2933 mii_tick(&sc->sc_mii);
2934 if (sc->sc_type == WM_T_82543) {
2935 int miistatus, active;
2936
2937 /*
2938 * With 82543, we need to force speed and
2939 * duplex on the MAC equal to what the PHY
2940 * speed and duplex configuration is.
2941 */
2942 miistatus = sc->sc_mii.mii_media_status;
2943
2944 if (miistatus & IFM_ACTIVE) {
2945 active = sc->sc_mii.mii_media_active;
2946 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
2947 switch (IFM_SUBTYPE(active)) {
2948 case IFM_10_T:
2949 sc->sc_ctrl |= CTRL_SPEED_10;
2950 break;
2951 case IFM_100_TX:
2952 sc->sc_ctrl |= CTRL_SPEED_100;
2953 break;
2954 case IFM_1000_T:
2955 sc->sc_ctrl |= CTRL_SPEED_1000;
2956 break;
2957 default:
2958 /*
2959 * fiber?
2960 * Shoud not enter here.
2961 */
2962 printf("unknown media (%x)\n",
2963 active);
2964 break;
2965 }
2966 if (active & IFM_FDX)
2967 sc->sc_ctrl |= CTRL_FD;
2968 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2969 }
2970 } else if (sc->sc_type == WM_T_PCH) {
2971 wm_k1_gig_workaround_hv(sc,
2972 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
2973 }
2974
2975 if ((sc->sc_phytype == WMPHY_82578)
2976 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
2977 == IFM_1000_T)) {
2978
2979 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
2980 printf("XXX link sall wa\n");
2981 delay(200*1000); /* XXX too big */
2982
2983 /* Link stall fix for link up */
2984 wm_gmii_hv_writereg(sc->sc_dev, 1,
2985 HV_MUX_DATA_CTRL,
2986 HV_MUX_DATA_CTRL_GEN_TO_MAC
2987 | HV_MUX_DATA_CTRL_FORCE_SPEED);
2988 wm_gmii_hv_writereg(sc->sc_dev, 1,
2989 HV_MUX_DATA_CTRL,
2990 HV_MUX_DATA_CTRL_GEN_TO_MAC);
2991 }
2992 }
2993 } else if (icr & ICR_RXSEQ) {
2994 DPRINTF(WM_DEBUG_LINK,
2995 ("%s: LINK Receive sequence error\n",
2996 device_xname(sc->sc_dev)));
2997 }
2998 }
2999
3000 /*
3001 * wm_linkintr_tbi:
3002 *
3003 * Helper; handle link interrupts for TBI mode.
3004 */
static void
wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
	    __func__));

	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		if (status & STATUS_LU) {
			/* Link came up; note duplex for debug output. */
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			/* Collision distance depends on the duplex mode. */
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Older chips keep FCRTL at a different offset. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXCFG) {
		/* Received a /C/ ordered set: autonegotiation traffic. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_nrxcfg++;
		wm_check_for_link(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
}
3057
3058 /*
3059 * wm_linkintr:
3060 *
3061 * Helper; handle link interrupts.
3062 */
3063 static void
3064 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3065 {
3066
3067 if (sc->sc_flags & WM_F_HAS_MII)
3068 wm_linkintr_gmii(sc, icr);
3069 else
3070 wm_linkintr_tbi(sc, icr);
3071 }
3072
/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
static void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();

	/* Flow-control event counters exist on 82542_2_1 and later only. */
	if (sc->sc_type >= WM_T_82542_2_1) {
		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
	}

	/*
	 * NOTE(review): these counters are read-and-accumulated here and in
	 * wm_init() — presumably they clear on read; confirm against the
	 * hardware manual before changing this pattern.
	 */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Poll link state via the MII when a PHY is present, else TBI. */
	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	/* Reschedule ourselves one second from now. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}
3108
/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 *
 *	Per-chip-variant sequence: size the packet buffer, quiesce PCIe
 *	master accesses, mask interrupts, stop TX/RX, issue the reset via
 *	the variant-appropriate mechanism, then wait for the EEPROM/NVM
 *	auto-reload to finish.  The ordering of these steps is mandated by
 *	the hardware; do not reorder.
 */
static void
wm_reset(struct wm_softc *sc)
{
	int phy_reset = 0;
	uint32_t reg, func, mask;
	int i;

	/*
	 * Allocate on-chip memory according to the MTU size.
	 * The Packet Buffer Allocation register must be written
	 * before the chip is reset.
	 */
	switch (sc->sc_type) {
	case WM_T_82547:
	case WM_T_82547_2:
		/* 82547: carve the remainder of the 40K buffer for TX FIFO
		 * workaround bookkeeping. */
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_22K : PBA_30K;
		sc->sc_txfifo_head = 0;
		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
		sc->sc_txfifo_size =
		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
		sc->sc_txfifo_stall = 0;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_80003:
		sc->sc_pba = PBA_32K;
		break;
	case WM_T_82573:
		sc->sc_pba = PBA_12K;
		break;
	case WM_T_82574:
	case WM_T_82583:
		sc->sc_pba = PBA_20K;
		break;
	case WM_T_ICH8:
		sc->sc_pba = PBA_8K;
		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
		break;
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		sc->sc_pba = PBA_10K;
		break;
	default:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_40K : PBA_48K;
		break;
	}
	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);

	/* On PCIe parts, disable GIO master access and wait (up to ~80ms)
	 * for in-flight master cycles to drain before resetting. */
	if (sc->sc_flags & WM_F_PCIE) {
		int timeout = 800;

		sc->sc_ctrl |= CTRL_GIO_M_DIS;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		while (timeout--) {
			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
				break;
			delay(100);
		}
	}

	/* clear interrupt */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_RCTL, 0);
	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);

	/* set_tbi_sbp_82543() */

	delay(10*1000);

	/* Must acquire the MDIO ownership before MAC reset */
	switch(sc->sc_type) {
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* Request software ownership of the MDIO interface and
		 * poll until the hardware acknowledges it (or time out). */
		i = 0;
		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
		do {
			CSR_WRITE(sc, WMREG_EXTCNFCTR,
			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
				break;
			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
			delay(2*1000);
			i++;
		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
		break;
	default:
		break;
	}

	/*
	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description about PHY_RST bit in CTRL register
	 * in 8254x_GBe_SDM.pdf.
	 */
	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
		CSR_WRITE(sc, WMREG_CTRL,
		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
		delay(5000);
	}

	/* Issue the MAC reset using the mechanism each variant requires. */
	switch (sc->sc_type) {
	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		/*
		 * On some chipsets, a reset through a memory-mapped write
		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes major headache that can be
		 * avoided by issuing the reset via indirect register writes
		 * through I/O space.
		 *
		 * So, if we successfully mapped the I/O BAR at attach time,
		 * use that.  Otherwise, try our luck with a memory-mapped
		 * reset.
		 */
		if (sc->sc_flags & WM_F_IOH_VALID)
			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
		else
			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	case WM_T_82545_3:
	case WM_T_82546_3:
		/* Use the shadow control register on these chips. */
		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
		break;
	case WM_T_80003:
		/* Dual-port part: pick the PHY semaphore for our function. */
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		mask = func ? SWFW_PHY1_SM : SWFW_PHY0_SM;
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		wm_get_swfw_semaphore(sc, mask);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		wm_put_swfw_semaphore(sc, mask);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		if (wm_check_reset_block(sc) == 0) {
			/* PCH: clear PHY Reset Asserted before resetting. */
			if (sc->sc_type >= WM_T_PCH) {
				uint32_t status;

				status = CSR_READ(sc, WMREG_STATUS);
				CSR_WRITE(sc, WMREG_STATUS,
				    status & ~STATUS_PHYRA);
			}

			reg |= CTRL_PHY_RESET;
			phy_reset = 1;
		}
		wm_get_swfwhw_semaphore(sc);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		delay(20*1000);
		wm_put_swfwhw_semaphore(sc);
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82546:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	default:
		/* Everything else can safely use the documented method. */
		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
		break;
	}

	if (phy_reset != 0)
		wm_get_cfg_done(sc);

	/* reload EEPROM */
	switch(sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Explicitly trigger the EEPROM reload on old chips. */
		delay(10);
		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(2000);
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		delay(5*1000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		delay(20000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
			delay(10);
			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		/*
		 * Phy configuration from NVM just starts after EECD_AUTO_RD
		 * is set.
		 */
		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
		    || (sc->sc_type == WM_T_82583))
			delay(25*1000);
		break;
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		break;
	case WM_T_ICH10:
	case WM_T_PCH:
		wm_lan_init_done(sc);
		break;
	default:
		panic("%s: unknown type\n", __func__);
	}

	/* reload sc_ctrl */
	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);

	/* dummy read from WUC */
	if (sc->sc_type == WM_T_PCH)
		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
	/*
	 * For PCH, this write will make sure that any noise will be detected
	 * as a CRC error and be dropped rather than show up as a bad packet
	 * to the DMA engine
	 */
	if (sc->sc_type == WM_T_PCH)
		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);

#if 0
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
			return;
		}
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		log(LOG_ERR, "%s: reset failed to complete\n",
		    device_xname(sc->sc_dev));
#endif
}
3387
/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Stops any in-progress I/O, resets the chip, then programs the
 *	transmit/receive rings, flow control, checksum offload, interrupt
 *	moderation and receive filter before marking the interface RUNNING.
 *	Returns 0 on success or an errno from ring setup / media selection.
 */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
	 * There is a small but measurable benefit to avoiding the adjusment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* update statistics before reset */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Take over hardware control from firmware where management exists. */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/* Reset the PHY. */
	if (sc->sc_flags & WM_F_HAS_MII)
		wm_gmii_reset(sc);

	reg = CSR_READ(sc, WMREG_CTRL_EXT);
	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
	if (sc->sc_type == WM_T_PCH)
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC(sc);
	sc->sc_txnext = 0;

	/* Pre-82543 chips have the TX ring registers at the old offsets. */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN(sc);
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* Second RX ring is unused; zero its registers. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				log(LOG_ERR, "%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
	}

	sc->sc_fcrtl = FCRTL_DFLT;
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
	} else {
		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
	}

	if (sc->sc_type == WM_T_80003)
		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
	else
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

	/* Deal with VLAN enables. */
	if (VLAN_ATTACHED(&sc->sc_ethercom))
		sc->sc_ctrl |= CTRL_VME;
	else
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	if (sc->sc_flags & WM_F_HAS_MII) {
		int val;

		switch (sc->sc_type) {
		case WM_T_80003:
		case WM_T_ICH8:
		case WM_T_ICH9:
		case WM_T_ICH10:
		case WM_T_PCH:
			/*
			 * Set the mac to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the phy; this fixes erroneous timeouts at
			 * 10Mbps.
			 */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
			    0xFFFF);
			val = wm_kmrn_readreg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM);
			val |= 0x3F;
			wm_kmrn_writereg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
			break;
		default:
			break;
		}

		if (sc->sc_type == WM_T_80003) {
			val = CSR_READ(sc, WMREG_CTRL_EXT);
			val &= ~CTRL_EXT_LINK_MODE_MASK;
			CSR_WRITE(sc, WMREG_CTRL_EXT, val);

			/* Bypass RX and TX FIFO's */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
			    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);

			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
		}
	}
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/* Reset TBI's RXCFG count */
	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	/* RXCFG interrupts only matter for TBI (non-MII) media. */
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
		reg = CSR_READ(sc, WMREG_KABGTXD);
		reg |= KABGTXD_BGSQLBIAS;
		CSR_WRITE(sc, WMREG_KABGTXD, reg);
	}

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

	if (sc->sc_type >= WM_T_82543) {
		/*
		 * Set up the interrupt throttling register (units of 256ns)
		 * Note that a footnote in Intel's documentation says this
		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, it appears to be the case
		 * that that is also true for the 1024ns units of the other
		 * interrupt-related timer registers -- so, really, we ought
		 * to divide this value by 4 when the link speed is low.
		 *
		 * XXX implement this division at link speed change!
		 */

		/*
		 * For N interrupts/sec, set this value to:
		 * 1000000000 / (N * 256).  Note that we set the
		 * absolute and packet timer values to this value
		 * divided by 4 to get "simple timer" behavior.
		 */

		sc->sc_itr = 1500;		/* 2604 ints/sec */
		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
	}

	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
	    | TCTL_CT(TX_COLLISION_THRESHOLD)
	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	if (sc->sc_type == WM_T_80003) {
		reg = CSR_READ(sc, WMREG_TCTL_EXT);
		reg &= ~TCTL_EXT_GCEX_MASK;
		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
	}

	/* Set the media. */
	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
		goto out;

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
	    && (ifp->if_mtu > ETHERMTU))
		sc->sc_rctl |= RCTL_LPE;

	/* RX buffer size must match the cluster size. */
	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch(MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    device_xname(sc->sc_dev));
	return (error);
}
3772
3773 /*
3774 * wm_rxdrain:
3775 *
3776 * Drain the receive queue.
3777 */
3778 static void
3779 wm_rxdrain(struct wm_softc *sc)
3780 {
3781 struct wm_rxsoft *rxs;
3782 int i;
3783
3784 for (i = 0; i < WM_NRXDESC; i++) {
3785 rxs = &sc->sc_rxsoft[i];
3786 if (rxs->rxs_mbuf != NULL) {
3787 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3788 m_freem(rxs->rxs_mbuf);
3789 rxs->rxs_mbuf = NULL;
3790 }
3791 }
3792 }
3793
/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 *
 *	Stops the timers, quiesces the MII (if present), disables TX/RX
 *	and interrupts, frees queued transmit mbufs and marks the
 *	interface down.  If 'disable' is set, the receive queue is
 *	drained as well.
 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		wm_rxdrain(sc);
}
3853
3854 void
3855 wm_get_auto_rd_done(struct wm_softc *sc)
3856 {
3857 int i;
3858
3859 /* wait for eeprom to reload */
3860 switch (sc->sc_type) {
3861 case WM_T_82571:
3862 case WM_T_82572:
3863 case WM_T_82573:
3864 case WM_T_82574:
3865 case WM_T_82583:
3866 case WM_T_80003:
3867 case WM_T_ICH8:
3868 case WM_T_ICH9:
3869 for (i = 0; i < 10; i++) {
3870 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3871 break;
3872 delay(1000);
3873 }
3874 if (i == 10) {
3875 log(LOG_ERR, "%s: auto read from eeprom failed to "
3876 "complete\n", device_xname(sc->sc_dev));
3877 }
3878 break;
3879 default:
3880 break;
3881 }
3882 }
3883
/*
 * wm_lan_init_done:
 *
 *	Wait for LAN initialization to complete after reset on ICH10/PCH
 *	parts, then clear the LAN_INIT_DONE bit.  Panics on any other
 *	chip type (callers must only use this on ICH10/PCH).
 */
void
wm_lan_init_done(struct wm_softc *sc)
{
	uint32_t reg = 0;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_ICH10:
	case WM_T_PCH:
		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_LAN_INIT_DONE) != 0)
				break;
			delay(100);
		}
		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
			log(LOG_ERR, "%s: %s: lan_init_done failed to "
			    "complete\n", device_xname(sc->sc_dev), __func__);
		}
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/* Acknowledge completion by clearing the done bit. */
	reg &= ~STATUS_LAN_INIT_DONE;
	CSR_WRITE(sc, WMREG_STATUS, reg);
}
3914
/*
 * wm_get_cfg_done:
 *
 *	Wait for the PHY configuration (loaded from the EEPROM/NVM after
 *	reset) to complete.  The wait mechanism differs per chip variant:
 *	fixed delays for older parts, EEMNGCTL CFGDONE polling for
 *	80003/82571/82572, and a delay (plus PHYRA clear on PCH) for the
 *	ICH/PCH family.  Panics on unknown chip types.
 */
void
wm_get_cfg_done(struct wm_softc *sc)
{
	int func = 0;
	int mask;
	uint32_t reg;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* generic */
		delay(10*1000);
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
		/* Poll the per-function CFGDONE bit in EEMNGCTL. */
		if (sc->sc_type == WM_T_80003)
			func = (CSR_READ(sc, WMREG_STATUS)
			    >> STATUS_FUNCID_SHIFT) & 1;
		else
			func = 0; /* XXX Is it true for 82571? */
		mask = (func == 1) ? EEMNGCTL_CFGDONE_1 : EEMNGCTL_CFGDONE_0;
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
				device_xname(sc->sc_dev), __func__));
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* PCH: acknowledge PHY Reset Asserted if it is set. */
		if (sc->sc_type >= WM_T_PCH) {
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_PHYRA) != 0)
				CSR_WRITE(sc, WMREG_STATUS,
				    reg & ~STATUS_PHYRA);
		}
		delay(10*1000);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
3983
3984 /*
3985 * wm_acquire_eeprom:
3986 *
3987 * Perform the EEPROM handshake required on some chips.
3988 */
3989 static int
3990 wm_acquire_eeprom(struct wm_softc *sc)
3991 {
3992 uint32_t reg;
3993 int x;
3994 int ret = 0;
3995
3996 /* always success */
3997 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3998 return 0;
3999
4000 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4001 ret = wm_get_swfwhw_semaphore(sc);
4002 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4003 /* this will also do wm_get_swsm_semaphore() if needed */
4004 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4005 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4006 ret = wm_get_swsm_semaphore(sc);
4007 }
4008
4009 if (ret) {
4010 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4011 __func__);
4012 return 1;
4013 }
4014
4015 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4016 reg = CSR_READ(sc, WMREG_EECD);
4017
4018 /* Request EEPROM access. */
4019 reg |= EECD_EE_REQ;
4020 CSR_WRITE(sc, WMREG_EECD, reg);
4021
4022 /* ..and wait for it to be granted. */
4023 for (x = 0; x < 1000; x++) {
4024 reg = CSR_READ(sc, WMREG_EECD);
4025 if (reg & EECD_EE_GNT)
4026 break;
4027 delay(5);
4028 }
4029 if ((reg & EECD_EE_GNT) == 0) {
4030 aprint_error_dev(sc->sc_dev,
4031 "could not acquire EEPROM GNT\n");
4032 reg &= ~EECD_EE_REQ;
4033 CSR_WRITE(sc, WMREG_EECD, reg);
4034 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4035 wm_put_swfwhw_semaphore(sc);
4036 if (sc->sc_flags & WM_F_SWFW_SYNC)
4037 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4038 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4039 wm_put_swsm_semaphore(sc);
4040 return (1);
4041 }
4042 }
4043
4044 return (0);
4045 }
4046
4047 /*
4048 * wm_release_eeprom:
4049 *
4050 * Release the EEPROM mutex.
4051 */
4052 static void
4053 wm_release_eeprom(struct wm_softc *sc)
4054 {
4055 uint32_t reg;
4056
4057 /* always success */
4058 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4059 return;
4060
4061 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4062 reg = CSR_READ(sc, WMREG_EECD);
4063 reg &= ~EECD_EE_REQ;
4064 CSR_WRITE(sc, WMREG_EECD, reg);
4065 }
4066
4067 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4068 wm_put_swfwhw_semaphore(sc);
4069 if (sc->sc_flags & WM_F_SWFW_SYNC)
4070 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4071 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4072 wm_put_swsm_semaphore(sc);
4073 }
4074
4075 /*
4076 * wm_eeprom_sendbits:
4077 *
4078 * Send a series of bits to the EEPROM.
4079 */
4080 static void
4081 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4082 {
4083 uint32_t reg;
4084 int x;
4085
4086 reg = CSR_READ(sc, WMREG_EECD);
4087
4088 for (x = nbits; x > 0; x--) {
4089 if (bits & (1U << (x - 1)))
4090 reg |= EECD_DI;
4091 else
4092 reg &= ~EECD_DI;
4093 CSR_WRITE(sc, WMREG_EECD, reg);
4094 delay(2);
4095 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4096 delay(2);
4097 CSR_WRITE(sc, WMREG_EECD, reg);
4098 delay(2);
4099 }
4100 }
4101
4102 /*
4103 * wm_eeprom_recvbits:
4104 *
4105 * Receive a series of bits from the EEPROM.
4106 */
4107 static void
4108 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4109 {
4110 uint32_t reg, val;
4111 int x;
4112
4113 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4114
4115 val = 0;
4116 for (x = nbits; x > 0; x--) {
4117 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4118 delay(2);
4119 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4120 val |= (1U << (x - 1));
4121 CSR_WRITE(sc, WMREG_EECD, reg);
4122 delay(2);
4123 }
4124 *valp = val;
4125 }
4126
/*
 * wm_read_eeprom_uwire:
 *
 *	Read 'wordcnt' 16-bit words starting at 'word' from the EEPROM
 *	using the MicroWire protocol, storing them in 'data'.
 *	Always returns 0.
 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	return (0);
}
4166
4167 /*
4168 * wm_spi_eeprom_ready:
4169 *
4170 * Wait for a SPI EEPROM to be ready for commands.
4171 */
4172 static int
4173 wm_spi_eeprom_ready(struct wm_softc *sc)
4174 {
4175 uint32_t val;
4176 int usec;
4177
4178 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4179 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4180 wm_eeprom_recvbits(sc, &val, 8);
4181 if ((val & SPI_SR_RDY) == 0)
4182 break;
4183 }
4184 if (usec >= SPI_MAX_RETRIES) {
4185 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4186 return (1);
4187 }
4188 return (0);
4189 }
4190
/*
 * wm_read_eeprom_spi:
 *
 *	Read 'wordcnt' 16-bit words starting at 'word' from the EEPROM
 *	using the SPI protocol, storing them in 'data'.  Returns 0 on
 *	success, 1 if the EEPROM never became ready.
 */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return (1);

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/* Small (8-bit address) parts encode A8 in the opcode. */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* Byte address: word index shifted left one. */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	for (i = 0; i < wordcnt; i++) {
		/* Swap the bytes: SPI data arrives big-endian. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return (0);
}
4236
4237 #define EEPROM_CHECKSUM 0xBABA
4238 #define EEPROM_SIZE 0x0040
4239
4240 /*
4241 * wm_validate_eeprom_checksum
4242 *
4243 * The checksum is defined as the sum of the first 64 (16 bit) words.
4244 */
4245 static int
4246 wm_validate_eeprom_checksum(struct wm_softc *sc)
4247 {
4248 uint16_t checksum;
4249 uint16_t eeprom_data;
4250 int i;
4251
4252 checksum = 0;
4253
4254 for (i = 0; i < EEPROM_SIZE; i++) {
4255 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4256 return 1;
4257 checksum += eeprom_data;
4258 }
4259
4260 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4261 return 1;
4262
4263 return 0;
4264 }
4265
/*
 * wm_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 *	Acquires the EEPROM, then dispatches to the access method the
 *	chip supports: ICH8-family flash, the EERD register interface,
 *	SPI, or (the default) Microwire.  Returns 0 on success.
 */
static int
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	int rv;

	/* Fail fast if attach time marked the EEPROM contents unusable. */
	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		return 1;

	/* Serialize EEPROM access against firmware/other users. */
	if (wm_acquire_eeprom(sc))
		return 1;

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_SPI)
		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
	else
		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);

	wm_release_eeprom(sc);
	return rv;
}
4295
/*
 * wm_read_eeprom_eerd:
 *
 *	Read words from the EEPROM through the EERD register interface:
 *	program the word address with the START bit, poll for DONE, then
 *	pull the data out of the data field.  Returns 0 on success, or
 *	the poll result (-1) on timeout.
 */
static int
wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
    uint16_t *data)
{
	int i, eerd = 0;
	int error = 0;

	for (i = 0; i < wordcnt; i++) {
		/* Program the word address and kick off the read. */
		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;

		CSR_WRITE(sc, WMREG_EERD, eerd);
		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
		if (error != 0)
			break;

		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
	}

	return error;
}
4316
4317 static int
4318 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4319 {
4320 uint32_t attempts = 100000;
4321 uint32_t i, reg = 0;
4322 int32_t done = -1;
4323
4324 for (i = 0; i < attempts; i++) {
4325 reg = CSR_READ(sc, rw);
4326
4327 if (reg & EERD_DONE) {
4328 done = 0;
4329 break;
4330 }
4331 delay(5);
4332 }
4333
4334 return done;
4335 }
4336
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *	Allocates a fresh mbuf cluster, loads it into the slot's DMA
 *	map, and re-initializes the hardware RX descriptor.
 *	Returns 0 on success or ENOBUFS if mbuf allocation fails.
 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		/* Cluster allocation failed; give the mbuf header back. */
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unmap any previous buffer before reusing the DMA map. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Make the full cluster available to the hardware. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}
4382
/*
 * wm_set_ral:
 *
 *	Set an entry in the receive address list.
 *	A NULL enaddr clears the slot (the address-valid bit is left
 *	unset, so the hardware ignores it).
 */
static void
wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
{
	uint32_t ral_lo, ral_hi;

	if (enaddr != NULL) {
		/* Pack the 6-byte address into the lo/hi register pair. */
		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
		    (enaddr[3] << 24);
		ral_hi = enaddr[4] | (enaddr[5] << 8);
		ral_hi |= RAL_AV;	/* mark the entry valid */
	} else {
		ral_lo = 0;
		ral_hi = 0;
	}

	/* 82544 and newer use the relocated ("Cordova") RAL block. */
	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_hi);
	} else {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
	}
}
4413
4414 /*
4415 * wm_mchash:
4416 *
4417 * Compute the hash of the multicast address for the 4096-bit
4418 * multicast filter.
4419 */
4420 static uint32_t
4421 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4422 {
4423 static const int lo_shift[4] = { 4, 3, 2, 0 };
4424 static const int hi_shift[4] = { 4, 5, 6, 8 };
4425 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4426 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4427 uint32_t hash;
4428
4429 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4430 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4431 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4432 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4433 return (hash & 0x3ff);
4434 }
4435 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4436 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4437
4438 return (hash & 0xfff);
4439 }
4440
/*
 * wm_set_filter:
 *
 *	Set up the receive filter: broadcast/promiscuous bits in RCTL,
 *	the receive address list, and the multicast hash table.  Falls
 *	back to ALLMULTI for promiscuous mode or multicast ranges.
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	/* 82544 and newer relocated the multicast table array. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	/* Set a hash bit for each multicast address we should accept. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Split the hash into a 32-bit register index and bit. */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug??  On the 82544, certain MTA entries
		 * are rewritten together with their neighbor, apparently
		 * to work around a write issue in that silicon.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
4541
/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 *	Registers the supported fiber media with ifmedia and configures
 *	the software-defined pins used for the link LED / LOS signal.
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";

	/* Older parts use a different default transmit IPG. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	/* Ticks before wm_tbi_check_link() retries autonegotiation. */
	sc->sc_tbi_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Register each medium and print it as part of the attach line. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
4595
4596 /*
4597 * wm_tbi_mediastatus: [ifmedia interface function]
4598 *
4599 * Get the current interface media status on a 1000BASE-X device.
4600 */
4601 static void
4602 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4603 {
4604 struct wm_softc *sc = ifp->if_softc;
4605 uint32_t ctrl, status;
4606
4607 ifmr->ifm_status = IFM_AVALID;
4608 ifmr->ifm_active = IFM_ETHER;
4609
4610 status = CSR_READ(sc, WMREG_STATUS);
4611 if ((status & STATUS_LU) == 0) {
4612 ifmr->ifm_active |= IFM_NONE;
4613 return;
4614 }
4615
4616 ifmr->ifm_status |= IFM_ACTIVE;
4617 ifmr->ifm_active |= IFM_1000_SX;
4618 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4619 ifmr->ifm_active |= IFM_FDX;
4620 ctrl = CSR_READ(sc, WMREG_CTRL);
4621 if (ctrl & CTRL_RFCE)
4622 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4623 if (ctrl & CTRL_TFCE)
4624 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4625 }
4626
/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 *	Programs TXCW for autonegotiation (or forces link up for fixed
 *	media), then waits for the link and updates the collision
 *	distance / flow-control registers from the negotiated result.
 *	Always returns 0.
 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	sc->sc_txcw = 0;
	/* Advertise pause for auto media, or when flow control is forced. */
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*
		 * If autonegotiation is turned off, force link up and turn on
		 * full duplex
		 */
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    device_xname(sc->sc_dev),sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	/* Sample the Loss-Of-Signal input pin. */
	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		/* Poll for link-up, 10ms per iteration. */
		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    device_xname(sc->sc_dev),i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev),status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			/* Collision distance depends on negotiated duplex. */
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
4744
4745 /*
4746 * wm_tbi_set_linkled:
4747 *
4748 * Update the link LED on 1000BASE-X devices.
4749 */
4750 static void
4751 wm_tbi_set_linkled(struct wm_softc *sc)
4752 {
4753
4754 if (sc->sc_tbi_linkup)
4755 sc->sc_ctrl |= CTRL_SWDPIN(0);
4756 else
4757 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4758
4759 /* 82540 or newer devices are active low */
4760 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4761
4762 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4763 }
4764
/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 *	Updates sc_tbi_linkup from STATUS, recovers from RXCFG storms by
 *	reinitializing the interface, and periodically restarts
 *	autonegotiation while the link stays down.
 */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw, ctrl, status;

	status = CSR_READ(sc, WMREG_STATUS);

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);

	/* set link status */
	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	} else if (sc->sc_tbi_linkup == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tbi_linkup = 1;
	}

	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
	    && ((status & STATUS_LU) == 0)) {
		sc->sc_tbi_linkup = 0;
		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
			/* RXCFG storm! */
			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
			/* Reinitialize the interface to recover. */
			wm_init(ifp);
			wm_start(ifp);
		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/* If the timer expired, retry autonegotiation */
			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
				sc->sc_tbi_ticks = 0;
				/*
				 * Reset the link, and let autonegotiation do
				 * its thing
				 */
				sc->sc_ctrl |= CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				sc->sc_ctrl &= ~CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				/* Pulse ANE off/on to restart negotiation. */
				CSR_WRITE(sc, WMREG_TXCW,
				    sc->sc_txcw & ~TXCW_ANE);
				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
			}
		}
	}

	wm_tbi_set_linkled(sc);
}
4827
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 *	Acquires the appropriate hardware/firmware semaphore for the
 *	chip type, performs the chip-specific PHY reset sequence,
 *	releases the semaphore, waits for configuration to complete,
 *	then applies chip-specific post-reset setup.
 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int func = 0; /* XXX gcc */
	int rv;

	/* get phy semaphore */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* XXX should get sw semaphore, too */
		rv = wm_get_swsm_semaphore(sc);
		break;
	case WM_T_80003:
		/* Dual-port part: pick the semaphore for our function. */
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		rv = wm_get_swfw_semaphore(sc,
		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		rv = wm_get_swfwhw_semaphore(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
		/*
		 * With 82543, we need to force speed and duplex on the MAC
		 * equal to what the PHY speed and duplex configuration is.
		 * In addition, we need to perform a hardware reset on the PHY
		 * to take it out of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10*1000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(150);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
		delay(20*1000);	/* XXX extra delay to get PHY ID? */
		break;
	case WM_T_82544:	/* reset 10000us */
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:	/* reset 100us */
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150);

		if ((sc->sc_type == WM_T_82541)
		    || (sc->sc_type == WM_T_82541_2)
		    || (sc->sc_type == WM_T_82547)
		    || (sc->sc_type == WM_T_82547_2)) {
			/* workaround for igp are done in igp_reset() */
			/* XXX add code to set LED after phy reset */
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(100);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/* release PHY semaphore */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* XXX should put sw semaphore, too */
		wm_put_swsm_semaphore(sc);
		break;
	case WM_T_80003:
		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		wm_put_swfwhw_semaphore(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	/* get_cfg_done */
	wm_get_cfg_done(sc);

	/* extra setup */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541_2:
	case WM_T_82547_2:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		/* null */
		break;
	case WM_T_82541:
	case WM_T_82547:
		/* XXX Configure actively LED after PHY reset */
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* Allow time for h/w to get to a quiescent state after reset */
		delay(10*1000);

		if (sc->sc_type == WM_T_PCH) {
			wm_hv_phy_workaround_ich8lan(sc);

			/*
			 * dummy read to clear the phy wakeup bit after lcd
			 * reset
			 */
			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
		}

		/*
		 * XXX Configure the LCD with the extended configuration region
		 * in NVM
		 */

		/* Configure the LCD with the OEM bits in NVM */
		if (sc->sc_type == WM_T_PCH) {
			/*
			 * Disable LPLU.
			 * XXX It seems that 82567 has LPLU, too.
			 */
			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
			reg |= HV_OEM_BITS_ANEGNOW;
			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
		}
		break;
	default:
		panic("%s: unknown type\n", __func__);
		break;
	}
}
5041
/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 *	Selects PHY register access routines based on the PCI product
 *	ID / chip type, resets the PHY, and attaches the MII layer.
 */
static void
wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/* Pick the PHY access method that matches the attached PHY. */
	switch (prodid) {
	case PCI_PRODUCT_INTEL_PCH_M_LM:
	case PCI_PRODUCT_INTEL_PCH_M_LC:
		/* 82577 */
		sc->sc_phytype = WMPHY_82577;
		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_PCH_D_DM:
	case PCI_PRODUCT_INTEL_PCH_D_DC:
		/* 82578 */
		sc->sc_phytype = WMPHY_82578;
		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_82801I_BM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
		/* 82567 */
		sc->sc_phytype = WMPHY_BM;
		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
		break;
	default:
		if (sc->sc_type >= WM_T_80003) {
			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
		} else if (sc->sc_type >= WM_T_82544) {
			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
		} else {
			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
		}
		break;

	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* if failed, retry with *_bm_* */
		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;

		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: fall back to a "none" medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		sc->sc_phytype = WMPHY_NONE;
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}
}
5139
5140 /*
5141 * wm_gmii_mediastatus: [ifmedia interface function]
5142 *
5143 * Get the current interface media status on a 1000BASE-T device.
5144 */
5145 static void
5146 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5147 {
5148 struct wm_softc *sc = ifp->if_softc;
5149
5150 ether_mediastatus(ifp, ifmr);
5151 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
5152 sc->sc_flowflags;
5153 }
5154
/*
 * wm_gmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 *	Auto media (or post-82543 chips) let the MAC follow the PHY;
 *	otherwise the MAC speed/duplex are forced to match the selected
 *	medium.  Returns 0 on success or the mii_mediachg() error.
 */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	int rc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
	sc->sc_ctrl |= CTRL_SLU;
	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
	    || (sc->sc_type > WM_T_82543)) {
		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
	} else {
		/* Force the MAC's speed/duplex to the selected medium. */
		sc->sc_ctrl &= ~CTRL_ASDE;
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		if (ife->ifm_media & IFM_FDX)
			sc->sc_ctrl |= CTRL_FD;
		switch(IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			sc->sc_ctrl |= CTRL_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sc_ctrl |= CTRL_SPEED_100;
			break;
		case IFM_1000_T:
			sc->sc_ctrl |= CTRL_SPEED_1000;
			break;
		default:
			panic("wm_gmii_mediachange: bad media 0x%x",
			    ife->ifm_media);
		}
	}
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	if (sc->sc_type <= WM_T_82543)
		wm_gmii_reset(sc);

	/* ENXIO just means "no PHY attached"; don't treat it as failure. */
	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		return 0;
	return rc;
}
5203
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

/*
 * i82543_mii_sendbits:
 *
 *	Bit-bang `nbits' bits of `data' out the 82543's software-defined
 *	MDIO pin, MSB first, pulsing MDI_CLK for each bit.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	v = CSR_READ(sc, WMREG_CTRL);
	/* Configure the data and clock pins as outputs. */
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		/* Present the data bit, then raise and lower the clock. */
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
5230
/*
 * i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits in from the 82543's software-defined MDIO pin,
 *	MSB first, returning the assembled value.  The MDIO pin is left
 *	as an input (MDI_DIR cleared) so the PHY can drive it.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One clock for the turnaround before sampling data. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		/* Raise the clock, then sample the data pin. */
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One trailing clock to finish the cycle. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}
5264
5265 #undef MDI_IO
5266 #undef MDI_DIR
5267 #undef MDI_CLK
5268
/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 *	Uses the bit-banged MDIO interface: 32 bits of preamble, then
 *	the 14-bit read command frame, then 16 bits of data back.
 */
static int
wm_gmii_i82543_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int rv;

	/* Preamble: 32 one-bits to synchronize the PHY. */
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    device_xname(sc->sc_dev), phy, reg, rv));

	return (rv);
}
5291
/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 *	Uses the bit-banged MDIO interface: 32 bits of preamble, then a
 *	full 32-bit write frame carrying the command, address, and data.
 */
static void
wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);

	/* Preamble: 32 one-bits to synchronize the PHY. */
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}
5307
/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 *	Uses the MDIC register: start the read, then poll for READY
 *	(up to 320 * 10us).  Returns 0 on timeout, MDIC error, or an
 *	all-ones (no device) response.
 */
static int
wm_gmii_i82544_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	uint32_t mdic = 0;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		/* All-ones reads back when nothing responds; map to 0. */
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}
5348
/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 *	Uses the MDIC register: start the write with the data, then poll
 *	for READY (up to 320 * 10us); failures are only logged.
 */
static void
wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	uint32_t mdic = 0;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
}
5378
/*
 * wm_gmii_i80003_readreg:	[mii interface function]
 *
 *	Read a PHY register on the kumeran.
 *	Selects the GG82563 register page first, then reads through the
 *	standard MDIC path.  This could be handled by the PHY layer if
 *	we didn't have to lock the resource ...
 */
static int
wm_gmii_i80003_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (phy != 1) /* only one PHY on kumeran bus */
		return 0;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	/* Select the register page; high registers use the alternate select. */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait more 200us for a bug of the ready bit in the MDIC register */
	delay(200);
	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	delay(200);

	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}
5417
5418 /*
5419 * wm_gmii_i80003_writereg: [mii interface function]
5420 *
5421 * Write a PHY register on the kumeran.
5422 * This could be handled by the PHY layer if we didn't have to lock the
5423 * ressource ...
5424 */
5425 static void
5426 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5427 {
5428 struct wm_softc *sc = device_private(self);
5429 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5430
5431 if (phy != 1) /* only one PHY on kumeran bus */
5432 return;
5433
5434 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5435 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5436 __func__);
5437 return;
5438 }
5439
5440 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5441 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5442 reg >> GG82563_PAGE_SHIFT);
5443 } else {
5444 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5445 reg >> GG82563_PAGE_SHIFT);
5446 }
5447 /* Wait more 200us for a bug of the ready bit in the MDIC register */
5448 delay(200);
5449 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5450 delay(200);
5451
5452 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5453 }
5454
5455 /*
5456 * wm_gmii_bm_readreg: [mii interface function]
5457 *
5458 * Read a PHY register on the kumeran
5459 * This could be handled by the PHY layer if we didn't have to lock the
5460 * ressource ...
5461 */
5462 static int
5463 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5464 {
5465 struct wm_softc *sc = device_private(self);
5466 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5467 int rv;
5468
5469 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5470 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5471 __func__);
5472 return 0;
5473 }
5474
5475 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
5476 if (phy == 1)
5477 wm_gmii_i82544_writereg(self, phy, 0x1f,
5478 reg);
5479 else
5480 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5481 reg >> GG82563_PAGE_SHIFT);
5482
5483 }
5484
5485 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5486 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5487 return (rv);
5488 }
5489
5490 /*
5491 * wm_gmii_bm_writereg: [mii interface function]
5492 *
5493 * Write a PHY register on the kumeran.
5494 * This could be handled by the PHY layer if we didn't have to lock the
5495 * ressource ...
5496 */
5497 static void
5498 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5499 {
5500 struct wm_softc *sc = device_private(self);
5501 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5502
5503 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5504 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5505 __func__);
5506 return;
5507 }
5508
5509 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
5510 if (phy == 1)
5511 wm_gmii_i82544_writereg(self, phy, 0x1f,
5512 reg);
5513 else
5514 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5515 reg >> GG82563_PAGE_SHIFT);
5516
5517 }
5518
5519 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5520 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5521 }
5522
/*
 * wm_access_phy_wakeup_reg_bm:
 *
 * Read or write a BM PHY wakeup register (page 800).  Page 800 can
 * only be reached through a fixed enable/disable sequence on page 769
 * (the WUC enable page), so the register-access order below must not
 * be changed.
 *
 * offset - register offset (page number encoded in the high bits)
 * val    - value to write (!rd) or location to store the value read (rd)
 * rd     - nonzero for a read access, zero for a write access
 *
 * Caller is expected to hold any required PHY semaphore.
 */
static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
{
	struct wm_softc *sc = device_private(self);
	uint16_t regnum = BM_PHY_REG_NUM(offset);
	uint16_t wuce;

	/* XXX Gig must be disabled for MDIO accesses to page 800 */
	if (sc->sc_type == WM_T_PCH) {
		/* XXX e1000 driver do nothing... why? */
	}

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Save the WUC enable register so it can be restored afterwards. */
	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);

	/* Enable page-800 access with host wakeup disabled. */
	wuce &= ~BM_WUC_HOST_WU_BIT;
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
	    wuce | BM_WUC_ENABLE_BIT);

	/* Select page 800 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);

	/* Write page 800 */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);

	/* Access the target register through the data opcode register. */
	if (rd)
		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
	else
		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Restore the original WUC enable value. */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
}
5563
5564 /*
5565 * wm_gmii_hv_readreg: [mii interface function]
5566 *
5567 * Read a PHY register on the kumeran
5568 * This could be handled by the PHY layer if we didn't have to lock the
5569 * ressource ...
5570 */
5571 static int
5572 wm_gmii_hv_readreg(device_t self, int phy, int reg)
5573 {
5574 struct wm_softc *sc = device_private(self);
5575 uint16_t page = BM_PHY_REG_PAGE(reg);
5576 uint16_t regnum = BM_PHY_REG_NUM(reg);
5577 uint16_t val;
5578 int rv;
5579
5580 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5581 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5582 __func__);
5583 return 0;
5584 }
5585
5586 /* XXX Workaround failure in MDIO access while cable is disconnected */
5587 if (sc->sc_phytype == WMPHY_82577) {
5588 /* XXX must write */
5589 }
5590
5591 /* Page 800 works differently than the rest so it has its own func */
5592 if (page == BM_WUC_PAGE) {
5593 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
5594 return val;
5595 }
5596
5597 /*
5598 * Lower than page 768 works differently than the rest so it has its
5599 * own func
5600 */
5601 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
5602 printf("gmii_hv_readreg!!!\n");
5603 return 0;
5604 }
5605
5606 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
5607 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5608 page << BME1000_PAGE_SHIFT);
5609 }
5610
5611 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
5612 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5613 return (rv);
5614 }
5615
5616 /*
5617 * wm_gmii_hv_writereg: [mii interface function]
5618 *
5619 * Write a PHY register on the kumeran.
5620 * This could be handled by the PHY layer if we didn't have to lock the
5621 * ressource ...
5622 */
5623 static void
5624 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
5625 {
5626 struct wm_softc *sc = device_private(self);
5627 uint16_t page = BM_PHY_REG_PAGE(reg);
5628 uint16_t regnum = BM_PHY_REG_NUM(reg);
5629
5630 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5631 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5632 __func__);
5633 return;
5634 }
5635
5636 /* XXX Workaround failure in MDIO access while cable is disconnected */
5637
5638 /* Page 800 works differently than the rest so it has its own func */
5639 if (page == BM_WUC_PAGE) {
5640 uint16_t tmp;
5641
5642 tmp = val;
5643 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
5644 return;
5645 }
5646
5647 /*
5648 * Lower than page 768 works differently than the rest so it has its
5649 * own func
5650 */
5651 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
5652 printf("gmii_hv_writereg!!!\n");
5653 return;
5654 }
5655
5656 /*
5657 * XXX Workaround MDIO accesses being disabled after entering IEEE
5658 * Power Down (whenever bit 11 of the PHY control register is set)
5659 */
5660
5661 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
5662 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5663 page << BME1000_PAGE_SHIFT);
5664 }
5665
5666 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
5667 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5668 }
5669
/*
 * wm_gmii_statchg: [mii interface function]
 *
 * Callback from MII layer when media changes.  Reprograms flow
 * control, collision distance, and (on i80003) the kumeran
 * half-duplex control and inter-packet gap.  The register updates
 * are order-dependent; do not reorder them.
 */
static void
wm_gmii_statchg(device_t self)
{
	struct wm_softc *sc = device_private(self);
	struct mii_data *mii = &sc->sc_mii;

	/* Start from a clean slate; the bits are re-set below as needed. */
	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		/* Latch the negotiated flags and strip them from the media word. */
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/* Translate flow-control flags into CTRL/FCRTL register bits. */
	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	/* Collision distance depends on duplex. */
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	/* Push the updated shadow registers out to the hardware. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	/* Pre-82543 chips have FCRTL at the old register offset. */
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
	    : WMREG_FCRTL, sc->sc_fcrtl);
	if (sc->sc_type == WM_T_80003) {
		/* i80003: kumeran HD control and TIPG track link speed. */
		switch(IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}
5733
5734 /*
5735 * wm_kmrn_readreg:
5736 *
5737 * Read a kumeran register
5738 */
5739 static int
5740 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5741 {
5742 int rv;
5743
5744 if (sc->sc_flags == WM_F_SWFW_SYNC) {
5745 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5746 aprint_error_dev(sc->sc_dev,
5747 "%s: failed to get semaphore\n", __func__);
5748 return 0;
5749 }
5750 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
5751 if (wm_get_swfwhw_semaphore(sc)) {
5752 aprint_error_dev(sc->sc_dev,
5753 "%s: failed to get semaphore\n", __func__);
5754 return 0;
5755 }
5756 }
5757
5758 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5759 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5760 KUMCTRLSTA_REN);
5761 delay(2);
5762
5763 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5764
5765 if (sc->sc_flags == WM_F_SWFW_SYNC)
5766 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5767 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
5768 wm_put_swfwhw_semaphore(sc);
5769
5770 return (rv);
5771 }
5772
5773 /*
5774 * wm_kmrn_writereg:
5775 *
5776 * Write a kumeran register
5777 */
5778 static void
5779 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5780 {
5781
5782 if (sc->sc_flags == WM_F_SWFW_SYNC) {
5783 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5784 aprint_error_dev(sc->sc_dev,
5785 "%s: failed to get semaphore\n", __func__);
5786 return;
5787 }
5788 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
5789 if (wm_get_swfwhw_semaphore(sc)) {
5790 aprint_error_dev(sc->sc_dev,
5791 "%s: failed to get semaphore\n", __func__);
5792 return;
5793 }
5794 }
5795
5796 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5797 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5798 (val & KUMCTRLSTA_MASK));
5799
5800 if (sc->sc_flags == WM_F_SWFW_SYNC)
5801 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5802 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
5803 wm_put_swfwhw_semaphore(sc);
5804 }
5805
5806 static int
5807 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5808 {
5809 uint32_t eecd = 0;
5810
5811 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
5812 || sc->sc_type == WM_T_82583) {
5813 eecd = CSR_READ(sc, WMREG_EECD);
5814
5815 /* Isolate bits 15 & 16 */
5816 eecd = ((eecd >> 15) & 0x03);
5817
5818 /* If both bits are set, device is Flash type */
5819 if (eecd == 0x03)
5820 return 0;
5821 }
5822 return 1;
5823 }
5824
5825 static int
5826 wm_get_swsm_semaphore(struct wm_softc *sc)
5827 {
5828 int32_t timeout;
5829 uint32_t swsm;
5830
5831 /* Get the FW semaphore. */
5832 timeout = 1000 + 1; /* XXX */
5833 while (timeout) {
5834 swsm = CSR_READ(sc, WMREG_SWSM);
5835 swsm |= SWSM_SWESMBI;
5836 CSR_WRITE(sc, WMREG_SWSM, swsm);
5837 /* if we managed to set the bit we got the semaphore. */
5838 swsm = CSR_READ(sc, WMREG_SWSM);
5839 if (swsm & SWSM_SWESMBI)
5840 break;
5841
5842 delay(50);
5843 timeout--;
5844 }
5845
5846 if (timeout == 0) {
5847 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
5848 /* Release semaphores */
5849 wm_put_swsm_semaphore(sc);
5850 return 1;
5851 }
5852 return 0;
5853 }
5854
5855 static void
5856 wm_put_swsm_semaphore(struct wm_softc *sc)
5857 {
5858 uint32_t swsm;
5859
5860 swsm = CSR_READ(sc, WMREG_SWSM);
5861 swsm &= ~(SWSM_SWESMBI);
5862 CSR_WRITE(sc, WMREG_SWSM, swsm);
5863 }
5864
5865 static int
5866 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5867 {
5868 uint32_t swfw_sync;
5869 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5870 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
5871 int timeout = 200;
5872
5873 for(timeout = 0; timeout < 200; timeout++) {
5874 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5875 if (wm_get_swsm_semaphore(sc)) {
5876 aprint_error_dev(sc->sc_dev,
5877 "%s: failed to get semaphore\n",
5878 __func__);
5879 return 1;
5880 }
5881 }
5882 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5883 if ((swfw_sync & (swmask | fwmask)) == 0) {
5884 swfw_sync |= swmask;
5885 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5886 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5887 wm_put_swsm_semaphore(sc);
5888 return 0;
5889 }
5890 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5891 wm_put_swsm_semaphore(sc);
5892 delay(5000);
5893 }
5894 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5895 device_xname(sc->sc_dev), mask, swfw_sync);
5896 return 1;
5897 }
5898
5899 static void
5900 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5901 {
5902 uint32_t swfw_sync;
5903
5904 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5905 while (wm_get_swsm_semaphore(sc) != 0)
5906 continue;
5907 }
5908 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5909 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5910 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5911 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5912 wm_put_swsm_semaphore(sc);
5913 }
5914
5915 static int
5916 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5917 {
5918 uint32_t ext_ctrl;
5919 int timeout = 200;
5920
5921 for(timeout = 0; timeout < 200; timeout++) {
5922 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5923 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5924 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5925
5926 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5927 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5928 return 0;
5929 delay(5000);
5930 }
5931 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5932 device_xname(sc->sc_dev), ext_ctrl);
5933 return 1;
5934 }
5935
5936 static void
5937 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5938 {
5939 uint32_t ext_ctrl;
5940 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5941 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5942 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5943 }
5944
5945 static int
5946 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5947 {
5948 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5949 uint8_t bank_high_byte;
5950 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5951
5952 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
5953 /* Value of bit 22 corresponds to the flash bank we're on. */
5954 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5955 } else {
5956 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5957 if ((bank_high_byte & 0xc0) == 0x80)
5958 *bank = 0;
5959 else {
5960 wm_read_ich8_byte(sc, act_offset + bank1_offset,
5961 &bank_high_byte);
5962 if ((bank_high_byte & 0xc0) == 0x80)
5963 *bank = 1;
5964 else {
5965 aprint_error_dev(sc->sc_dev,
5966 "EEPROM not present\n");
5967 return -1;
5968 }
5969 }
5970 }
5971
5972 return 0;
5973 }
5974
5975 /******************************************************************************
5976 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5977 * register.
5978 *
5979 * sc - Struct containing variables accessed by shared code
5980 * offset - offset of word in the EEPROM to read
5981 * data - word read from the EEPROM
5982 * words - number of words to read
5983 *****************************************************************************/
5984 static int
5985 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5986 {
5987 int32_t error = 0;
5988 uint32_t flash_bank = 0;
5989 uint32_t act_offset = 0;
5990 uint32_t bank_offset = 0;
5991 uint16_t word = 0;
5992 uint16_t i = 0;
5993
5994 /* We need to know which is the valid flash bank. In the event
5995 * that we didn't allocate eeprom_shadow_ram, we may not be
5996 * managing flash_bank. So it cannot be trusted and needs
5997 * to be updated with each read.
5998 */
5999 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6000 if (error) {
6001 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6002 __func__);
6003 return error;
6004 }
6005
6006 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
6007 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6008
6009 error = wm_get_swfwhw_semaphore(sc);
6010 if (error) {
6011 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6012 __func__);
6013 return error;
6014 }
6015
6016 for (i = 0; i < words; i++) {
6017 /* The NVM part needs a byte offset, hence * 2 */
6018 act_offset = bank_offset + ((offset + i) * 2);
6019 error = wm_read_ich8_word(sc, act_offset, &word);
6020 if (error) {
6021 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6022 __func__);
6023 break;
6024 }
6025 data[i] = word;
6026 }
6027
6028 wm_put_swfwhw_semaphore(sc);
6029 return error;
6030 }
6031
/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.  Returns 0 when the controller is ready for a new cycle,
 * nonzero on failure (flash descriptor invalid, or a previous cycle never
 * finished).
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;	/* assume failure until proven otherwise */
	int32_t i = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* May be check the Flash Des Valid bit in Hw status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		return error;
	}

	/* Clear FCERR in Hw status by writing 1 */
	/* Clear DAEL in Hw status by writing a 1 */
	/* (these status bits are write-1-to-clear) */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/* Either we should have a hardware SPI cycle in progress bit to check
	 * against, in order to start a new cycle or FDONE bit should be changed
	 * in the hardware so that it is 1 after harware reset, which can then be
	 * used as an indication whether a cycle is in progress or has been
	 * completed .. we should also have some software semaphore mechanism to
	 * guard FDONE or the cycle in progress bit so that two threads access to
	 * those bits can be sequentiallized or a way so that 2 threads dont
	 * start the cycle at the same time */

	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/* There is no cycle running at present, so we can start a cycle */
		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/* otherwise poll for sometime so the current cycle has a chance
		 * to end before giving up. */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/* Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done. */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}
6093
6094 /******************************************************************************
6095 * This function starts a flash cycle and waits for its completion
6096 *
6097 * sc - The pointer to the hw structure
6098 ****************************************************************************/
6099 static int32_t
6100 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6101 {
6102 uint16_t hsflctl;
6103 uint16_t hsfsts;
6104 int32_t error = 1;
6105 uint32_t i = 0;
6106
6107 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6108 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6109 hsflctl |= HSFCTL_GO;
6110 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6111
6112 /* wait till FDONE bit is set to 1 */
6113 do {
6114 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6115 if (hsfsts & HSFSTS_DONE)
6116 break;
6117 delay(1);
6118 i++;
6119 } while (i < timeout);
6120 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) {
6121 error = 0;
6122 }
6123 return error;
6124 }
6125
6126 /******************************************************************************
6127 * Reads a byte or word from the NVM using the ICH8 flash access registers.
6128 *
6129 * sc - The pointer to the hw structure
6130 * index - The index of the byte or word to read.
6131 * size - Size of data to read, 1=byte 2=word
6132 * data - Pointer to the word to store the value read.
6133 *****************************************************************************/
6134 static int32_t
6135 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6136 uint32_t size, uint16_t* data)
6137 {
6138 uint16_t hsfsts;
6139 uint16_t hsflctl;
6140 uint32_t flash_linear_address;
6141 uint32_t flash_data = 0;
6142 int32_t error = 1;
6143 int32_t count = 0;
6144
6145 if (size < 1 || size > 2 || data == 0x0 ||
6146 index > ICH_FLASH_LINEAR_ADDR_MASK)
6147 return error;
6148
6149 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6150 sc->sc_ich8_flash_base;
6151
6152 do {
6153 delay(1);
6154 /* Steps */
6155 error = wm_ich8_cycle_init(sc);
6156 if (error)
6157 break;
6158
6159 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6160 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6161 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
6162 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6163 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6164
6165 /* Write the last 24 bits of index into Flash Linear address field in
6166 * Flash Address */
6167 /* TODO: TBD maybe check the index against the size of flash */
6168
6169 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6170
6171 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6172
6173 /* Check if FCERR is set to 1, if set to 1, clear it and try the whole
6174 * sequence a few more times, else read in (shift in) the Flash Data0,
6175 * the order is least significant byte first msb to lsb */
6176 if (error == 0) {
6177 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6178 if (size == 1) {
6179 *data = (uint8_t)(flash_data & 0x000000FF);
6180 } else if (size == 2) {
6181 *data = (uint16_t)(flash_data & 0x0000FFFF);
6182 }
6183 break;
6184 } else {
6185 /* If we've gotten here, then things are probably completely hosed,
6186 * but if the error condition is detected, it won't hurt to give
6187 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
6188 */
6189 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6190 if (hsfsts & HSFSTS_ERR) {
6191 /* Repeat for some time before giving up. */
6192 continue;
6193 } else if ((hsfsts & HSFSTS_DONE) == 0) {
6194 break;
6195 }
6196 }
6197 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6198
6199 return error;
6200 }
6201
6202 /******************************************************************************
6203 * Reads a single byte from the NVM using the ICH8 flash access registers.
6204 *
6205 * sc - pointer to wm_hw structure
6206 * index - The index of the byte to read.
6207 * data - Pointer to a byte to store the value read.
6208 *****************************************************************************/
6209 static int32_t
6210 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6211 {
6212 int32_t status;
6213 uint16_t word = 0;
6214
6215 status = wm_read_ich8_data(sc, index, 1, &word);
6216 if (status == 0) {
6217 *data = (uint8_t)word;
6218 }
6219
6220 return status;
6221 }
6222
6223 /******************************************************************************
6224 * Reads a word from the NVM using the ICH8 flash access registers.
6225 *
6226 * sc - pointer to wm_hw structure
6227 * index - The starting byte index of the word to read.
6228 * data - Pointer to a word to store the value read.
6229 *****************************************************************************/
6230 static int32_t
6231 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6232 {
6233 int32_t status;
6234
6235 status = wm_read_ich8_data(sc, index, 2, data);
6236 return status;
6237 }
6238
6239 static int
6240 wm_check_mng_mode(struct wm_softc *sc)
6241 {
6242 int rv;
6243
6244 switch (sc->sc_type) {
6245 case WM_T_ICH8:
6246 case WM_T_ICH9:
6247 case WM_T_ICH10:
6248 case WM_T_PCH:
6249 rv = wm_check_mng_mode_ich8lan(sc);
6250 break;
6251 case WM_T_82574:
6252 case WM_T_82583:
6253 rv = wm_check_mng_mode_82574(sc);
6254 break;
6255 case WM_T_82571:
6256 case WM_T_82572:
6257 case WM_T_82573:
6258 case WM_T_80003:
6259 rv = wm_check_mng_mode_generic(sc);
6260 break;
6261 default:
6262 /* noting to do */
6263 rv = 0;
6264 break;
6265 }
6266
6267 return rv;
6268 }
6269
6270 static int
6271 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6272 {
6273 uint32_t fwsm;
6274
6275 fwsm = CSR_READ(sc, WMREG_FWSM);
6276
6277 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6278 return 1;
6279
6280 return 0;
6281 }
6282
6283 static int
6284 wm_check_mng_mode_82574(struct wm_softc *sc)
6285 {
6286 uint16_t data;
6287
6288 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6289
6290 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6291 return 1;
6292
6293 return 0;
6294 }
6295
6296 static int
6297 wm_check_mng_mode_generic(struct wm_softc *sc)
6298 {
6299 uint32_t fwsm;
6300
6301 fwsm = CSR_READ(sc, WMREG_FWSM);
6302
6303 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6304 return 1;
6305
6306 return 0;
6307 }
6308
6309 static int
6310 wm_check_reset_block(struct wm_softc *sc)
6311 {
6312 uint32_t reg;
6313
6314 switch (sc->sc_type) {
6315 case WM_T_ICH8:
6316 case WM_T_ICH9:
6317 case WM_T_ICH10:
6318 case WM_T_PCH:
6319 reg = CSR_READ(sc, WMREG_FWSM);
6320 if ((reg & FWSM_RSPCIPHY) != 0)
6321 return 0;
6322 else
6323 return -1;
6324 break;
6325 case WM_T_82571:
6326 case WM_T_82572:
6327 case WM_T_82573:
6328 case WM_T_82574:
6329 case WM_T_82583:
6330 case WM_T_80003:
6331 reg = CSR_READ(sc, WMREG_MANC);
6332 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6333 return -1;
6334 else
6335 return 0;
6336 break;
6337 default:
6338 /* no problem */
6339 break;
6340 }
6341
6342 return 0;
6343 }
6344
6345 static void
6346 wm_get_hw_control(struct wm_softc *sc)
6347 {
6348 uint32_t reg;
6349
6350 switch (sc->sc_type) {
6351 case WM_T_82573:
6352 #if 0
6353 case WM_T_82574:
6354 case WM_T_82583:
6355 /*
6356 * FreeBSD's em driver has the function for 82574 to checks
6357 * the management mode, but it's not used. Why?
6358 */
6359 #endif
6360 reg = CSR_READ(sc, WMREG_SWSM);
6361 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6362 break;
6363 case WM_T_82571:
6364 case WM_T_82572:
6365 case WM_T_80003:
6366 case WM_T_ICH8:
6367 case WM_T_ICH9:
6368 case WM_T_ICH10:
6369 case WM_T_PCH:
6370 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6371 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
6372 break;
6373 default:
6374 break;
6375 }
6376 }
6377
/*
 * wm_check_for_link:
 *
 * Examine the signal-detect pin, link-up status, and /C/ ordered
 * sets to drive the TBI (fiber) link state machine.  Always
 * returns 0.
 *
 * XXX Currently TBI only
 */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	/* Signal-detect polarity: asserted-high only after the 82544. */
	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)
		    ));

	/*
	 * SWDPIN   LU  RXCW
	 *      0    0     0
	 *      0    0     1	(should not happen)
	 *      0    1     0	(should not happen)
	 *      0    1     1	(should not happen)
	 *      1    0     0	Disable autonego and force linkup
	 *      1    0     1	got /C/ but not linkup yet
	 *      1    1     0	(linkup)
	 *      1    1     1	If IFM_AUTO, back to autonego
	 *
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		/* Signal present, no link, no /C/: peer can't autonegotiate. */
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
			__func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: CTRL was updated TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if(((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		/* Link up and receiving /C/: re-enable autonegotiation. */
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
			__func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		/* Receiving /C/ but no link yet: keep waiting. */
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
			status));
	}

	return 0;
}
6448
/*
 * Workaround for pch's PHYs (82577/82578).  Several of the steps
 * from the reference (e1000) implementation are still unimplemented
 * stubs, marked XXX below.
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/*
			 * XXX 6 bit shift? Why? Is it page2?
			 * NOTE(review): magic register numbers taken from
			 * the reference driver; semantics unconfirmed.
			 */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}
6487
6488 static void
6489 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
6490 {
6491 int k1_enable = sc->sc_nvm_k1_enabled;
6492
6493 /* XXX acquire semaphore */
6494
6495 if (link) {
6496 k1_enable = 0;
6497
6498 /* Link stall fix for link up */
6499 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
6500 } else {
6501 /* Link stall fix for link down */
6502 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
6503 }
6504
6505 wm_configure_k1_ich8lan(sc, k1_enable);
6506
6507 /* XXX release semaphore */
6508 }
6509
6510 static void
6511 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
6512 {
6513 uint32_t ctrl, ctrl_ext, tmp;
6514 uint16_t kmrn_reg;
6515
6516 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
6517
6518 if (k1_enable)
6519 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
6520 else
6521 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
6522
6523 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
6524
6525 delay(20);
6526
6527 ctrl = CSR_READ(sc, WMREG_CTRL);
6528 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6529
6530 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
6531 tmp |= CTRL_FRCSPD;
6532
6533 CSR_WRITE(sc, WMREG_CTRL, tmp);
6534 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
6535 delay(20);
6536
6537 CSR_WRITE(sc, WMREG_CTRL, ctrl);
6538 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6539 delay(20);
6540 }
6541