/*	$NetBSD: if_dge.c,v 1.38.4.4 2016/07/09 20:25:04 skrll Exp $	*/

/*
 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
 * All rights reserved.
 *
 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      SUNET, Swedish University Computer Network.
 * 4. The name of SUNET may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL SUNET
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel 82597EX Ten Gigabit Ethernet controller.
 *
 * TODO (in no specific order):
 *	HW VLAN support.
 *	TSE offloading (needs kernel changes...)
 *	RAIDC (receive interrupt delay adaptation)
 *	Use memory > 4GB.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_dge.c,v 1.38.4.4 2016/07/09 20:25:04 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_dgereg.h>

/*
 * The receive engine may sometimes become off-by-one when writing back
 * chained descriptors.  Avoid this by allocating a large chunk of
 * memory and using it instead, so that descriptors never need to be
 * chained.  The bug only shows up with chained descriptors under
 * heavy load.
 */
#define DGE_OFFBYONE_RXBUG

#define DGE_EVENT_COUNTERS
#define DGE_DEBUG

#ifdef DGE_DEBUG
#define DGE_DEBUG_LINK		0x01
#define DGE_DEBUG_TX		0x02
#define DGE_DEBUG_RX		0x04
#define DGE_DEBUG_CKSUM		0x08
int	dge_debug = 0;

#define DPRINTF(x, y)	if (dge_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* DGE_DEBUG */

/*
 * Transmit descriptor list size.  We allow up to 100 DMA segments per
 * packet (Intel reports jumbo frame packets with as many as 80 DMA
 * segments when using 16k buffers).
 */
#define DGE_NTXSEGS		100
#define DGE_IFQUEUELEN		20000
#define DGE_TXQUEUELEN		2048
#define DGE_TXQUEUELEN_MASK	(DGE_TXQUEUELEN - 1)
#define DGE_TXQUEUE_GC		(DGE_TXQUEUELEN / 8)
#define DGE_NTXDESC		1024
#define DGE_NTXDESC_MASK	(DGE_NTXDESC - 1)
#define DGE_NEXTTX(x)		(((x) + 1) & DGE_NTXDESC_MASK)
#define DGE_NEXTTXS(x)		(((x) + 1) & DGE_TXQUEUELEN_MASK)

/*
 * Receive descriptor list size.
 * Packet is of size MCLBYTES, and for jumbo packets buffers may
 * be chained.  Due to the nature of the card (high-speed), keep this
 * ring large.  With 2k buffers the ring can store 400 jumbo packets,
 * which at full speed will be received in just under 3ms.
 */
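/*
 * (Sanity check of the numbers above, assuming ~9000-byte jumbo frames:
 * each frame spans ceil(9018 / 2048) = 5 buffers, so the ring holds about
 * 2048 / 5 ~= 400 frames; 400 * 9018 bytes * 8 bits at 10 Gb/s ~= 2.9 ms.)
 */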
#define DGE_NRXDESC		2048
#define DGE_NRXDESC_MASK	(DGE_NRXDESC - 1)
#define DGE_NEXTRX(x)		(((x) + 1) & DGE_NRXDESC_MASK)
/*
 * # of descriptors between head and written descriptors.
 * This works around two errata.
 */
#define DGE_RXSPACE		10
#define DGE_PREVRX(x)		(((x) - DGE_RXSPACE) & DGE_NRXDESC_MASK)
/*
 * Receive descriptor fetch thresholds.  These are values recommended
 * by Intel; do not touch them unless you know what you are doing.
 */
#define RXDCTL_PTHRESH_VAL	128
#define RXDCTL_HTHRESH_VAL	16
#define RXDCTL_WTHRESH_VAL	16


/*
 * Tweakable parameters; default values.
 */
#define FCRTH	0x30000	/* Send XOFF water mark */
#define FCRTL	0x28000	/* Send XON water mark */
#define RDTR	0x20	/* Interrupt delay after receive, .8192us units */
#define TIDV	0x20	/* Interrupt delay after send, .8192us units */
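/* (With the default of 0x20, both delays are 32 * 0.8192us ~= 26us.) */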

/*
 * Control structures are DMA'd to the i82597 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct dge_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct dge_tdes wcd_txdescs[DGE_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct dge_rdes wcd_rxdescs[DGE_NRXDESC];
};

#define DGE_CDOFF(x)	offsetof(struct dge_control_data, x)
#define DGE_CDTXOFF(x)	DGE_CDOFF(wcd_txdescs[(x)])
#define DGE_CDRXOFF(x)	DGE_CDOFF(wcd_rxdescs[(x)])

/*
 * The DGE interface has a higher maximum MTU than normal jumbo frames.
 */
#define DGE_MAX_MTU	16288	/* Max MTU size for this interface */

/*
 * Software state for transmit jobs.
 */
struct dge_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct dge_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct dge_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */

	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	const struct dge_product *sc_dgep; /* Pointer to the dge_product entry */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pt;
	int sc_mmrbc;			/* Max PCIX memory read byte count */

	void *sc_ih;			/* interrupt cookie */

	struct ifmedia sc_media;

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct dge_txsoft sc_txsoft[DGE_TXQUEUELEN];
	struct dge_rxsoft sc_rxsoft[DGE_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct dge_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->wcd_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef DGE_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[DGE_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
#endif /* DGE_EVENT_COUNTERS */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl0;		/* prototype CTRL0 register */
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */

	int sc_mchash_type;		/* multicast filter offset */

	uint16_t sc_eeprom[EEPROM_SIZE];

	krndsource_t rnd_source;	/* random source */
#ifdef DGE_OFFBYONE_RXBUG
	void *sc_bugbuf;
	SLIST_HEAD(, rxbugentry) sc_buglist;
	bus_dmamap_t sc_bugmap;
	struct rxbugentry *sc_entry;
#endif
};

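/*
 * The receive chain macros below assemble multi-buffer packets: sc_rxhead
 * and sc_rxtail point at the first and last mbuf of the packet being
 * built, and sc_rxtailp points at the m_next field to be filled in next.
 */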
#define DGE_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define DGE_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define DGE_F_BUS64		0x20	/* bus is 64-bit */
#define DGE_F_PCIX		0x40	/* bus is PCI-X */

#ifdef DGE_EVENT_COUNTERS
#define DGE_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define DGE_EVCNT_INCR(ev)	/* nothing */
#endif

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define DGE_CDTXADDR(sc, x)	((sc)->sc_cddma + DGE_CDTXOFF((x)))
#define DGE_CDRXADDR(sc, x)	((sc)->sc_cddma + DGE_CDRXOFF((x)))

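/*
 * Sync (n) transmit descriptors starting at descriptor (x); when the
 * range wraps past the end of the ring, the sync is done in two pieces.
 */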
#define DGE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > DGE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    DGE_CDTXOFF(__x), sizeof(struct dge_tdes) *		\
		    (DGE_NTXDESC - __x), (ops));			\
		__n -= (DGE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    DGE_CDTXOFF(__x), sizeof(struct dge_tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define DGE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    DGE_CDRXOFF((x)), sizeof(struct dge_rdes), (ops));		\
} while (/*CONSTCOND*/0)

#ifdef DGE_OFFBYONE_RXBUG
#define DGE_INIT_RXDESC(sc, x)						\
do {									\
	struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__rxd->dr_baddrl = htole32(sc->sc_bugmap->dm_segs[0].ds_addr +	\
	    (mtod((__m), char *) - (char *)sc->sc_bugbuf));		\
	__rxd->dr_baddrh = 0;						\
	__rxd->dr_len = 0;						\
	__rxd->dr_cksum = 0;						\
	__rxd->dr_status = 0;						\
	__rxd->dr_errors = 0;						\
	__rxd->dr_special = 0;						\
	DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), DGE_RDT, (x));					\
} while (/*CONSTCOND*/0)
#else
#define DGE_INIT_RXDESC(sc, x)						\
do {									\
	struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->dr_baddrl =						\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->dr_baddrh = 0;						\
	__rxd->dr_len = 0;						\
	__rxd->dr_cksum = 0;						\
	__rxd->dr_status = 0;						\
	__rxd->dr_errors = 0;						\
	__rxd->dr_special = 0;						\
	DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), DGE_RDT, (x));					\
} while (/*CONSTCOND*/0)
#endif

#ifdef DGE_OFFBYONE_RXBUG
/*
 * Allocation constants.  Much memory may be used for this.
 */
#ifndef DGE_BUFFER_SIZE
#define DGE_BUFFER_SIZE	DGE_MAX_MTU
#endif
#define DGE_NBUFFERS	(4*DGE_NRXDESC)
#define DGE_RXMEM	(DGE_NBUFFERS*DGE_BUFFER_SIZE)
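/* (With the defaults: 4 * 2048 buffers * 16288 bytes each, ~127 MB.) */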

struct rxbugentry {
	SLIST_ENTRY(rxbugentry) rb_entry;
	int rb_slot;
};

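/*
 * Allocate one large DMA-safe chunk for receive buffers (the workaround
 * for the off-by-one receive bug) and carve it into DGE_BUFFER_SIZE
 * pieces, kept on a free list of rxbugentry slots.
 */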
static int
dge_alloc_rcvmem(struct dge_softc *sc)
{
	char *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct rxbugentry *entry;

	state = error = 0;

	if (bus_dmamem_alloc(sc->sc_dmat, DGE_RXMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, DGE_RXMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't map DMA buffers (%d bytes)\n",
		    (int)DGE_RXMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmat, DGE_RXMEM, 1, DGE_RXMEM, 0,
	    BUS_DMA_NOWAIT, &sc->sc_bugmap)) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_bugmap,
	    kva, DGE_RXMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->sc_bugbuf = (void *)kva;
	SLIST_INIT(&sc->sc_buglist);

	/*
	 * Now divide it up into DGE_BUFFER_SIZE pieces and save the addresses
	 * in an array.
	 */
	if ((entry = malloc(sizeof(*entry) * DGE_NBUFFERS,
	    M_DEVBUF, M_NOWAIT)) == NULL) {
		error = ENOBUFS;
		goto out;
	}
	sc->sc_entry = entry;
	for (i = 0; i < DGE_NBUFFERS; i++) {
		entry[i].rb_slot = i;
		SLIST_INSERT_HEAD(&sc->sc_buglist, &entry[i], rb_entry);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmat, sc->sc_bugmap);
		case 3:
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_bugmap);
		case 2:
			bus_dmamem_unmap(sc->sc_dmat, kva, DGE_RXMEM);
		case 1:
			bus_dmamem_free(sc->sc_dmat, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
dge_getbuf(struct dge_softc *sc)
{
	struct rxbugentry *entry;

	entry = SLIST_FIRST(&sc->sc_buglist);

	if (entry == NULL) {
		printf("%s: no free RX buffers\n", device_xname(sc->sc_dev));
		return NULL;
	}

	SLIST_REMOVE_HEAD(&sc->sc_buglist, rb_entry);
	return (char *)sc->sc_bugbuf + entry->rb_slot * DGE_BUFFER_SIZE;
}

/*
 * Release a jumbo buffer.
 */
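/*
 * (This is the free routine for the external storage attached to receive
 * mbufs built from these buffers; it returns the buffer's slot to the
 * free list and the mbuf, if any, to its pool.)
 */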
static void
dge_freebuf(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct rxbugentry *entry;
	struct dge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct dge_softc *)arg;

	if (sc == NULL)
		panic("dge_freebuf: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((char *)buf - (char *)sc->sc_bugbuf) / DGE_BUFFER_SIZE;

	if ((i < 0) || (i >= DGE_NBUFFERS))
		panic("dge_freebuf: asked to free buffer %d!", i);

	s = splvm();
	entry = sc->sc_entry + i;
	SLIST_INSERT_HEAD(&sc->sc_buglist, entry, rb_entry);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}
#endif

static void dge_start(struct ifnet *);
static void dge_watchdog(struct ifnet *);
static int dge_ioctl(struct ifnet *, u_long, void *);
static int dge_init(struct ifnet *);
static void dge_stop(struct ifnet *, int);

static bool dge_shutdown(device_t, int);

static void dge_reset(struct dge_softc *);
static void dge_rxdrain(struct dge_softc *);
static int dge_add_rxbuf(struct dge_softc *, int);

static void dge_set_filter(struct dge_softc *);

static int dge_intr(void *);
static void dge_txintr(struct dge_softc *);
static void dge_rxintr(struct dge_softc *);
static void dge_linkintr(struct dge_softc *, uint32_t);

static int dge_match(device_t, cfdata_t, void *);
static void dge_attach(device_t, device_t, void *);

static int dge_read_eeprom(struct dge_softc *sc);
static int dge_eeprom_clockin(struct dge_softc *sc);
static void dge_eeprom_clockout(struct dge_softc *sc, int bit);
static uint16_t dge_eeprom_word(struct dge_softc *sc, int addr);
static int dge_xgmii_mediachange(struct ifnet *);
static void dge_xgmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void dge_xgmii_reset(struct dge_softc *);
static void dge_xgmii_writereg(struct dge_softc *, int, int, int);


CFATTACH_DECL_NEW(dge, sizeof(struct dge_softc),
    dge_match, dge_attach, NULL, NULL);

#ifdef DGE_EVENT_COUNTERS
#if DGE_NTXSEGS > 100
#error Update dge_txseg_evcnt_names
#endif
static char (*dge_txseg_evcnt_names)[DGE_NTXSEGS][8 /* "txseg00" + \0 */];
#endif /* DGE_EVENT_COUNTERS */

/*
 * Devices supported by this driver.
 */
static const struct dge_product {
	pci_vendor_id_t		dgep_vendor;
	pci_product_id_t	dgep_product;
	const char		*dgep_name;
	int			dgep_flags;
#define DGEP_F_10G_LR		0x01
#define DGEP_F_10G_SR		0x02
} dge_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX,
	  "Intel i82597EX 10GbE-LR Ethernet",
	  DGEP_F_10G_LR },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR,
	  "Intel i82597EX 10GbE-SR Ethernet",
	  DGEP_F_10G_SR },

	{ 0, 0,
	  NULL,
	  0 },
};

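/*
 * Look up a device in the supported-products table above; returns
 * NULL if the device is not one we handle.
 */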
static const struct dge_product *
dge_lookup(const struct pci_attach_args *pa)
{
	const struct dge_product *dgep;

	for (dgep = dge_products; dgep->dgep_name != NULL; dgep++) {
		if (PCI_VENDOR(pa->pa_id) == dgep->dgep_vendor &&
		    PCI_PRODUCT(pa->pa_id) == dgep->dgep_product)
			return dgep;
	}
	return NULL;
}

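/*
 * dge_match:
 *
 *	Determine if the device can be handled by this driver.
 */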
static int
dge_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (dge_lookup(pa) != NULL)
		return (1);

	return (0);
}

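/*
 * dge_attach:
 *
 *	Map the device registers, establish the interrupt, allocate and
 *	load the DMA control structures, read the MAC address from the
 *	EEPROM and attach the network interface.
 */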
static void
dge_attach(device_t parent, device_t self, void *aux)
{
	struct dge_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_dma_segment_t seg;
	int i, rseg, error;
	uint8_t enaddr[ETHER_ADDR_LEN];
	pcireg_t preg, memtype;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];
	const struct dge_product *dgep;

	sc->sc_dgep = dgep = dge_lookup(pa);
	if (dgep == NULL) {
		printf("\n");
		panic("dge_attach: impossible");
	}

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_pt = pa->pa_tag;

	pci_aprint_devinfo_fancy(pa, "Ethernet controller",
	    dgep->dgep_name, 1);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, DGE_PCI_BAR);
	if (pci_mapreg_map(pa, DGE_PCI_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/* Enable bus mastering */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, dge_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	reg = CSR_READ(sc, DGE_STATUS);
	if (reg & STATUS_BUS64)
		sc->sc_flags |= DGE_F_BUS64;

	sc->sc_flags |= DGE_F_PCIX;
	if (pci_get_capability(pa->pa_pc, pa->pa_tag,
	    PCI_CAP_PCIX,
	    &sc->sc_pcix_offset, NULL) == 0)
		aprint_error_dev(sc->sc_dev, "unable to find PCIX "
		    "capability\n");

	if (sc->sc_flags & DGE_F_PCIX) {
		switch (reg & STATUS_PCIX_MSK) {
		case STATUS_PCIX_66:
			sc->sc_bus_speed = 66;
			break;
		case STATUS_PCIX_100:
			sc->sc_bus_speed = 100;
			break;
		case STATUS_PCIX_133:
			sc->sc_bus_speed = 133;
			break;
		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown PCIXSPD %d; assuming 66MHz\n",
			    reg & STATUS_PCIX_MSK);
			sc->sc_bus_speed = 66;
		}
	} else
		sc->sc_bus_speed = (reg & STATUS_BUS64) ? 66 : 33;
	aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
	    (sc->sc_flags & DGE_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
	    (sc->sc_flags & DGE_F_PCIX) ? "PCIX" : "PCI");

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct dge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct dge_control_data), (void **)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct dge_control_data), 1,
	    sizeof(struct dge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct dge_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

#ifdef DGE_OFFBYONE_RXBUG
	if (dge_alloc_rcvmem(sc) != 0)
		return; /* Already complained */
#endif
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < DGE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, DGE_MAX_MTU,
		    DGE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < DGE_NRXDESC; i++) {
#ifdef DGE_OFFBYONE_RXBUG
		if ((error = bus_dmamap_create(sc->sc_dmat, DGE_BUFFER_SIZE, 1,
		    DGE_BUFFER_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
#else
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
#endif
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Set bits in ctrl0 register.
	 * Should get the software defined pins out of EEPROM?
	 */
	sc->sc_ctrl0 |= CTRL0_RPE | CTRL0_TPE; /* XON/XOFF */
	sc->sc_ctrl0 |= CTRL0_SDP3_DIR | CTRL0_SDP2_DIR | CTRL0_SDP1_DIR |
	    CTRL0_SDP0_DIR | CTRL0_SDP3 | CTRL0_SDP2 | CTRL0_SDP0;

	/*
	 * Reset the chip to a known state.
	 */
	dge_reset(sc);

	/*
	 * Reset the PHY.
	 */
	dge_xgmii_reset(sc);

	/*
	 * Read in EEPROM data.
	 */
	if (dge_read_eeprom(sc)) {
		aprint_error_dev(sc->sc_dev, "couldn't read EEPROM\n");
		return;
	}

	/*
	 * Get the ethernet address.
	 */
	enaddr[0] = sc->sc_eeprom[EE_ADDR01] & 0377;
	enaddr[1] = sc->sc_eeprom[EE_ADDR01] >> 8;
	enaddr[2] = sc->sc_eeprom[EE_ADDR23] & 0377;
	enaddr[3] = sc->sc_eeprom[EE_ADDR23] >> 8;
	enaddr[4] = sc->sc_eeprom[EE_ADDR45] & 0377;
	enaddr[5] = sc->sc_eeprom[EE_ADDR45] >> 8;

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Setup media stuff.
	 */
	ifmedia_init(&sc->sc_media, IFM_IMASK, dge_xgmii_mediachange,
	    dge_xgmii_mediastatus);
	if (dgep->dgep_flags & DGEP_F_10G_SR) {
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_SR, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_10G_SR);
	} else { /* XXX default is LR */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_LR, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_10G_LR);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dge_ioctl;
	ifp->if_start = dge_start;
	ifp->if_watchdog = dge_watchdog;
	ifp->if_init = dge_init;
	ifp->if_stop = dge_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(DGE_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;

	/*
	 * We can perform IPv4, TCPv4 and UDPv4 checksums both
	 * in-bound and out-bound.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

#ifdef DGE_EVENT_COUNTERS
	/* Fix segment event naming */
	if (dge_txseg_evcnt_names == NULL) {
		dge_txseg_evcnt_names =
		    malloc(sizeof(*dge_txseg_evcnt_names), M_DEVBUF, M_WAITOK);
		for (i = 0; i < DGE_NTXSEGS; i++)
			snprintf((*dge_txseg_evcnt_names)[i],
			    sizeof((*dge_txseg_evcnt_names)[i]), "txseg%d", i);
	}

	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txctx miss");

	for (i = 0; i < DGE_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), (*dge_txseg_evcnt_names)[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txdrop");

#endif /* DGE_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(self, NULL, NULL, dge_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < DGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < DGE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct dge_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * dge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static bool
dge_shutdown(device_t self, int howto)
{
	struct dge_softc *sc;

	sc = device_private(self);
	dge_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * dge_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
dge_tx_cksum(struct dge_softc *sc, struct dge_txsoft *txs, uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct dge_ctdes *t;
	uint32_t ipcs, tucs;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		return (0);
	}

	iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		DGE_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= TDESC_POPTS_IXSM;
		ipcs = DGE_TCPIP_IPCSS(offset) |
		    DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    DGE_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = DGE_TCPIP_IPCSS(offset) |
		    DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    DGE_TCPIP_IPCSE(offset + iphl - 1);
	}
	DPRINTF(DGE_DEBUG_CKSUM,
	    ("%s: CKSUM: offset %d ipcs 0x%x\n",
	    device_xname(sc->sc_dev), offset, ipcs));

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		DGE_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= TDESC_POPTS_TXSM;
		tucs = DGE_TCPIP_TUCSS(offset) |
		    DGE_TCPIP_TUCSO(offset +
			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    DGE_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = DGE_TCPIP_TUCSS(offset) |
		    DGE_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    DGE_TCPIP_TUCSE(0) /* rest of packet */;
	}

	DPRINTF(DGE_DEBUG_CKSUM,
	    ("%s: CKSUM: offset %d tucs 0x%x\n",
	    device_xname(sc->sc_dev), offset, tucs));

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		DGE_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef DGE_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			DGE_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			DGE_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct dge_ctdes *)&sc->sc_txdescs[sc->sc_txnext];
		t->dc_tcpip_ipcs = htole32(ipcs);
		t->dc_tcpip_tucs = htole32(tucs);
		t->dc_tcpip_cmdlen = htole32(TDESC_DTYP_CTD);
		t->dc_tcpip_seg = 0;
		DGE_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = DGE_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*fieldsp = fields;

	return (0);
}

/*
 * dge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
dge_start(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct dge_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < DGE_TXQUEUE_GC) {
			dge_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(DGE_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    device_xname(sc->sc_dev)));
				DGE_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				DGE_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			DGE_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs));

		DGE_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (dge_tx_cksum(sc, txs, &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumfields = 0;
		}

		cksumcmd = TDESC_DCMD_IDE | TDESC_DTYP_DATA;

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = DGE_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].dt_baddrh = 0;
			sc->sc_txdescs[nexttx].dt_baddrl =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].dt_ctl =
			    htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].dt_status = 0;
			sc->sc_txdescs[nexttx].dt_popts = cksumfields;
			sc->sc_txdescs[nexttx].dt_vlan = 0;
			lasttx = nexttx;

			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08lx, len 0x%04lx\n",
			    device_xname(sc->sc_dev), nexttx,
			    (unsigned long)le32toh(dmamap->dm_segs[seg].ds_addr),
			    (unsigned long)le32toh(dmamap->dm_segs[seg].ds_len)));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].dt_ctl |=
		    htole32(TDESC_DCMD_EOP | TDESC_DCMD_RS);

		txs->txs_lastdesc = lasttx;

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].dt_ctl)));

		/* Sync the descriptors we're using. */
		DGE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, DGE_TDT, nexttx);

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = DGE_NEXTTXS(sc->sc_txsnext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * dge_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
dge_watchdog(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	dge_txintr(sc);

	if (sc->sc_txfree != DGE_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) dge_init(ifp);
	}

	/* Try to get more packets going. */
	dge_start(ifp);
}

/*
 * dge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
dge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	pcireg_t preg;
	int s, error, mmrbc;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > DGE_MAX_MTU)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
			break;
		else if (ifp->if_flags & IFF_UP)
			error = (*ifp->if_init)(ifp);
		else
			error = 0;
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* extract link flags */
		if ((ifp->if_flags & IFF_LINK0) == 0 &&
		    (ifp->if_flags & IFF_LINK1) == 0)
			mmrbc = PCIX_MMRBC_512;
		else if ((ifp->if_flags & IFF_LINK0) == 0 &&
		    (ifp->if_flags & IFF_LINK1) != 0)
			mmrbc = PCIX_MMRBC_1024;
		else if ((ifp->if_flags & IFF_LINK0) != 0 &&
		    (ifp->if_flags & IFF_LINK1) == 0)
			mmrbc = PCIX_MMRBC_2048;
		else
			mmrbc = PCIX_MMRBC_4096;
		if (mmrbc != sc->sc_mmrbc) {
			preg = pci_conf_read(sc->sc_pc, sc->sc_pt, DGE_PCIX_CMD);
			preg &= ~PCIX_MMRBC_MSK;
			preg |= mmrbc;
			pci_conf_write(sc->sc_pc, sc->sc_pt, DGE_PCIX_CMD, preg);
			sc->sc_mmrbc = mmrbc;
		}
		/* FALLTHROUGH */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dge_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	dge_start(ifp);

	splx(s);
	return (error);
}

/*
 * dge_intr:
 *
 *	Interrupt service routine.
 */
static int
dge_intr(void *arg)
{
	struct dge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, DGE_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		rnd_add_uint32(&sc->rnd_source, icr);

		handled = 1;

#if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			DGE_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		dge_rxintr(sc);

#if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			DGE_EVCNT_INCR(&sc->sc_ev_txdw);
		}
		if (icr & ICR_TXQE)
			DGE_EVCNT_INCR(&sc->sc_ev_txqe);
#endif
		dge_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ)) {
			DGE_EVCNT_INCR(&sc->sc_ev_linkintr);
			dge_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			dge_init(ifp);

		/* Try to get more packets going. */
		dge_start(ifp);
	}

	return (handled);
}

/*
 * dge_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
dge_txintr(struct dge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct dge_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != DGE_TXQUEUELEN;
	    i = DGE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		DGE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].dt_status;
		if ((status & TDESC_STA_DD) == 0) {
			DGE_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		ifp->if_opackets++;
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(DGE_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == DGE_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * dge_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
dge_rxintr(struct dge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct dge_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = DGE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		DGE_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].dr_status;
		errors = sc->sc_rxdescs[i].dr_errors;
		len = le16toh(sc->sc_rxdescs[i].dr_len);

		if ((status & RDESC_STS_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			DGE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			DGE_INIT_RXDESC(sc, i);
			if (status & RDESC_STS_EOP) {
				/* Reset our state. */
				DPRINTF(DGE_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (dge_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			DGE_INIT_RXDESC(sc, i);
			if ((status & RDESC_STS_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			DGE_RXCHAIN_RESET(sc);
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}
		DGE_INIT_RXDESC(sc, DGE_PREVRX(i)); /* Write the descriptor */

		DGE_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & RDESC_STS_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		DGE_RXCHAIN_RESET(sc);

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (RDESC_ERR_CE|RDESC_ERR_SE|RDESC_ERR_P|RDESC_ERR_RXE)) {
			ifp->if_ierrors++;
			if (errors & RDESC_ERR_SE)
				printf("%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & RDESC_ERR_P)
				printf("%s: parity error\n",
				    device_xname(sc->sc_dev));
			else if (errors & RDESC_ERR_CE)
				printf("%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = len;

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & RDESC_STS_IPCS) {
			DGE_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & RDESC_ERR_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & RDESC_STS_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			DGE_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & RDESC_ERR_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

		/* Pass this up to any BPF listeners. */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(DGE_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}

/*
 * dge_linkintr:
 *
 *	Helper; handle link interrupts.
 */
static void
dge_linkintr(struct dge_softc *sc, uint32_t icr)
{
	uint32_t status;

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, DGE_STATUS);
		if (status & STATUS_LINKUP) {
			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
			    device_xname(sc->sc_dev)));
		} else {
			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
		}
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(DGE_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
	/* XXX - fix errata */
}

/*
 * dge_reset:
 *
 *	Reset the i82597 chip.
 */
static void
dge_reset(struct dge_softc *sc)
{
	int i;

	/*
	 * Do a chip reset.
	 */
	CSR_WRITE(sc, DGE_CTRL0, CTRL0_RST | sc->sc_ctrl0);

	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, DGE_CTRL0) & CTRL0_RST) == 0)
			break;
		delay(20);
	}

	if (CSR_READ(sc, DGE_CTRL0) & CTRL0_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    device_xname(sc->sc_dev));
	/*
	 * Reset the EEPROM logic.
	 * This will cause the chip to reread its default values,
	 * which doesn't happen otherwise (errata).
	 */
	CSR_WRITE(sc, DGE_CTRL1, CTRL1_EE_RST);
	delay(10000);
}

/*
 * dge_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
dge_init(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;
	struct dge_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

1899 	/*
1900 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
1901 	 * On such platforms there is a small but measurable benefit, at
1902 	 * normal MTU, to skipping the adjustment of the descriptor that
1903 	 * would align the headers.  One possibility is that the DMA itself
1904 	 * is slightly more efficient if the front of the entire packet
1905 	 * (instead of the front of the headers) is aligned.
1906 	 *
1907 	 * Note we must always set align_tweak to 0 if we are using
1908 	 * jumbo frames.
1909 	 */
1910 #ifdef __NO_STRICT_ALIGNMENT
1911 sc->sc_align_tweak = 0;
1912 #else
1913 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
1914 sc->sc_align_tweak = 0;
1915 else
1916 sc->sc_align_tweak = 2;
1917 #endif /* __NO_STRICT_ALIGNMENT */
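
	/*
	 * Why a tweak of 2: the Ethernet header is 14 bytes long, so
	 * starting a received frame 2 bytes into the buffer makes the
	 * IP header that follows it land on a 4-byte boundary, which
	 * strict-alignment platforms require.
	 */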
1918
1919 /* Cancel any pending I/O. */
1920 dge_stop(ifp, 0);
1921
1922 /* Reset the chip to a known state. */
1923 dge_reset(sc);
1924
1925 /* Initialize the transmit descriptor ring. */
1926 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1927 DGE_CDTXSYNC(sc, 0, DGE_NTXDESC,
1928 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1929 sc->sc_txfree = DGE_NTXDESC;
1930 sc->sc_txnext = 0;
1931
1932 sc->sc_txctx_ipcs = 0xffffffff;
1933 sc->sc_txctx_tucs = 0xffffffff;
1934
1935 CSR_WRITE(sc, DGE_TDBAH, 0);
1936 CSR_WRITE(sc, DGE_TDBAL, DGE_CDTXADDR(sc, 0));
1937 CSR_WRITE(sc, DGE_TDLEN, sizeof(sc->sc_txdescs));
1938 CSR_WRITE(sc, DGE_TDH, 0);
1939 CSR_WRITE(sc, DGE_TDT, 0);
1940 CSR_WRITE(sc, DGE_TIDV, TIDV);
1941
1942 #if 0
1943 CSR_WRITE(sc, DGE_TXDCTL, TXDCTL_PTHRESH(0) |
1944 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1945 #endif
1946 CSR_WRITE(sc, DGE_RXDCTL,
1947 RXDCTL_PTHRESH(RXDCTL_PTHRESH_VAL) |
1948 RXDCTL_HTHRESH(RXDCTL_HTHRESH_VAL) |
1949 RXDCTL_WTHRESH(RXDCTL_WTHRESH_VAL));
1950
1951 /* Initialize the transmit job descriptors. */
1952 for (i = 0; i < DGE_TXQUEUELEN; i++)
1953 sc->sc_txsoft[i].txs_mbuf = NULL;
1954 sc->sc_txsfree = DGE_TXQUEUELEN;
1955 sc->sc_txsnext = 0;
1956 sc->sc_txsdirty = 0;
1957
1958 /*
1959 * Initialize the receive descriptor and receive job
1960 * descriptor rings.
1961 */
1962 CSR_WRITE(sc, DGE_RDBAH, 0);
1963 CSR_WRITE(sc, DGE_RDBAL, DGE_CDRXADDR(sc, 0));
1964 CSR_WRITE(sc, DGE_RDLEN, sizeof(sc->sc_rxdescs));
1965 CSR_WRITE(sc, DGE_RDH, DGE_RXSPACE);
1966 CSR_WRITE(sc, DGE_RDT, 0);
1967 CSR_WRITE(sc, DGE_RDTR, RDTR | 0x80000000);
1968 CSR_WRITE(sc, DGE_FCRTL, FCRTL | FCRTL_XONE);
1969 CSR_WRITE(sc, DGE_FCRTH, FCRTH);
1970
1971 for (i = 0; i < DGE_NRXDESC; i++) {
1972 rxs = &sc->sc_rxsoft[i];
1973 if (rxs->rxs_mbuf == NULL) {
1974 if ((error = dge_add_rxbuf(sc, i)) != 0) {
1975 printf("%s: unable to allocate or map rx "
1976 "buffer %d, error = %d\n",
1977 device_xname(sc->sc_dev), i, error);
1978 /*
1979 * XXX Should attempt to run with fewer receive
1980 * XXX buffers instead of just failing.
1981 */
1982 dge_rxdrain(sc);
1983 goto out;
1984 }
1985 }
1986 DGE_INIT_RXDESC(sc, i);
1987 }
1988 sc->sc_rxptr = DGE_RXSPACE;
1989 sc->sc_rxdiscard = 0;
1990 DGE_RXCHAIN_RESET(sc);
1991
1992 if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) {
1993 sc->sc_ctrl0 |= CTRL0_JFE;
1994 CSR_WRITE(sc, DGE_MFS, ETHER_MAX_LEN_JUMBO << 16);
1995 }
1996
1997 /* Write the control registers. */
1998 CSR_WRITE(sc, DGE_CTRL0, sc->sc_ctrl0);
1999
2000 /*
2001 * Set up checksum offload parameters.
2002 */
2003 reg = CSR_READ(sc, DGE_RXCSUM);
2004 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
2005 reg |= RXCSUM_IPOFL;
2006 else
2007 reg &= ~RXCSUM_IPOFL;
2008 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
2009 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2010 else {
2011 reg &= ~RXCSUM_TUOFL;
2012 if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) == 0)
2013 reg &= ~RXCSUM_IPOFL;
2014 }
2015 CSR_WRITE(sc, DGE_RXCSUM, reg);
2016
2017 /*
2018 * Set up the interrupt registers.
2019 */
2020 CSR_WRITE(sc, DGE_IMC, 0xffffffffU);
2021 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2022 ICR_RXO | ICR_RXT0;
2023
2024 CSR_WRITE(sc, DGE_IMS, sc->sc_icr);
2025
2026 /*
2027 * Set up the transmit control register.
2028 */
2029 sc->sc_tctl = TCTL_TCE|TCTL_TPDE|TCTL_TXEN;
2030 CSR_WRITE(sc, DGE_TCTL, sc->sc_tctl);
2031
2032 /*
2033 * Set up the receive control register; we actually program
2034 * the register when we set the receive filter. Use multicast
2035 * address offset type 0.
2036 */
2037 sc->sc_mchash_type = 0;
2038
2039 sc->sc_rctl = RCTL_RXEN | RCTL_RDMTS_12 | RCTL_RPDA_MC |
2040 RCTL_CFF | RCTL_SECRC | RCTL_MO(sc->sc_mchash_type);
2041
2042 #ifdef DGE_OFFBYONE_RXBUG
2043 sc->sc_rctl |= RCTL_BSIZE_16k;
2044 #else
2045 	switch (MCLBYTES) {
2046 case 2048:
2047 sc->sc_rctl |= RCTL_BSIZE_2k;
2048 break;
2049 case 4096:
2050 sc->sc_rctl |= RCTL_BSIZE_4k;
2051 break;
2052 case 8192:
2053 sc->sc_rctl |= RCTL_BSIZE_8k;
2054 break;
2055 case 16384:
2056 sc->sc_rctl |= RCTL_BSIZE_16k;
2057 break;
2058 default:
2059 panic("dge_init: MCLBYTES %d unsupported", MCLBYTES);
2060 }
2061 #endif
2062
2063 	/* Set the receive filter; this also programs RCTL. */
2065 dge_set_filter(sc);
2066
2067 /* ...all done! */
2068 ifp->if_flags |= IFF_RUNNING;
2069 ifp->if_flags &= ~IFF_OACTIVE;
2070
2071 out:
2072 if (error)
2073 printf("%s: interface not running\n", device_xname(sc->sc_dev));
2074 return (error);
2075 }
2076
2077 /*
2078 * dge_rxdrain:
2079 *
2080 * Drain the receive queue.
2081 */
2082 static void
2083 dge_rxdrain(struct dge_softc *sc)
2084 {
2085 struct dge_rxsoft *rxs;
2086 int i;
2087
2088 for (i = 0; i < DGE_NRXDESC; i++) {
2089 rxs = &sc->sc_rxsoft[i];
2090 if (rxs->rxs_mbuf != NULL) {
2091 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2092 m_freem(rxs->rxs_mbuf);
2093 rxs->rxs_mbuf = NULL;
2094 }
2095 }
2096 }
2097
2098 /*
2099 * dge_stop: [ifnet interface function]
2100 *
2101 * Stop transmission on the interface.
2102 */
2103 static void
2104 dge_stop(struct ifnet *ifp, int disable)
2105 {
2106 struct dge_softc *sc = ifp->if_softc;
2107 struct dge_txsoft *txs;
2108 int i;
2109
2110 /* Stop the transmit and receive processes. */
2111 CSR_WRITE(sc, DGE_TCTL, 0);
2112 CSR_WRITE(sc, DGE_RCTL, 0);
2113
2114 /* Release any queued transmit buffers. */
2115 for (i = 0; i < DGE_TXQUEUELEN; i++) {
2116 txs = &sc->sc_txsoft[i];
2117 if (txs->txs_mbuf != NULL) {
2118 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2119 m_freem(txs->txs_mbuf);
2120 txs->txs_mbuf = NULL;
2121 }
2122 }
2123
2124 /* Mark the interface as down and cancel the watchdog timer. */
2125 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2126 ifp->if_timer = 0;
2127
2128 if (disable)
2129 dge_rxdrain(sc);
2130 }
2131
2132 /*
2133 * dge_add_rxbuf:
2134 *
2135  *	Add a receive buffer to the indicated descriptor.
2136 */
2137 static int
2138 dge_add_rxbuf(struct dge_softc *sc, int idx)
2139 {
2140 struct dge_rxsoft *rxs = &sc->sc_rxsoft[idx];
2141 struct mbuf *m;
2142 int error;
2143 #ifdef DGE_OFFBYONE_RXBUG
2144 void *buf;
2145 #endif
2146
2147 MGETHDR(m, M_DONTWAIT, MT_DATA);
2148 if (m == NULL)
2149 return (ENOBUFS);
2150
2151 #ifdef DGE_OFFBYONE_RXBUG
2152 	if ((buf = dge_getbuf(sc)) == NULL) {
 		m_freem(m);	/* don't leak the mbuf header on failure */
2153 		return ENOBUFS;
 	}
2154
2155 m->m_len = m->m_pkthdr.len = DGE_BUFFER_SIZE;
2156 MEXTADD(m, buf, DGE_BUFFER_SIZE, M_DEVBUF, dge_freebuf, sc);
2157 m->m_flags |= M_EXT_RW;
2158
2159 if (rxs->rxs_mbuf != NULL)
2160 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2161 rxs->rxs_mbuf = m;
2162
2163 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, buf,
2164 DGE_BUFFER_SIZE, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
2165 #else
2166 MCLGET(m, M_DONTWAIT);
2167 if ((m->m_flags & M_EXT) == 0) {
2168 m_freem(m);
2169 return (ENOBUFS);
2170 }
2171
2172 if (rxs->rxs_mbuf != NULL)
2173 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2174
2175 rxs->rxs_mbuf = m;
2176
2177 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2178 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2179 BUS_DMA_READ|BUS_DMA_NOWAIT);
2180 #endif
2181 if (error) {
2182 printf("%s: unable to load rx DMA map %d, error = %d\n",
2183 device_xname(sc->sc_dev), idx, error);
2184 panic("dge_add_rxbuf"); /* XXX XXX XXX */
2185 }
2186 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2187 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2188
2189 return (0);
2190 }
2191
2192 /*
2193 * dge_set_ral:
2194 *
2195 * Set an entry in the receive address list.
2196 */
2197 static void
2198 dge_set_ral(struct dge_softc *sc, const uint8_t *enaddr, int idx)
2199 {
2200 uint32_t ral_lo, ral_hi;
2201
2202 if (enaddr != NULL) {
2203 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2204 (enaddr[3] << 24);
2205 ral_hi = enaddr[4] | (enaddr[5] << 8);
2206 ral_hi |= RAH_AV;
2207 } else {
2208 ral_lo = 0;
2209 ral_hi = 0;
2210 }
2211 CSR_WRITE(sc, RA_ADDR(DGE_RAL, idx), ral_lo);
2212 CSR_WRITE(sc, RA_ADDR(DGE_RAH, idx), ral_hi);
2213 }
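
/*
 * Example (hypothetical address): loading 00:11:22:33:44:55 into slot
 * idx yields ral_lo = 0x33221100 and ral_hi = 0x5544 | RAH_AV; the low
 * register holds the first four octets in little-endian byte order and
 * the high register holds the remaining two plus the Address Valid bit.
 */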
2214
2215 /*
2216 * dge_mchash:
2217 *
2218 * Compute the hash of the multicast address for the 4096-bit
2219 * multicast filter.
2220 */
2221 static uint32_t
2222 dge_mchash(struct dge_softc *sc, const uint8_t *enaddr)
2223 {
2224 static const int lo_shift[4] = { 4, 3, 2, 0 };
2225 static const int hi_shift[4] = { 4, 5, 6, 8 };
2226 uint32_t hash;
2227
2228 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2229 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2230
2231 return (hash & 0xfff);
2232 }
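
/*
 * Worked example with filter type 0 (shifts 4 and 4): for the IPv4
 * all-hosts group address 01:00:5e:00:00:01, enaddr[4] = 0x00 and
 * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
 * dge_set_filter() below then sets bit 16 of multicast table word 0.
 */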
2233
2234 /*
2235 * dge_set_filter:
2236 *
2237 * Set up the receive filter.
2238 */
2239 static void
2240 dge_set_filter(struct dge_softc *sc)
2241 {
2242 struct ethercom *ec = &sc->sc_ethercom;
2243 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2244 struct ether_multi *enm;
2245 struct ether_multistep step;
2246 uint32_t hash, reg, bit;
2247 int i;
2248
2249 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2250
2251 if (ifp->if_flags & IFF_BROADCAST)
2252 sc->sc_rctl |= RCTL_BAM;
2253 if (ifp->if_flags & IFF_PROMISC) {
2254 sc->sc_rctl |= RCTL_UPE;
2255 goto allmulti;
2256 }
2257
2258 /*
2259 * Set the station address in the first RAL slot, and
2260 * clear the remaining slots.
2261 */
2262 dge_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2263 for (i = 1; i < RA_TABSIZE; i++)
2264 dge_set_ral(sc, NULL, i);
2265
2266 /* Clear out the multicast table. */
2267 for (i = 0; i < MC_TABSIZE; i++)
2268 CSR_WRITE(sc, DGE_MTA + (i << 2), 0);
2269
2270 ETHER_FIRST_MULTI(step, ec, enm);
2271 while (enm != NULL) {
2272 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2273 /*
2274 * We must listen to a range of multicast addresses.
2275 * For now, just accept all multicasts, rather than
2276 * trying to set only those filter bits needed to match
2277 * the range. (At this time, the only use of address
2278 * ranges is for IP multicast routing, for which the
2279 * range is big enough to require all bits set.)
2280 */
2281 goto allmulti;
2282 }
2283
2284 hash = dge_mchash(sc, enm->enm_addrlo);
2285
2286 reg = (hash >> 5) & 0x7f;
2287 bit = hash & 0x1f;
2288
2289 hash = CSR_READ(sc, DGE_MTA + (reg << 2));
2290 hash |= 1U << bit;
2291
2292 CSR_WRITE(sc, DGE_MTA + (reg << 2), hash);
2293
2294 ETHER_NEXT_MULTI(step, enm);
2295 }
2296
2297 ifp->if_flags &= ~IFF_ALLMULTI;
2298 goto setit;
2299
2300 allmulti:
2301 ifp->if_flags |= IFF_ALLMULTI;
2302 sc->sc_rctl |= RCTL_MPE;
2303
2304 setit:
2305 CSR_WRITE(sc, DGE_RCTL, sc->sc_rctl);
2306 }
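
#if 0
/*
 * Debugging sketch, deliberately not compiled: dump the 4096-bit
 * multicast table word by word, mirroring the MTA layout that
 * dge_set_filter() programs above.
 */
static void
dge_dump_mta(struct dge_softc *sc)
{
	int i;

	for (i = 0; i < MC_TABSIZE; i++)
		printf("%s: MTA[%d] = 0x%08x\n", device_xname(sc->sc_dev),
		    i, CSR_READ(sc, DGE_MTA + (i << 2)));
}
#endif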
2307
2308 /*
2309  * Read in the EEPROM info and verify checksum.  Returns nonzero on checksum mismatch.
2310 */
2311 int
2312 dge_read_eeprom(struct dge_softc *sc)
2313 {
2314 uint16_t cksum;
2315 int i;
2316
2317 cksum = 0;
2318 for (i = 0; i < EEPROM_SIZE; i++) {
2319 sc->sc_eeprom[i] = dge_eeprom_word(sc, i);
2320 cksum += sc->sc_eeprom[i];
2321 }
2322 return cksum != EEPROM_CKSUM;
2323 }
2324
2325
2326 /*
2327 * Read a 16-bit word from address addr in the serial EEPROM.
2328 */
2329 uint16_t
2330 dge_eeprom_word(struct dge_softc *sc, int addr)
2331 {
2332 uint32_t reg;
2333 uint16_t rval = 0;
2334 int i;
2335
2336 reg = CSR_READ(sc, DGE_EECD) & ~(EECD_SK|EECD_DI|EECD_CS);
2337
2338 /* Lower clock pulse (and data in to chip) */
2339 CSR_WRITE(sc, DGE_EECD, reg);
2340 /* Select chip */
2341 CSR_WRITE(sc, DGE_EECD, reg|EECD_CS);
2342
2343 /* Send read command */
2344 dge_eeprom_clockout(sc, 1);
2345 dge_eeprom_clockout(sc, 1);
2346 dge_eeprom_clockout(sc, 0);
2347
2348 /* Send address */
2349 for (i = 5; i >= 0; i--)
2350 dge_eeprom_clockout(sc, (addr >> i) & 1);
2351
2352 /* Read data */
2353 for (i = 0; i < 16; i++) {
2354 rval <<= 1;
2355 rval |= dge_eeprom_clockin(sc);
2356 }
2357
2358 /* Deselect chip */
2359 CSR_WRITE(sc, DGE_EECD, reg);
2360
2361 return rval;
2362 }
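
/*
 * Note on the framing above: this is MicroWire-style signalling.  The
 * three fixed bits clocked out are the start bit plus the two-bit READ
 * opcode, followed by a 6-bit address sent MSB first and a 16-bit data
 * word clocked back in MSB first.
 */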
2363
2364 /*
2365 * Clock out a single bit to the EEPROM.
2366 */
2367 void
2368 dge_eeprom_clockout(struct dge_softc *sc, int bit)
2369 {
2370 int reg;
2371
2372 reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI|EECD_SK);
2373 if (bit)
2374 reg |= EECD_DI;
2375
2376 CSR_WRITE(sc, DGE_EECD, reg);
2377 delay(2);
2378 CSR_WRITE(sc, DGE_EECD, reg|EECD_SK);
2379 delay(2);
2380 CSR_WRITE(sc, DGE_EECD, reg);
2381 delay(2);
2382 }
2383
2384 /*
2385 * Clock in a single bit from EEPROM.
2386 */
2387 int
2388 dge_eeprom_clockin(struct dge_softc *sc)
2389 {
2390 int reg, rv;
2391
2392 reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI|EECD_DO|EECD_SK);
2393
2394 CSR_WRITE(sc, DGE_EECD, reg|EECD_SK); /* Raise clock */
2395 delay(2);
2396 rv = (CSR_READ(sc, DGE_EECD) & EECD_DO) != 0; /* Get bit */
2397 CSR_WRITE(sc, DGE_EECD, reg); /* Lower clock */
2398 delay(2);
2399
2400 return rv;
2401 }
2402
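/*
 * dge_xgmii_mediastatus: [ifmedia interface function]
 *
 *	Report the current media status: the fixed 10GBASE-SR or
 *	10GBASE-LR medium, depending on the board, and whether the
 *	link is up.
 */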
2403 static void
2404 dge_xgmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2405 {
2406 struct dge_softc *sc = ifp->if_softc;
2407
2408 ifmr->ifm_status = IFM_AVALID;
2409 	if (sc->sc_dgep->dgep_flags & DGEP_F_10G_SR) {
2410 ifmr->ifm_active = IFM_ETHER|IFM_10G_SR;
2411 } else {
2412 ifmr->ifm_active = IFM_ETHER|IFM_10G_LR;
2413 }
2414
2415 if (CSR_READ(sc, DGE_STATUS) & STATUS_LINKUP)
2416 ifmr->ifm_status |= IFM_ACTIVE;
2417 }
2418
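/*
 * phwait:
 *
 *	Start an MDIO transaction and busy-wait, up to roughly 100us,
 *	for the command-in-progress bit to clear.  Returns the last
 *	value read from the MDIO register; MDIO_CMD still set in the
 *	return value indicates a timeout.
 */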
2419 static inline int
2420 phwait(struct dge_softc *sc, int p, int r, int d, int type)
2421 {
2422 int i, mdic;
2423
2424 CSR_WRITE(sc, DGE_MDIO,
2425 MDIO_PHY(p) | MDIO_REG(r) | MDIO_DEV(d) | type | MDIO_CMD);
2426 for (i = 0; i < 10; i++) {
2427 delay(10);
2428 if (((mdic = CSR_READ(sc, DGE_MDIO)) & MDIO_CMD) == 0)
2429 break;
2430 }
2431 return mdic;
2432 }
2433
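/*
 * dge_xgmii_writereg:
 *
 *	Write a PHY register over MDIO: latch the value to write, run
 *	the address cycle and then the write cycle, reporting if either
 *	cycle times out.
 */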
2434 static void
2435 dge_xgmii_writereg(struct dge_softc *sc, int phy, int reg, int val)
2436 {
2437 int mdic;
2438
2439 CSR_WRITE(sc, DGE_MDIRW, val);
2440 if (((mdic = phwait(sc, phy, reg, 1, MDIO_ADDR)) & MDIO_CMD)) {
2441 printf("%s: address cycle timeout; phy %d reg %d\n",
2442 device_xname(sc->sc_dev), phy, reg);
2443 return;
2444 }
2445 if (((mdic = phwait(sc, phy, reg, 1, MDIO_WRITE)) & MDIO_CMD)) {
2446 printf("%s: write cycle timeout; phy %d reg %d\n",
2447 device_xname(sc->sc_dev), phy, reg);
2448 return;
2449 }
2450 }
2451
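/*
 * dge_xgmii_reset:
 *
 *	Reset the PHY by setting the reset bit in its control register.
 */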
2452 static void
2453 dge_xgmii_reset(struct dge_softc *sc)
2454 {
2455 dge_xgmii_writereg(sc, 0, 0, BMCR_RESET);
2456 }
2457
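/*
 * dge_xgmii_mediachange: [ifmedia interface function]
 *
 *	Set hardware to the newly selected media.  The media is fixed
 *	here, so there is nothing to do.
 */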
2458 static int
2459 dge_xgmii_mediachange(struct ifnet *ifp)
2460 {
2461 return 0;
2462 }
2463