if_mec.c revision 1.35 1 /* $NetBSD: if_mec.c,v 1.35 2009/05/09 18:31:46 tsutsui Exp $ */
2
3 /*-
4 * Copyright (c) 2004, 2008 Izumi Tsutsui. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*
28 * Copyright (c) 2003 Christopher SEKIYA
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 * This product includes software developed for the
42 * NetBSD Project. See http://www.NetBSD.org/ for
43 * information about NetBSD.
44 * 4. The name of the author may not be used to endorse or promote products
45 * derived from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */
58
59 /*
60 * MACE MAC-110 Ethernet driver
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.35 2009/05/09 18:31:46 tsutsui Exp $");
65
66 #include "opt_ddb.h"
67 #include "bpfilter.h"
68 #include "rnd.h"
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/device.h>
73 #include <sys/callout.h>
74 #include <sys/mbuf.h>
75 #include <sys/malloc.h>
76 #include <sys/kernel.h>
77 #include <sys/socket.h>
78 #include <sys/ioctl.h>
79 #include <sys/errno.h>
80
81 #if NRND > 0
82 #include <sys/rnd.h>
83 #endif
84
85 #include <net/if.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 #include <net/if_ether.h>
89
90 #include <netinet/in.h>
91 #include <netinet/in_systm.h>
92 #include <netinet/ip.h>
93 #include <netinet/tcp.h>
94 #include <netinet/udp.h>
95
96 #if NBPFILTER > 0
97 #include <net/bpf.h>
98 #endif
99
100 #include <machine/bus.h>
101 #include <machine/intr.h>
102 #include <machine/machtype.h>
103
104 #include <dev/mii/mii.h>
105 #include <dev/mii/miivar.h>
106
107 #include <sgimips/mace/macevar.h>
108 #include <sgimips/mace/if_mecreg.h>
109
110 #include <dev/arcbios/arcbios.h>
111 #include <dev/arcbios/arcbiosvar.h>
112
113 /* #define MEC_DEBUG */
114
115 #ifdef MEC_DEBUG
116 #define MEC_DEBUG_RESET 0x01
117 #define MEC_DEBUG_START 0x02
118 #define MEC_DEBUG_STOP 0x04
119 #define MEC_DEBUG_INTR 0x08
120 #define MEC_DEBUG_RXINTR 0x10
121 #define MEC_DEBUG_TXINTR 0x20
122 #define MEC_DEBUG_TXSEGS 0x40
123 uint32_t mec_debug = 0;
124 #define DPRINTF(x, y) if (mec_debug & (x)) printf y
125 #else
126 #define DPRINTF(x, y) /* nothing */
127 #endif
128
129 /* #define MEC_EVENT_COUNTERS */
130
131 #ifdef MEC_EVENT_COUNTERS
132 #define MEC_EVCNT_INCR(ev) (ev)->ev_count++
133 #else
134 #define MEC_EVCNT_INCR(ev) do {} while (/* CONSTCOND */ 0)
135 #endif
136
137 /*
138 * Transmit descriptor list size
139 */
140 #define MEC_NTXDESC 64
141 #define MEC_NTXDESC_MASK (MEC_NTXDESC - 1)
142 #define MEC_NEXTTX(x) (((x) + 1) & MEC_NTXDESC_MASK)
143 #define MEC_NTXDESC_RSVD 4
144 #define MEC_NTXDESC_INTR 8
145
146 /*
147 * software state for TX
148 */
/*
 * Per-descriptor software TX state: the mbuf chain being transmitted
 * and the DMA map covering it, plus flags describing how the packet
 * was placed into the hardware descriptor.
 */
struct mec_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	uint32_t txs_flags;		/* MEC_TXS_* flags below */
#define MEC_TXS_BUFLEN_MASK	0x0000007f	/* data len in txd_buf */
#define MEC_TXS_TXDPTR		0x00000080	/* concat txd_ptr is used */
};
156
157 /*
158 * Transmit buffer descriptor
159 */
160 #define MEC_TXDESCSIZE 128
161 #define MEC_NTXPTR 3
162 #define MEC_TXD_BUFOFFSET sizeof(uint64_t)
163 #define MEC_TXD_BUFOFFSET1 \
164 (sizeof(uint64_t) + sizeof(uint64_t) * MEC_NTXPTR)
165 #define MEC_TXD_BUFSIZE (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
166 #define MEC_TXD_BUFSIZE1 (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET1)
167 #define MEC_TXD_BUFSTART(len) (MEC_TXD_BUFSIZE - (len))
168 #define MEC_TXD_ALIGN 8
169 #define MEC_TXD_ALIGNMASK (MEC_TXD_ALIGN - 1)
170 #define MEC_TXD_ROUNDUP(addr) \
171 (((addr) + MEC_TXD_ALIGNMASK) & ~(uint64_t)MEC_TXD_ALIGNMASK)
172 #define MEC_NTXSEG 16
173
/*
 * Hardware TX descriptor.  The first 64-bit word is written by software
 * as a command (txd_cmd) and read back after completion as status
 * (txd_stat aliases txd_cmd below).  The remainder of the 128-byte
 * descriptor holds either up to MEC_NTXPTR concatenate pointers, an
 * in-descriptor data buffer, or both (see the txd_data union).
 */
struct mec_txdesc {
	volatile uint64_t txd_cmd;
#define MEC_TXCMD_DATALEN	0x000000000000ffff	/* data length */
#define MEC_TXCMD_BUFSTART	0x00000000007f0000	/* start byte offset */
#define TXCMD_BUFSTART(x)	((x) << 16)
#define MEC_TXCMD_TERMDMA	0x0000000000800000	/* stop DMA on abort */
#define MEC_TXCMD_TXINT		0x0000000001000000	/* INT after TX done */
#define MEC_TXCMD_PTR1		0x0000000002000000	/* valid 1st txd_ptr */
#define MEC_TXCMD_PTR2		0x0000000004000000	/* valid 2nd txd_ptr */
#define MEC_TXCMD_PTR3		0x0000000008000000	/* valid 3rd txd_ptr */
#define MEC_TXCMD_UNUSED	0xfffffffff0000000ULL	/* should be zero */

/* after completion the same word carries TX status */
#define txd_stat	txd_cmd
#define MEC_TXSTAT_LEN		0x000000000000ffff	/* TX length */
#define MEC_TXSTAT_COLCNT	0x00000000000f0000	/* collision count */
#define MEC_TXSTAT_COLCNT_SHIFT	16
#define MEC_TXSTAT_LATE_COL	0x0000000000100000	/* late collision */
#define MEC_TXSTAT_CRCERROR	0x0000000000200000	/* CRC error */
#define MEC_TXSTAT_DEFERRED	0x0000000000400000	/* TX deferred */
#define MEC_TXSTAT_SUCCESS	0x0000000000800000	/* TX complete */
#define MEC_TXSTAT_TOOBIG	0x0000000001000000	/* packet too big */
#define MEC_TXSTAT_UNDERRUN	0x0000000002000000	/* TX FIFO underrun */
#define MEC_TXSTAT_COLLISIONS	0x0000000004000000	/* collisions seen */
#define MEC_TXSTAT_EXDEFERRAL	0x0000000008000000	/* excessive deferral */
#define MEC_TXSTAT_COLLIDED	0x0000000010000000	/* packet collided */
#define MEC_TXSTAT_UNUSED	0x7fffffffe0000000ULL	/* should be zero */
#define MEC_TXSTAT_SENT		0x8000000000000000ULL	/* packet sent */

	union {
		/* up to 3 concatenate pointers to external DMA buffers */
		uint64_t txptr[MEC_NTXPTR];
#define MEC_TXPTR_UNUSED2	0x0000000000000007	/* should be zero */
#define MEC_TXPTR_DMAADDR	0x00000000fffffff8	/* TX DMA address */
#define MEC_TXPTR_LEN		0x0000ffff00000000ULL	/* buffer length */
#define TXPTR_LEN(x)		((uint64_t)(x) << 32)
#define MEC_TXPTR_UNUSED1	0xffff000000000000ULL	/* should be zero */

		/* or an in-descriptor data buffer */
		uint8_t txbuf[MEC_TXD_BUFSIZE];
	} txd_data;
#define txd_ptr		txd_data.txptr
#define txd_buf		txd_data.txbuf
};
215
216 /*
217 * Receive buffer size
218 */
219 #define MEC_NRXDESC 16
220 #define MEC_NRXDESC_MASK (MEC_NRXDESC - 1)
221 #define MEC_NEXTRX(x) (((x) + 1) & MEC_NRXDESC_MASK)
222
223 /*
224 * Receive buffer description
225 */
226 #define MEC_RXDESCSIZE 4096 /* umm, should be 4kbyte aligned */
227 #define MEC_RXD_NRXPAD 3
228 #define MEC_RXD_DMAOFFSET (1 + MEC_RXD_NRXPAD)
229 #define MEC_RXD_BUFOFFSET (MEC_RXD_DMAOFFSET * sizeof(uint64_t))
230 #define MEC_RXD_BUFSIZE (MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
231
/*
 * Hardware RX descriptor: a status word (written by the chip on packet
 * reception), pad, and the in-descriptor receive buffer.  The whole
 * descriptor is MEC_RXDESCSIZE (4 KB) and must be 4 KB aligned.
 */
struct mec_rxdesc {
	volatile uint64_t rxd_stat;
#define MEC_RXSTAT_LEN		0x000000000000ffff	/* data length */
#define MEC_RXSTAT_VIOLATION	0x0000000000010000	/* code violation (?) */
#define MEC_RXSTAT_UNUSED2	0x0000000000020000	/* unknown (?) */
#define MEC_RXSTAT_CRCERROR	0x0000000000040000	/* CRC error */
#define MEC_RXSTAT_MULTICAST	0x0000000000080000	/* multicast packet */
#define MEC_RXSTAT_BROADCAST	0x0000000000100000	/* broadcast packet */
#define MEC_RXSTAT_INVALID	0x0000000000200000	/* invalid preamble */
#define MEC_RXSTAT_LONGEVENT	0x0000000000400000	/* long packet */
#define MEC_RXSTAT_BADPACKET	0x0000000000800000	/* bad packet */
#define MEC_RXSTAT_CAREVENT	0x0000000001000000	/* carrier event */
#define MEC_RXSTAT_MATCHMCAST	0x0000000002000000	/* match multicast */
#define MEC_RXSTAT_MATCHMAC	0x0000000004000000	/* match MAC */
#define MEC_RXSTAT_SEQNUM	0x00000000f8000000	/* sequence number */
#define MEC_RXSTAT_CKSUM	0x0000ffff00000000ULL	/* IP checksum */
#define RXSTAT_CKSUM(x)	(((uint64_t)(x) & MEC_RXSTAT_CKSUM) >> 32)
#define MEC_RXSTAT_UNUSED1	0x7fff000000000000ULL	/* should be zero */
#define MEC_RXSTAT_RECEIVED	0x8000000000000000ULL	/* set to 1 on RX */
	uint64_t rxd_pad1[MEC_RXD_NRXPAD];	/* pad to MEC_RXD_BUFOFFSET */
	uint8_t rxd_buf[MEC_RXD_BUFSIZE];	/* receive data buffer */
};
254
255 /*
256 * control structures for DMA ops
257 */
/*
 * All DMA-visible control data in one contiguous allocation:
 * the TX descriptor ring followed by the RX descriptor ring
 * (descriptors contain their data buffers, see above).
 */
struct mec_control_data {
	/*
	 * TX descriptors and buffers
	 */
	struct mec_txdesc mcd_txdesc[MEC_NTXDESC];

	/*
	 * RX descriptors and buffers
	 */
	struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
};
269
270 /*
271 * It _seems_ there are some restrictions on descriptor address:
272 *
273 * - Base address of txdescs should be 8kbyte aligned
274 * - Each txdesc should be 128byte aligned
275 * - Each rxdesc should be 4kbyte aligned
276 *
277 * So we should specify 8k align to allocalte txdescs.
278 * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192
279 * so rxdescs are also allocated at 4kbyte aligned.
280 */
281 #define MEC_CONTROL_DATA_ALIGN (8 * 1024)
282
283 #define MEC_CDOFF(x) offsetof(struct mec_control_data, x)
284 #define MEC_CDTXOFF(x) MEC_CDOFF(mcd_txdesc[(x)])
285 #define MEC_CDRXOFF(x) MEC_CDOFF(mcd_rxdesc[(x)])
286
287 /*
288 * software state per device
289 */
/*
 * software state per device
 */
struct mec_softc {
	device_t sc_dev;		/* generic device structures */

	bus_space_tag_t sc_st;		/* bus_space tag */
	bus_space_handle_t sc_sh;	/* bus_space handle */
	bus_dma_tag_t sc_dmat;		/* bus_dma tag */
	void *sc_sdhook;		/* shutdown hook */

	struct ethercom sc_ethercom;	/* Ethernet common part */

	struct mii_data sc_mii;		/* MII/media information */
	int sc_phyaddr;			/* MII address */
	struct callout sc_tick_ch;	/* tick callout */

	uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */

	bus_dmamap_t sc_cddmamap;	/* bus_dma map for control data */
	/* DMA (bus) address of the control data (single segment) */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* pointer to allocated control data */
	struct mec_control_data *sc_control_data;
#define sc_txdesc	sc_control_data->mcd_txdesc
#define sc_rxdesc	sc_control_data->mcd_rxdesc

	/* software state for TX descs */
	struct mec_txsoft sc_txsoft[MEC_NTXDESC];

	int sc_txpending;		/* number of TX requests pending */
	int sc_txdirty;			/* first dirty TX descriptor */
	int sc_txlast;			/* last used TX descriptor */

	int sc_rxptr;			/* next ready RX buffer */

#if NRND > 0
	rndsource_element_t sc_rnd_source; /* random source */
#endif
#ifdef MEC_EVENT_COUNTERS
	/* optional statistics; attached in mec_attach() */
	struct evcnt sc_ev_txpkts;	/* TX packets queued total */
	struct evcnt sc_ev_txdpad;	/* TX packets padded in txdesc buf */
	struct evcnt sc_ev_txdbuf;	/* TX packets copied to txdesc buf */
	struct evcnt sc_ev_txptr1;	/* TX packets using concat ptr1 */
	struct evcnt sc_ev_txptr1a;	/* TX packets w/ptr1 ~160bytes */
	struct evcnt sc_ev_txptr1b;	/* TX packets w/ptr1 ~256bytes */
	struct evcnt sc_ev_txptr1c;	/* TX packets w/ptr1 ~512bytes */
	struct evcnt sc_ev_txptr1d;	/* TX packets w/ptr1 ~1024bytes */
	struct evcnt sc_ev_txptr1e;	/* TX packets w/ptr1 >1024bytes */
	struct evcnt sc_ev_txptr2;	/* TX packets using concat ptr1,2 */
	struct evcnt sc_ev_txptr2a;	/* TX packets w/ptr2 ~160bytes */
	struct evcnt sc_ev_txptr2b;	/* TX packets w/ptr2 ~256bytes */
	struct evcnt sc_ev_txptr2c;	/* TX packets w/ptr2 ~512bytes */
	struct evcnt sc_ev_txptr2d;	/* TX packets w/ptr2 ~1024bytes */
	struct evcnt sc_ev_txptr2e;	/* TX packets w/ptr2 >1024bytes */
	struct evcnt sc_ev_txptr3;	/* TX packets using concat ptr1,2,3 */
	struct evcnt sc_ev_txptr3a;	/* TX packets w/ptr3 ~160bytes */
	struct evcnt sc_ev_txptr3b;	/* TX packets w/ptr3 ~256bytes */
	struct evcnt sc_ev_txptr3c;	/* TX packets w/ptr3 ~512bytes */
	struct evcnt sc_ev_txptr3d;	/* TX packets w/ptr3 ~1024bytes */
	struct evcnt sc_ev_txptr3e;	/* TX packets w/ptr3 >1024bytes */
	struct evcnt sc_ev_txmbuf;	/* TX packets copied to new mbufs */
	struct evcnt sc_ev_txmbufa;	/* TX packets w/mbuf ~160bytes */
	struct evcnt sc_ev_txmbufb;	/* TX packets w/mbuf ~256bytes */
	struct evcnt sc_ev_txmbufc;	/* TX packets w/mbuf ~512bytes */
	struct evcnt sc_ev_txmbufd;	/* TX packets w/mbuf ~1024bytes */
	struct evcnt sc_ev_txmbufe;	/* TX packets w/mbuf >1024bytes */
	struct evcnt sc_ev_txptrs;	/* TX packets using ptrs total */
	struct evcnt sc_ev_txptrc0;	/* TX packets w/ptrs no hdr chain */
	struct evcnt sc_ev_txptrc1;	/* TX packets w/ptrs 1 hdr chain */
	struct evcnt sc_ev_txptrc2;	/* TX packets w/ptrs 2 hdr chains */
	struct evcnt sc_ev_txptrc3;	/* TX packets w/ptrs 3 hdr chains */
	struct evcnt sc_ev_txptrc4;	/* TX packets w/ptrs 4 hdr chains */
	struct evcnt sc_ev_txptrc5;	/* TX packets w/ptrs 5 hdr chains */
	struct evcnt sc_ev_txptrc6;	/* TX packets w/ptrs >5 hdr chains */
	struct evcnt sc_ev_txptrh0;	/* TX packets w/ptrs ~8bytes hdr */
	struct evcnt sc_ev_txptrh1;	/* TX packets w/ptrs ~16bytes hdr */
	struct evcnt sc_ev_txptrh2;	/* TX packets w/ptrs ~32bytes hdr */
	struct evcnt sc_ev_txptrh3;	/* TX packets w/ptrs ~64bytes hdr */
	struct evcnt sc_ev_txptrh4;	/* TX packets w/ptrs ~80bytes hdr */
	struct evcnt sc_ev_txptrh5;	/* TX packets w/ptrs ~96bytes hdr */
	struct evcnt sc_ev_txdstall;	/* TX stalled due to no txdesc */
	struct evcnt sc_ev_txempty;	/* TX empty interrupts */
	struct evcnt sc_ev_txsent;	/* TX sent interrupts */
#endif
};
373
374 #define MEC_CDTXADDR(sc, x) ((sc)->sc_cddma + MEC_CDTXOFF(x))
375 #define MEC_CDRXADDR(sc, x) ((sc)->sc_cddma + MEC_CDRXOFF(x))
376
377 #define MEC_TXDESCSYNC(sc, x, ops) \
378 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
379 MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
380 #define MEC_TXCMDSYNC(sc, x, ops) \
381 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
382 MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
383
384 #define MEC_RXSTATSYNC(sc, x, ops) \
385 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
386 MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
387 #define MEC_RXBUFSYNC(sc, x, len, ops) \
388 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
389 MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET, \
390 MEC_ETHER_ALIGN + (len), (ops))
391
392 /* XXX these values should be moved to <net/if_ether.h> ? */
393 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
394 #define MEC_ETHER_ALIGN 2
395
396 static int mec_match(device_t, cfdata_t, void *);
397 static void mec_attach(device_t, device_t, void *);
398
399 static int mec_mii_readreg(device_t, int, int);
400 static void mec_mii_writereg(device_t, int, int, int);
401 static int mec_mii_wait(struct mec_softc *);
402 static void mec_statchg(device_t);
403
404 static void enaddr_aton(const char *, uint8_t *);
405
406 static int mec_init(struct ifnet * ifp);
407 static void mec_start(struct ifnet *);
408 static void mec_watchdog(struct ifnet *);
409 static void mec_tick(void *);
410 static int mec_ioctl(struct ifnet *, u_long, void *);
411 static void mec_reset(struct mec_softc *);
412 static void mec_setfilter(struct mec_softc *);
413 static int mec_intr(void *arg);
414 static void mec_stop(struct ifnet *, int);
415 static void mec_rxintr(struct mec_softc *);
416 static void mec_rxcsum(struct mec_softc *, struct mbuf *, uint16_t,
417 uint32_t);
418 static void mec_txintr(struct mec_softc *, uint32_t);
419 static void mec_shutdown(void *);
420
421 CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc),
422 mec_match, mec_attach, NULL, NULL);
423
424 static int mec_matched = 0;
425
426 static int
427 mec_match(device_t parent, cfdata_t cf, void *aux)
428 {
429
430 /* allow only one device */
431 if (mec_matched)
432 return 0;
433
434 mec_matched = 1;
435 return 1;
436 }
437
/*
 * Attach the MAC-110: map registers, allocate/map/load the DMA-able
 * control data (TX/RX rings), create per-packet TX DMA maps, determine
 * the MAC address from ARCBIOS (falling back to a locally generated
 * one when the firmware address is bogus), attach MII/media and the
 * network interface, and hook up the interrupt and shutdown handlers.
 * On failure, resources acquired so far are released via the fail_*
 * labels in reverse order.
 */
static void
mec_attach(device_t parent, device_t self, void *aux)
{
	struct mec_softc *sc = device_private(self);
	struct mace_attach_args *maa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint64_t address, command;
	const char *macaddr;
	struct mii_softc *child;
	bus_dma_segment_t seg;
	int i, err, rseg;
	bool mac_is_fake;

	sc->sc_dev = self;
	sc->sc_st = maa->maa_st;
	if (bus_space_subregion(sc->sc_st, maa->maa_sh,
	    maa->maa_offset, 0, &sc->sc_sh) != 0) {
		aprint_error(": can't map i/o space\n");
		return;
	}

	/* set up DMA structures */
	sc->sc_dmat = maa->maa_dmat;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((err = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": unable to allocate control data, error = %d\n",
		    err);
		goto fail_0;
	}
	/*
	 * XXX needs re-think...
	 * control data structures contain whole RX data buffer, so
	 * BUS_DMA_COHERENT (which disables cache) may cause some performance
	 * issue on copying data from the RX buffer to mbuf on normal memory,
	 * though we have to make sure all bus_dmamap_sync(9) ops are called
	 * properly in that case.
	 */
	if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mec_control_data),
	    (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
		aprint_error(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}
	memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));

	if ((err = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct mec_control_data), 1,
	    sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error(": unable to create control data DMA map,"
		    " error = %d\n", err);
		goto fail_2;
	}
	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct mec_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": unable to load control data DMA map,"
		    " error = %d\n", err);
		goto fail_3;
	}

	/* create TX buffer DMA maps */
	for (i = 0; i < MEC_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, MEC_NTXSEG, MCLBYTES, PAGE_SIZE, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error(": unable to create tx DMA map %d,"
			    " error = %d\n", i, err);
			goto fail_4;
		}
	}

	callout_init(&sc->sc_tick_ch, 0);

	/* get Ethernet address from ARCBIOS */
	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		aprint_error(": unable to get MAC address!\n");
		goto fail_4;
	}
	/*
	 * On some machines the DS2502 chip storing the serial number/
	 * mac address is on the pci riser board - if this board is
	 * missing, ARCBIOS will not know a good ethernet address (but
	 * otherwise the machine will work fine).
	 */
	mac_is_fake = false;
	if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) {
		uint32_t ui = 0;
		const char * netaddr =
			ARCBIOS->GetEnvironmentVariable("netaddr");

		/*
		 * Create a MAC address by abusing the "netaddr" env var
		 */
		sc->sc_enaddr[0] = 0xf2;
		sc->sc_enaddr[1] = 0x0b;
		sc->sc_enaddr[2] = 0xa4;
		if (netaddr) {
			mac_is_fake = true;
			/* parse dotted-decimal "a.b.c.d" into ui, MSB first */
			while (*netaddr) {
				int v = 0;
				while (*netaddr && *netaddr != '.') {
					if (*netaddr >= '0' && *netaddr <= '9')
						v = v*10 + (*netaddr - '0');
					netaddr++;
				}
				ui <<= 8;
				ui |= v;
				if (*netaddr == '.')
					netaddr++;
			}
		}
		/*
		 * NOTE(review): copies the low 3 address bytes out of ui;
		 * this byte selection assumes big-endian layout (true on
		 * sgimips) — confirm if ever reused elsewhere.
		 */
		memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3);
	}
	if (!mac_is_fake)
		enaddr_aton(macaddr, sc->sc_enaddr);

	/* set the Ethernet address */
	address = 0;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		address = address << 8;
		address |= sc->sc_enaddr[i];
	}
	bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address);

	/* reset device */
	mec_reset(sc);

	command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);

	aprint_normal(": MAC-110 Ethernet, rev %u\n",
	    (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT));

	if (mac_is_fake)
		aprint_normal_dev(self,
		    "could not get ethernet address from firmware"
		    " - generated one from the \"netaddr\" environment"
		    " variable\n");
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* Done, now attach everything */

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mec_mii_readreg;
	sc->sc_mii.mii_writereg = mec_mii_writereg;
	sc->sc_mii.mii_statchg = mec_statchg;

	/* Set up PHY properties */
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
		sc->sc_phyaddr = child->mii_phy;
	}

	/* fill in the ifnet and hook up our entry points */
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mec_ioctl;
	ifp->if_start = mec_start;
	ifp->if_watchdog = mec_watchdog;
	ifp->if_init = mec_init;
	ifp->if_stop = mec_stop;
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_READY(&ifp->if_snd);

	/* mec has dumb RX cksum support */
	ifp->if_capabilities = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx;

	/* We can support 802.1Q VLAN-sized frames. */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	/* establish interrupt */
	cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);

#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
#endif

#ifdef MEC_EVENT_COUNTERS
	/* optional TX statistics; see the matching fields in mec_softc */
	evcnt_attach_dynamic(&sc->sc_ev_txpkts , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts queued total");
	evcnt_attach_dynamic(&sc->sc_ev_txdpad , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts padded in txdesc buf");
	evcnt_attach_dynamic(&sc->sc_ev_txdbuf , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts copied to txdesc buf");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts using concat ptr1");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1a , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr1 ~160bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1b , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr1 ~256bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1c , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr1 ~512bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1d , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr1 ~1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1e , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr1 >1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts using concat ptr1,2");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2a , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr2 ~160bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2b , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr2 ~256bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2c , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr2 ~512bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2d , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr2 ~1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2e , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr2 >1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts using concat ptr1,2,3");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3a , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr3 ~160bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3b , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr3 ~256bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3c , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr3 ~512bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3d , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr3 ~1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3e , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr3 >1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txmbuf , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts copied to new mbufs");
	evcnt_attach_dynamic(&sc->sc_ev_txmbufa , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/mbuf ~160bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txmbufb , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/mbuf ~256bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txmbufc , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/mbuf ~512bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txmbufd , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/mbuf ~1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txmbufe , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/mbuf >1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptrs , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts using ptrs total");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc0 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs no hdr chain");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc1 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs 1 hdr chain");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc2 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs 2 hdr chains");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc3 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs 3 hdr chains");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc4 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs 4 hdr chains");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc5 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs 5 hdr chains");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc6 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs >5 hdr chains");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh0 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~8bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh1 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~16bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh2 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~32bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh3 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~64bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh4 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~80bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh5 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~96bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX stalled due to no txdesc");
	evcnt_attach_dynamic(&sc->sc_ev_txempty , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX empty interrupts");
	evcnt_attach_dynamic(&sc->sc_ev_txsent , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX sent interrupts");
#endif

	/* set shutdown hook to reset interface on powerdown */
	sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall though.
	 */
 fail_4:
	for (i = 0; i < MEC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct mec_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
755
/*
 * Read a PHY register through the MEC MII interface: wait for the
 * interface to go idle, latch the phy/register address, trigger the
 * read, then poll MEC_PHY_DATA until the busy bit clears.  Returns
 * the register value bits (MEC_PHY_DATA_VALUE), or 0 on timeout.
 * The delay()s between register accesses are timing-critical.
 */
static int
mec_mii_readreg(device_t self, int phy, int reg)
{
	struct mec_softc *sc = device_private(self);
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t val;
	int i;

	if (mec_mii_wait(sc) != 0)
		return 0;

	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
	delay(25);
	bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
	delay(25);
	mec_mii_wait(sc);

	/* poll for completion; up to 20 * 30us */
	for (i = 0; i < 20; i++) {
		delay(30);

		val = bus_space_read_8(st, sh, MEC_PHY_DATA);

		if ((val & MEC_PHY_DATA_BUSY) == 0)
			return val & MEC_PHY_DATA_VALUE;
	}
	return 0;
}
785
/*
 * Write a PHY register through the MEC MII interface: wait for idle,
 * latch the phy/register address, then write the data word.  Failures
 * (MII stuck busy) are only logged.  The delay()s between register
 * accesses are timing-critical.
 */
static void
mec_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct mec_softc *sc = device_private(self);
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;

	if (mec_mii_wait(sc) != 0) {
		printf("timed out writing %x: %x\n", reg, val);
		return;
	}

	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));

	delay(60);

	bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);

	delay(60);

	mec_mii_wait(sc);
}
809
810 static int
811 mec_mii_wait(struct mec_softc *sc)
812 {
813 uint32_t busy;
814 int i, s;
815
816 for (i = 0; i < 100; i++) {
817 delay(30);
818
819 s = splhigh();
820 busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
821 splx(s);
822
823 if ((busy & MEC_PHY_DATA_BUSY) == 0)
824 return 0;
825 #if 0
826 if (busy == 0xffff) /* XXX ? */
827 return 0;
828 #endif
829 }
830
831 printf("%s: MII timed out\n", device_xname(sc->sc_dev));
832 return 1;
833 }
834
/*
 * MII status-change callback: update the MAC duplex and
 * inter-packet gap settings to match the negotiated media.
 */
static void
mec_statchg(device_t self)
{
	struct mec_softc *sc = device_private(self);
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint32_t control;

	/* clear the duplex, speed and IPG fields before re-setting them */
	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
	control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
	    MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);

	/* must also set IPG here for duplex stuff ... */
	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
		control |= MEC_MAC_FULL_DUPLEX;
	} else {
		/* half duplex: set the default inter-packet gap */
		control |= MEC_MAC_IPG_DEFAULT;
	}

	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
}
857
858 /*
859 * XXX
860 * maybe this function should be moved to common part
861 * (sgimips/machdep.c or elsewhere) for all on-board network devices.
862 */
863 static void
864 enaddr_aton(const char *str, uint8_t *eaddr)
865 {
866 int i;
867 char c;
868
869 for (i = 0; i < ETHER_ADDR_LEN; i++) {
870 if (*str == ':')
871 str++;
872
873 c = *str++;
874 if (isdigit(c)) {
875 eaddr[i] = (c - '0');
876 } else if (isxdigit(c)) {
877 eaddr[i] = (toupper(c) + 10 - 'A');
878 }
879 c = *str++;
880 if (isdigit(c)) {
881 eaddr[i] = (eaddr[i] << 4) | (c - '0');
882 } else if (isxdigit(c)) {
883 eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
884 }
885 }
886 }
887
/*
 * Initialize the interface: stop pending I/O, reset the chip,
 * program the receive filter, RX/TX rings and the DMA engine,
 * then mark the interface running and kick the transmitter.
 * Also used as the ENETRESET path to reprogram the multicast
 * filter and for error recovery from mec_intr()/mec_watchdog().
 */
static int
mec_init(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct mec_rxdesc *rxd;
	int i, rc;

	/* cancel any pending I/O */
	mec_stop(ifp, 0);

	/* reset device */
	mec_reset(sc);

	/* setup filter for multicast or promisc mode */
	mec_setfilter(sc);

	/* set the TX ring pointer to the base address */
	bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));

	/* TX ring is empty; txlast is the slot just before the first free one */
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = MEC_NTXDESC - 1;

	/* put RX buffers into FIFO */
	for (i = 0; i < MEC_NRXDESC; i++) {
		rxd = &sc->sc_rxdesc[i];
		rxd->rxd_stat = 0;
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
	}
	sc->sc_rxptr = 0;

#if 0 /* XXX no info */
	bus_space_write_8(st, sh, MEC_TIMER, 0);
#endif

	/*
	 * MEC_DMA_TX_INT_ENABLE will be set later otherwise it causes
	 * spurious interrupts when TX buffers are empty
	 */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL,
	    (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
	    MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
	    MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);

	/* start the one-second MII tick */
	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);

	if ((rc = ether_mediachange(ifp)) != 0)
		return rc;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	mec_start(ifp);

	return 0;
}
948
/*
 * Reset the MAC core: halt DMA, pulse MEC_MAC_CORE_RESET, and
 * reload a default MAC control value.
 */
static void
mec_reset(struct mec_softc *sc)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t control;

	/* stop DMA first */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);

	/* reset chip */
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
	delay(1000);
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
	delay(1000);

	/* Default to 100/half and let auto-negotiation work its magic */
	control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
	    MEC_MAC_IPG_DEFAULT;

	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
	/* stop DMA again for sanity */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);

	DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
	    bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
}
976
/*
 * ifp->if_start handler: move packets from the interface send queue
 * onto the TX descriptor ring.
 *
 * Short packets (<= MEC_TXD_BUFSIZE) are copied into the buffer
 * embedded in the TX descriptor.  Longer packets are DMA-mapped and
 * attached through up to MEC_NTXPTR concatenate pointers; the chip
 * requires those DMA segments to be uint64_t aligned, so any
 * unaligned head of the packet is copied into the descriptor buffer
 * first.  Packets whose chains cannot satisfy the alignment/segment
 * constraints are copied into a freshly allocated contiguous mbuf.
 */
static void
mec_start(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	int error, firsttx, nexttx, opending;
	int len, bufoff, buflen, nsegs, align, resid, pseg, nptr, slen, i;
	uint32_t txdcmd;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the first transmit descriptor.
	 */
	opending = sc->sc_txpending;
	firsttx = MEC_NEXTTX(sc->sc_txlast);

	DPRINTF(MEC_DEBUG_START,
	    ("%s: opending = %d, firsttx = %d\n", __func__, opending, firsttx));

	while (sc->sc_txpending < MEC_NTXDESC - 1) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = MEC_NEXTTX(sc->sc_txlast);
		txd = &sc->sc_txdesc[nexttx];
		txs = &sc->sc_txsoft[nexttx];
		dmamap = txs->txs_dmamap;
		txs->txs_flags = 0;

		buflen = 0;
		bufoff = 0;
		resid = 0;
		nptr = 0;	/* XXX gcc */
		pseg = 0;	/* XXX gcc */

		len = m0->m_pkthdr.len;

		DPRINTF(MEC_DEBUG_START,
		    ("%s: len = %d, nexttx = %d, txpending = %d\n",
		    __func__, len, nexttx, sc->sc_txpending));

		if (len <= MEC_TXD_BUFSIZE) {
			/*
			 * If a TX packet will fit into small txdesc buffer,
			 * just copy it into there. Maybe it's faster than
			 * checking alignment and calling bus_dma(9) etc.
			 */
			DPRINTF(MEC_DEBUG_START, ("%s: short packet\n",
			    __func__));
			IFQ_DEQUEUE(&ifp->if_snd, m0);

			/*
			 * I don't know if MEC chip does auto padding,
			 * but do it manually for safety.
			 */
			if (len < ETHER_PAD_LEN) {
				MEC_EVCNT_INCR(&sc->sc_ev_txdpad);
				bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
				memset(txd->txd_buf + bufoff + len, 0,
				    ETHER_PAD_LEN - len);
				len = buflen = ETHER_PAD_LEN;
			} else {
				MEC_EVCNT_INCR(&sc->sc_ev_txdbuf);
				bufoff = MEC_TXD_BUFSTART(len);
				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
				buflen = len;
			}
		} else {
			/*
			 * If the packet won't fit the static buffer in txdesc,
			 * we have to use the concatenate pointers to handle it.
			 */
			DPRINTF(MEC_DEBUG_START, ("%s: long packet\n",
			    __func__));
			txs->txs_flags = MEC_TXS_TXDPTR;

			/*
			 * Call bus_dmamap_load_mbuf(9) first to see
			 * how many chains the TX mbuf has.
			 */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
			    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error == 0) {
				/*
				 * Check chains which might contain headers.
				 * They might be so much fragmented and
				 * it's better to copy them into txdesc buffer
				 * since they would be small enough.
				 */
				nsegs = dmamap->dm_nsegs;
				for (pseg = 0; pseg < nsegs; pseg++) {
					slen = dmamap->dm_segs[pseg].ds_len;
					if (buflen + slen >
					    MEC_TXD_BUFSIZE1 - MEC_TXD_ALIGN)
						break;
					buflen += slen;
				}
				/*
				 * Check if the rest chains can be fit into
				 * the concatenate pointers.
				 */
				align = dmamap->dm_segs[pseg].ds_addr &
				    MEC_TXD_ALIGNMASK;
				if (align > 0) {
					/*
					 * If the first chain isn't uint64_t
					 * aligned, append the unaligned part
					 * into txdesc buffer too.
					 */
					resid = MEC_TXD_ALIGN - align;
					buflen += resid;
					for (; pseg < nsegs; pseg++) {
						slen =
						  dmamap->dm_segs[pseg].ds_len;
						if (slen > resid)
							break;
						resid -= slen;
					}
				} else if (pseg == 0) {
					/*
					 * In this case, the first chain is
					 * uint64_t aligned but it's too long
					 * to put into txdesc buf.
					 * We have to put some data into
					 * txdesc buf even in this case,
					 * so put MEC_TXD_ALIGN bytes there.
					 */
					buflen = resid = MEC_TXD_ALIGN;
				}
				nptr = nsegs - pseg;
				if (nptr <= MEC_NTXPTR) {
					bufoff = MEC_TXD_BUFSTART(buflen);

					/*
					 * Check if all the rest chains are
					 * uint64_t aligned.
					 */
					align = 0;
					for (i = pseg + 1; i < nsegs; i++)
						align |=
						    dmamap->dm_segs[i].ds_addr
						    & MEC_TXD_ALIGNMASK;
					if (align != 0) {
						/* chains are not aligned */
						error = -1;
					}
				} else {
					/* The TX mbuf chains doesn't fit. */
					error = -1;
				}
				if (error == -1)
					bus_dmamap_unload(sc->sc_dmat, dmamap);
			}
			if (error != 0) {
				/*
				 * The TX mbuf chains can't be put into
				 * the concatenate buffers. In this case,
				 * we have to allocate a new contiguous mbuf
				 * and copy data into it.
				 *
				 * Even in this case, the Ethernet header in
				 * the TX mbuf might be unaligned and trailing
				 * data might be word aligned, so put 2 byte
				 * (MEC_ETHER_ALIGN) padding at the top of the
				 * allocated mbuf and copy TX packets.
				 * 6 bytes (MEC_ALIGN_BYTES - MEC_ETHER_ALIGN)
				 * at the top of the new mbuf won't be uint64_t
				 * aligned, but we have to put some data into
				 * txdesc buffer anyway even if the buffer
				 * is uint64_t aligned.
				 */
				DPRINTF(MEC_DEBUG_START|MEC_DEBUG_TXSEGS,
				    ("%s: re-allocating mbuf\n", __func__));

				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					printf("%s: unable to allocate "
					    "TX mbuf\n",
					    device_xname(sc->sc_dev));
					break;
				}
				if (len > (MHLEN - MEC_ETHER_ALIGN)) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						printf("%s: unable to allocate "
						    "TX cluster\n",
						    device_xname(sc->sc_dev));
						m_freem(m);
						break;
					}
				}
				m->m_data += MEC_ETHER_ALIGN;

				/*
				 * Copy whole data (including unaligned part)
				 * for following bpf_mtap().
				 */
				m_copydata(m0, 0, len, mtod(m, void *));
				m->m_pkthdr.len = m->m_len = len;
				error = bus_dmamap_load_mbuf(sc->sc_dmat,
				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
				if (dmamap->dm_nsegs > 1) {
					/* should not happen, but for sanity */
					bus_dmamap_unload(sc->sc_dmat, dmamap);
					error = -1;
				}
				if (error != 0) {
					printf("%s: unable to load TX buffer, "
					    "error = %d\n",
					    device_xname(sc->sc_dev), error);
					m_freem(m);
					break;
				}
				/*
				 * Only the first segment should be put into
				 * the concatenate pointer in this case.
				 */
				pseg = 0;
				nptr = 1;

				/*
				 * Set length of unaligned part which will be
				 * copied into txdesc buffer.
				 */
				buflen = MEC_TXD_ALIGN - MEC_ETHER_ALIGN;
				bufoff = MEC_TXD_BUFSTART(buflen);
				resid = buflen;
#ifdef MEC_EVENT_COUNTERS
				MEC_EVCNT_INCR(&sc->sc_ev_txmbuf);
				if (len <= 160)
					MEC_EVCNT_INCR(&sc->sc_ev_txmbufa);
				else if (len <= 256)
					MEC_EVCNT_INCR(&sc->sc_ev_txmbufb);
				else if (len <= 512)
					MEC_EVCNT_INCR(&sc->sc_ev_txmbufc);
				else if (len <= 1024)
					MEC_EVCNT_INCR(&sc->sc_ev_txmbufd);
				else
					MEC_EVCNT_INCR(&sc->sc_ev_txmbufe);
#endif
			}
#ifdef MEC_EVENT_COUNTERS
			else {
				MEC_EVCNT_INCR(&sc->sc_ev_txptrs);
				if (nptr == 1) {
					MEC_EVCNT_INCR(&sc->sc_ev_txptr1);
					if (len <= 160)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr1a);
					else if (len <= 256)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr1b);
					else if (len <= 512)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr1c);
					else if (len <= 1024)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr1d);
					else
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr1e);
				} else if (nptr == 2) {
					MEC_EVCNT_INCR(&sc->sc_ev_txptr2);
					if (len <= 160)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr2a);
					else if (len <= 256)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr2b);
					else if (len <= 512)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr2c);
					else if (len <= 1024)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr2d);
					else
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr2e);
				} else if (nptr == 3) {
					MEC_EVCNT_INCR(&sc->sc_ev_txptr3);
					if (len <= 160)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr3a);
					else if (len <= 256)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr3b);
					else if (len <= 512)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr3c);
					else if (len <= 1024)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr3d);
					else
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr3e);
				}
				if (pseg == 0)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc0);
				else if (pseg == 1)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc1);
				else if (pseg == 2)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc2);
				else if (pseg == 3)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc3);
				else if (pseg == 4)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc4);
				else if (pseg == 5)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc5);
				else
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc6);
				if (buflen <= 8)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh0);
				else if (buflen <= 16)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh1);
				else if (buflen <= 32)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh2);
				else if (buflen <= 64)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh3);
				else if (buflen <= 80)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh4);
				else
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh5);
			}
#endif
			/* copy the unaligned head into the txdesc buffer */
			m_copydata(m0, 0, buflen, txd->txd_buf + bufoff);

			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m != NULL) {
				/* the re-allocated contiguous copy replaces m0 */
				m_freem(m0);
				m0 = m;
			}

			/*
			 * sync the DMA map for TX mbuf
			 */
			bus_dmamap_sync(sc->sc_dmat, dmamap, buflen,
			    len - buflen, BUS_DMASYNC_PREWRITE);
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
		MEC_EVCNT_INCR(&sc->sc_ev_txpkts);

		/*
		 * setup the transmit descriptor.
		 */
		txdcmd = TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen) | (len - 1);

		/*
		 * Set MEC_TXCMD_TXINT every MEC_NTXDESC_INTR packets
		 * if more than half txdescs have been queued
		 * because TX_EMPTY interrupts will rarely happen
		 * if TX queue is so stacked.
		 */
		if (sc->sc_txpending > (MEC_NTXDESC / 2) &&
		    (nexttx & (MEC_NTXDESC_INTR - 1)) == 0)
			txdcmd |= MEC_TXCMD_TXINT;

		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
			bus_dma_segment_t *segs = dmamap->dm_segs;

			DPRINTF(MEC_DEBUG_TXSEGS,
			    ("%s: nsegs = %d, pseg = %d, nptr = %d\n",
			    __func__, dmamap->dm_nsegs, pseg, nptr));

			switch (nptr) {
			case 3:
				KASSERT((segs[pseg + 2].ds_addr &
				    MEC_TXD_ALIGNMASK) == 0);
				txdcmd |= MEC_TXCMD_PTR3;
				txd->txd_ptr[2] =
				    TXPTR_LEN(segs[pseg + 2].ds_len - 1) |
				    segs[pseg + 2].ds_addr;
				/* FALLTHROUGH */
			case 2:
				KASSERT((segs[pseg + 1].ds_addr &
				    MEC_TXD_ALIGNMASK) == 0);
				txdcmd |= MEC_TXCMD_PTR2;
				txd->txd_ptr[1] =
				    TXPTR_LEN(segs[pseg + 1].ds_len - 1) |
				    segs[pseg + 1].ds_addr;
				/* FALLTHROUGH */
			case 1:
				/* the first pointer skips the part copied to the buffer */
				txdcmd |= MEC_TXCMD_PTR1;
				txd->txd_ptr[0] =
				    TXPTR_LEN(segs[pseg].ds_len - resid - 1) |
				    (segs[pseg].ds_addr + resid);
				break;
			default:
				panic("%s: impossible nptr in %s",
				    device_xname(sc->sc_dev), __func__);
				/* NOTREACHED */
			}
			/*
			 * Store a pointer to the packet so we can
			 * free it later.
			 */
			txs->txs_mbuf = m0;
		} else {
			/*
			 * In this case all data are copied to buffer in txdesc,
			 * we can free TX mbuf here.
			 */
			m_freem(m0);
		}
		txd->txd_cmd = txdcmd;

		DPRINTF(MEC_DEBUG_START,
		    ("%s: txd_cmd    = 0x%016llx\n",
		    __func__, txd->txd_cmd));
		DPRINTF(MEC_DEBUG_START,
		    ("%s: txd_ptr[0] = 0x%016llx\n",
		    __func__, txd->txd_ptr[0]));
		DPRINTF(MEC_DEBUG_START,
		    ("%s: txd_ptr[1] = 0x%016llx\n",
		    __func__, txd->txd_ptr[1]));
		DPRINTF(MEC_DEBUG_START,
		    ("%s: txd_ptr[2] = 0x%016llx\n",
		    __func__, txd->txd_ptr[2]));
		DPRINTF(MEC_DEBUG_START,
		    ("%s: len = %d (0x%04x), buflen = %d (0x%02x)\n",
		    __func__, len, len, buflen, buflen));

		/* sync TX descriptor */
		MEC_TXDESCSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* start TX */
		bus_space_write_8(st, sh, MEC_TX_RING_PTR, MEC_NEXTTX(nexttx));

		/* advance the TX pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;
	}

	if (sc->sc_txpending == MEC_NTXDESC - 1) {
		/* No more slots; notify upper layer. */
		MEC_EVCNT_INCR(&sc->sc_ev_txdstall);
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * If the transmitter was idle,
		 * reset the txdirty pointer and re-enable TX interrupt.
		 */
		if (opending == 0) {
			sc->sc_txdirty = firsttx;
			bus_space_write_8(st, sh, MEC_TX_ALIAS,
			    MEC_TX_ALIAS_INT_ENABLE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1452
1453 static void
1454 mec_stop(struct ifnet *ifp, int disable)
1455 {
1456 struct mec_softc *sc = ifp->if_softc;
1457 struct mec_txsoft *txs;
1458 int i;
1459
1460 DPRINTF(MEC_DEBUG_STOP, ("%s\n", __func__));
1461
1462 ifp->if_timer = 0;
1463 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1464
1465 callout_stop(&sc->sc_tick_ch);
1466 mii_down(&sc->sc_mii);
1467
1468 /* release any TX buffers */
1469 for (i = 0; i < MEC_NTXDESC; i++) {
1470 txs = &sc->sc_txsoft[i];
1471 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1472 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1473 m_freem(txs->txs_mbuf);
1474 txs->txs_mbuf = NULL;
1475 }
1476 }
1477 }
1478
1479 static int
1480 mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1481 {
1482 int s, error;
1483
1484 s = splnet();
1485
1486 error = ether_ioctl(ifp, cmd, data);
1487 if (error == ENETRESET) {
1488 /*
1489 * Multicast list has changed; set the hardware filter
1490 * accordingly.
1491 */
1492 if (ifp->if_flags & IFF_RUNNING)
1493 error = mec_init(ifp);
1494 else
1495 error = 0;
1496 }
1497
1498 /* Try to get more packets going. */
1499 mec_start(ifp);
1500
1501 splx(s);
1502 return error;
1503 }
1504
1505 static void
1506 mec_watchdog(struct ifnet *ifp)
1507 {
1508 struct mec_softc *sc = ifp->if_softc;
1509
1510 printf("%s: device timeout\n", device_xname(sc->sc_dev));
1511 ifp->if_oerrors++;
1512
1513 mec_init(ifp);
1514 }
1515
1516 static void
1517 mec_tick(void *arg)
1518 {
1519 struct mec_softc *sc = arg;
1520 int s;
1521
1522 s = splnet();
1523 mii_tick(&sc->sc_mii);
1524 splx(s);
1525
1526 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
1527 }
1528
1529 static void
1530 mec_setfilter(struct mec_softc *sc)
1531 {
1532 struct ethercom *ec = &sc->sc_ethercom;
1533 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1534 struct ether_multi *enm;
1535 struct ether_multistep step;
1536 bus_space_tag_t st = sc->sc_st;
1537 bus_space_handle_t sh = sc->sc_sh;
1538 uint64_t mchash;
1539 uint32_t control, hash;
1540 int mcnt;
1541
1542 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1543 control &= ~MEC_MAC_FILTER_MASK;
1544
1545 if (ifp->if_flags & IFF_PROMISC) {
1546 control |= MEC_MAC_FILTER_PROMISC;
1547 bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1548 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1549 return;
1550 }
1551
1552 mcnt = 0;
1553 mchash = 0;
1554 ETHER_FIRST_MULTI(step, ec, enm);
1555 while (enm != NULL) {
1556 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1557 /* set allmulti for a range of multicast addresses */
1558 control |= MEC_MAC_FILTER_ALLMULTI;
1559 bus_space_write_8(st, sh, MEC_MULTICAST,
1560 0xffffffffffffffffULL);
1561 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1562 return;
1563 }
1564
1565 #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
1566
1567 hash = mec_calchash(enm->enm_addrlo);
1568 mchash |= 1 << hash;
1569 mcnt++;
1570 ETHER_NEXT_MULTI(step, enm);
1571 }
1572
1573 ifp->if_flags &= ~IFF_ALLMULTI;
1574
1575 if (mcnt > 0)
1576 control |= MEC_MAC_FILTER_MATCHMULTI;
1577
1578 bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1579 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1580 }
1581
/*
 * Interrupt handler: acknowledge and dispatch RX/TX interrupts,
 * reinitialize the chip on fatal error conditions, and restart
 * the transmit queue if descriptors were reclaimed.
 * Returns nonzero if the interrupt was for this device.
 */
static int
mec_intr(void *arg)
{
	struct mec_softc *sc = arg;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t statreg, statack, txptr;
	int handled, sent;

	DPRINTF(MEC_DEBUG_INTR, ("%s: called\n", __func__));

	handled = sent = 0;

	for (;;) {
		statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);

		DPRINTF(MEC_DEBUG_INTR,
		    ("%s: INT_STAT = 0x%08x\n", __func__, statreg));

		statack = statreg & MEC_INT_STATUS_MASK;
		if (statack == 0)
			break;
		/* acknowledge the interrupts we are about to service */
		bus_space_write_8(st, sh, MEC_INT_STATUS, statack);

		handled = 1;

		if (statack &
		    (MEC_INT_RX_THRESHOLD |
		     MEC_INT_RX_FIFO_UNDERFLOW)) {
			mec_rxintr(sc);
		}

		if (statack &
		    (MEC_INT_TX_EMPTY |
		     MEC_INT_TX_PACKET_SENT |
		     MEC_INT_TX_ABORT)) {
			/* reclaim up to the hardware's current ring pointer */
			txptr = (statreg & MEC_INT_TX_RING_BUFFER_ALIAS)
			    >> MEC_INT_TX_RING_BUFFER_SHIFT;
			mec_txintr(sc, txptr);
			sent = 1;
			if ((statack & MEC_INT_TX_EMPTY) != 0) {
				/*
				 * disable TX interrupt to stop
				 * TX empty interrupt
				 */
				bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
				DPRINTF(MEC_DEBUG_INTR,
				    ("%s: disable TX_INT\n", __func__));
			}
#ifdef MEC_EVENT_COUNTERS
			if ((statack & MEC_INT_TX_EMPTY) != 0)
				MEC_EVCNT_INCR(&sc->sc_ev_txempty);
			if ((statack & MEC_INT_TX_PACKET_SENT) != 0)
				MEC_EVCNT_INCR(&sc->sc_ev_txsent);
#endif
		}

		if (statack &
		    (MEC_INT_TX_LINK_FAIL |
		     MEC_INT_TX_MEM_ERROR |
		     MEC_INT_TX_ABORT |
		     MEC_INT_RX_FIFO_UNDERFLOW |
		     MEC_INT_RX_DMA_UNDERFLOW)) {
			/* fatal error: log the status and reinitialize */
			printf("%s: %s: interrupt status = 0x%08x\n",
			    device_xname(sc->sc_dev), __func__, statreg);
			mec_init(ifp);
			break;
		}
	}

	if (sent && !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* try to get more packets going */
		mec_start(ifp);
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, statreg);
#endif

	return handled;
}
1665
/*
 * RX interrupt handler: pull received packets off the RX ring,
 * copy each into a freshly allocated mbuf, recycle the RX buffer
 * back into the hardware FIFO, and pass the packet up the stack.
 */
static void
mec_rxintr(struct mec_softc *sc)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mec_rxdesc *rxd;
	uint64_t rxstat;
	u_int len;
	int i;
	uint32_t crc;

	DPRINTF(MEC_DEBUG_RXINTR, ("%s: called\n", __func__));

	for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
		rxd = &sc->sc_rxdesc[i];

		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
		rxstat = rxd->rxd_stat;

		DPRINTF(MEC_DEBUG_RXINTR,
		    ("%s: rxstat = 0x%016llx, rxptr = %d\n",
		    __func__, rxstat, i));
		DPRINTF(MEC_DEBUG_RXINTR, ("%s: rxfifo = 0x%08x\n",
		    __func__, (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));

		if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
			/* no more received packets; re-arm for DMA */
			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		len = rxstat & MEC_RXSTAT_LEN;

		if (len < ETHER_MIN_LEN ||
		    len > (MCLBYTES - MEC_ETHER_ALIGN)) {
			/* invalid length packet; drop it. */
			DPRINTF(MEC_DEBUG_RXINTR,
			    ("%s: wrong packet\n", __func__));
 dropit:
			ifp->if_ierrors++;
			rxd->rxd_stat = 0;
			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
			bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
			    MEC_CDRXADDR(sc, i));
			continue;
		}

		/*
		 * If 802.1Q VLAN MTU is enabled, ignore the bad packet error.
		 */
		if ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) != 0)
			rxstat &= ~MEC_RXSTAT_BADPACKET;

		if (rxstat &
		    (MEC_RXSTAT_BADPACKET |
		     MEC_RXSTAT_LONGEVENT |
		     MEC_RXSTAT_INVALID |
		     MEC_RXSTAT_CRCERROR |
		     MEC_RXSTAT_VIOLATION)) {
			printf("%s: %s: status = 0x%016llx\n",
			    device_xname(sc->sc_dev), __func__, rxstat);
			goto dropit;
		}

		/*
		 * The MEC includes the CRC with every packet. Trim
		 * it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * now allocate an mbuf (and possibly a cluster) to hold
		 * the received packet.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate RX mbuf\n",
			    device_xname(sc->sc_dev));
			goto dropit;
		}
		if (len > (MHLEN - MEC_ETHER_ALIGN)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate RX cluster\n",
				    device_xname(sc->sc_dev));
				m_freem(m);
				m = NULL;
				goto dropit;
			}
		}

		/*
		 * Note MEC chip seems to insert 2 byte padding at the top of
		 * RX buffer, but we copy whole buffer to avoid unaligned copy.
		 */
		MEC_RXBUFSYNC(sc, i, len + ETHER_CRC_LEN, BUS_DMASYNC_POSTREAD);
		memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
		/* save the trailing FCS for the RX checksum fixup below */
		crc = be32dec(rxd->rxd_buf + MEC_ETHER_ALIGN + len);
		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
		m->m_data += MEC_ETHER_ALIGN;

		/* put RX buffer into FIFO again */
		rxd->rxd_stat = 0;
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		if ((ifp->if_csum_flags_rx & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0)
			mec_rxcsum(sc, m, RXSTAT_CKSUM(rxstat), crc);

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* update RX pointer */
	sc->sc_rxptr = i;
}
1796
/*
 * Fix up the hardware RX checksum for TCP/UDP over IPv4 and record
 * it in the mbuf for the upper layers (M_CSUM_DATA with
 * M_CSUM_NO_PSEUDOHDR).  The chip checksums the whole frame
 * including the Ethernet header and trailing FCS, so those are
 * deducted here.  'rxcsum' is the hardware checksum, 'crc' the
 * frame's FCS as saved by mec_rxintr().
 */
static void
mec_rxcsum(struct mec_softc *sc, struct mbuf *m, uint16_t rxcsum, uint32_t crc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	u_int len, pktlen, hlen;
	uint32_t csum_data, dsum;
	int csum_flags;
	const uint16_t *dp;

	csum_data = 0;
	csum_flags = 0;

	len = m->m_len;
	if (len < ETHER_HDR_LEN + sizeof(struct ip))
		goto out;
	pktlen = len - ETHER_HDR_LEN;
	eh = mtod(m, struct ether_header *);
	if (ntohs(eh->ether_type) != ETHERTYPE_IP)
		goto out;
	ip = (struct ip *)((uint8_t *)eh + ETHER_HDR_LEN);
	if (ip->ip_v != IPVERSION)
		goto out;

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip))
		goto out;

	/*
	 * Bail if too short, has random trailing garbage, truncated,
	 * fragment, or has ethernet pad.
	 */
	if (ntohs(ip->ip_len) < hlen ||
	    ntohs(ip->ip_len) != pktlen ||
	    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0)
		goto out;

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0 ||
		    pktlen < (hlen + sizeof(struct tcphdr)))
			goto out;
		csum_flags = M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
		break;
	case IPPROTO_UDP:
		if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0 ||
		    pktlen < (hlen + sizeof(struct udphdr)))
			goto out;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			goto out;	/* no checksum */
		csum_flags = M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
		break;
	default:
		goto out;
	}

	/*
	 * The computed checksum includes Ethernet header, IP headers,
	 * and CRC, so we have to deduct them.
	 * Note IP header cksum should be 0xffff so we don't have to
	 * deduct it.
	 */
	dsum = 0;

	/* deduct Ethernet header */
	dp = (const uint16_t *)eh;
	for (hlen = 0; hlen < (ETHER_HDR_LEN / sizeof(uint16_t)); hlen++)
		dsum += ntohs(*dp++);

	/* deduct CRC; it straddles a 16-bit boundary when len is odd */
	if (len & 1) {
		dsum += (crc >> 24) & 0x00ff;
		dsum += (crc >> 8) & 0xffff;
		dsum += (crc << 8) & 0xff00;
	} else {
		dsum += (crc >> 16) & 0xffff;
		dsum += (crc >> 0) & 0xffff;
	}
	/* fold carries into 16 bits */
	while (dsum >> 16)
		dsum = (dsum >> 16) + (dsum & 0xffff);

	/* subtract via one's-complement addition of ~dsum */
	csum_data = rxcsum;
	csum_data += (uint16_t)~dsum;

	while (csum_data >> 16)
		csum_data = (csum_data >> 16) + (csum_data & 0xffff);

 out:
	m->m_pkthdr.csum_flags = csum_flags;
	m->m_pkthdr.csum_data = csum_data;
}
1891
/*
 * Reclaim transmitted descriptors up to the hardware ring pointer
 * 'txptr': free DMA maps and mbufs, update collision/error stats,
 * and clear IFF_OACTIVE and the watchdog when ring space frees up.
 */
static void
mec_txintr(struct mec_softc *sc, uint32_t txptr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	uint64_t txstat;
	int i;
	u_int col;

	DPRINTF(MEC_DEBUG_TXINTR, ("%s: called\n", __func__));

	for (i = sc->sc_txdirty; i != txptr && sc->sc_txpending != 0;
	    i = MEC_NEXTTX(i), sc->sc_txpending--) {
		txd = &sc->sc_txdesc[i];

		MEC_TXCMDSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = txd->txd_stat;
		DPRINTF(MEC_DEBUG_TXINTR,
		    ("%s: dirty = %d, txstat = 0x%016llx\n",
		    __func__, i, txstat));
		if ((txstat & MEC_TXSTAT_SENT) == 0) {
			/* not sent yet; re-arm the command for DMA */
			MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/* free the mbuf if the packet was DMA mapped */
		txs = &sc->sc_txsoft[i];
		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
			dmamap = txs->txs_dmamap;
			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
		ifp->if_collisions += col;

		if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
			printf("%s: TX error: txstat = 0x%016llx\n",
			    device_xname(sc->sc_dev), txstat);
			ifp->if_oerrors++;
		} else
			ifp->if_opackets++;
	}

	/* update the dirty TX buffer pointer */
	sc->sc_txdirty = i;
	DPRINTF(MEC_DEBUG_INTR,
	    ("%s: sc_txdirty = %2d, sc_txpending = %2d\n",
	    __func__, sc->sc_txdirty, sc->sc_txpending));

	/* cancel the watchdog timer if there are no pending TX packets */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
	if (sc->sc_txpending < MEC_NTXDESC - MEC_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
1954
1955 static void
1956 mec_shutdown(void *arg)
1957 {
1958 struct mec_softc *sc = arg;
1959
1960 mec_stop(&sc->sc_ethercom.ec_if, 1);
1961 /* make sure to stop DMA etc. */
1962 mec_reset(sc);
1963 }
1964