/* $NetBSD: if_mec.c,v 1.43 2010/04/05 07:19:32 joerg Exp $ */
2
3 /*-
4 * Copyright (c) 2004, 2008 Izumi Tsutsui. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*
28 * Copyright (c) 2003 Christopher SEKIYA
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 * This product includes software developed for the
42 * NetBSD Project. See http://www.NetBSD.org/ for
43 * information about NetBSD.
44 * 4. The name of the author may not be used to endorse or promote products
45 * derived from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */
58
59 /*
60 * MACE MAC-110 Ethernet driver
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.43 2010/04/05 07:19:32 joerg Exp $");
65
66 #include "opt_ddb.h"
67 #include "rnd.h"
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/device.h>
72 #include <sys/callout.h>
73 #include <sys/mbuf.h>
74 #include <sys/malloc.h>
75 #include <sys/kernel.h>
76 #include <sys/socket.h>
77 #include <sys/ioctl.h>
78 #include <sys/errno.h>
79
80 #if NRND > 0
81 #include <sys/rnd.h>
82 #endif
83
84 #include <net/if.h>
85 #include <net/if_dl.h>
86 #include <net/if_media.h>
87 #include <net/if_ether.h>
88
89 #include <netinet/in.h>
90 #include <netinet/in_systm.h>
91 #include <netinet/ip.h>
92 #include <netinet/tcp.h>
93 #include <netinet/udp.h>
94
95 #include <net/bpf.h>
96
97 #include <machine/bus.h>
98 #include <machine/intr.h>
99 #include <machine/machtype.h>
100
101 #include <dev/mii/mii.h>
102 #include <dev/mii/miivar.h>
103
104 #include <sgimips/mace/macevar.h>
105 #include <sgimips/mace/if_mecreg.h>
106
107 #include <dev/arcbios/arcbios.h>
108 #include <dev/arcbios/arcbiosvar.h>
109
110 /* #define MEC_DEBUG */
111
112 #ifdef MEC_DEBUG
113 #define MEC_DEBUG_RESET 0x01
114 #define MEC_DEBUG_START 0x02
115 #define MEC_DEBUG_STOP 0x04
116 #define MEC_DEBUG_INTR 0x08
117 #define MEC_DEBUG_RXINTR 0x10
118 #define MEC_DEBUG_TXINTR 0x20
119 #define MEC_DEBUG_TXSEGS 0x40
120 uint32_t mec_debug = 0;
121 #define DPRINTF(x, y) if (mec_debug & (x)) printf y
122 #else
123 #define DPRINTF(x, y) /* nothing */
124 #endif
125
126 /* #define MEC_EVENT_COUNTERS */
127
128 #ifdef MEC_EVENT_COUNTERS
129 #define MEC_EVCNT_INCR(ev) (ev)->ev_count++
130 #else
131 #define MEC_EVCNT_INCR(ev) do {} while (/* CONSTCOND */ 0)
132 #endif
133
134 /*
135 * Transmit descriptor list size
136 */
137 #define MEC_NTXDESC 64
138 #define MEC_NTXDESC_MASK (MEC_NTXDESC - 1)
139 #define MEC_NEXTTX(x) (((x) + 1) & MEC_NTXDESC_MASK)
140 #define MEC_NTXDESC_RSVD 4
141 #define MEC_NTXDESC_INTR 8
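/*
 * MEC_NTXDESC is a power of two, so the ring index wraps with a simple
 * mask; for example, MEC_NEXTTX(63) == (63 + 1) & 0x3f == 0.
 * MEC_NTXDESC_RSVD is the slack checked before clearing IFF_OACTIVE in
 * mec_txintr(), and MEC_NTXDESC_INTR controls how often mec_start()
 * requests a TX interrupt.
 */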
142
143 /*
144 * software state for TX
145 */
146 struct mec_txsoft {
147 struct mbuf *txs_mbuf; /* head of our mbuf chain */
148 bus_dmamap_t txs_dmamap; /* our DMA map */
149 uint32_t txs_flags;
150 #define MEC_TXS_BUFLEN_MASK 0x0000007f /* data len in txd_buf */
151 #define MEC_TXS_TXDPTR 0x00000080 /* concat txd_ptr is used */
152 };
153
154 /*
155 * Transmit buffer descriptor
156 */
157 #define MEC_TXDESCSIZE 128
158 #define MEC_NTXPTR 3
159 #define MEC_TXD_BUFOFFSET sizeof(uint64_t)
160 #define MEC_TXD_BUFOFFSET1 \
161 (sizeof(uint64_t) + sizeof(uint64_t) * MEC_NTXPTR)
162 #define MEC_TXD_BUFSIZE (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
163 #define MEC_TXD_BUFSIZE1 (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET1)
164 #define MEC_TXD_BUFSTART(len) (MEC_TXD_BUFSIZE - (len))
165 #define MEC_TXD_ALIGN 8
166 #define MEC_TXD_ALIGNMASK (MEC_TXD_ALIGN - 1)
167 #define MEC_TXD_ROUNDUP(addr) \
168 (((addr) + MEC_TXD_ALIGNMASK) & ~(uint64_t)MEC_TXD_ALIGNMASK)
169 #define MEC_NTXSEG 16
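/*
 * With the values above, MEC_TXD_BUFOFFSET == 8 (just the command word) and
 * MEC_TXD_BUFOFFSET1 == 32 (command word plus three concatenate pointers),
 * so a descriptor holds MEC_TXD_BUFSIZE == 120 or MEC_TXD_BUFSIZE1 == 96
 * bytes of inline data.  MEC_TXD_BUFSTART(len) == 120 - len places copied
 * data at the end of txd_buf, and MEC_TXD_ROUNDUP() rounds up to the next
 * multiple of 8, e.g. MEC_TXD_ROUNDUP(0x1001) == 0x1008.
 */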
170
171 struct mec_txdesc {
172 volatile uint64_t txd_cmd;
173 #define MEC_TXCMD_DATALEN 0x000000000000ffff /* data length */
174 #define MEC_TXCMD_BUFSTART 0x00000000007f0000 /* start byte offset */
175 #define TXCMD_BUFSTART(x) ((x) << 16)
176 #define MEC_TXCMD_TERMDMA 0x0000000000800000 /* stop DMA on abort */
177 #define MEC_TXCMD_TXINT 0x0000000001000000 /* INT after TX done */
178 #define MEC_TXCMD_PTR1 0x0000000002000000 /* valid 1st txd_ptr */
179 #define MEC_TXCMD_PTR2 0x0000000004000000 /* valid 2nd txd_ptr */
180 #define MEC_TXCMD_PTR3 0x0000000008000000 /* valid 3rd txd_ptr */
181 #define MEC_TXCMD_UNUSED 0xfffffffff0000000ULL /* should be zero */
182
183 #define txd_stat txd_cmd
184 #define MEC_TXSTAT_LEN 0x000000000000ffff /* TX length */
185 #define MEC_TXSTAT_COLCNT 0x00000000000f0000 /* collision count */
186 #define MEC_TXSTAT_COLCNT_SHIFT 16
187 #define MEC_TXSTAT_LATE_COL 0x0000000000100000 /* late collision */
188 #define MEC_TXSTAT_CRCERROR 0x0000000000200000 /* */
189 #define MEC_TXSTAT_DEFERRED 0x0000000000400000 /* */
190 #define MEC_TXSTAT_SUCCESS 0x0000000000800000 /* TX complete */
191 #define MEC_TXSTAT_TOOBIG 0x0000000001000000 /* */
192 #define MEC_TXSTAT_UNDERRUN 0x0000000002000000 /* */
193 #define MEC_TXSTAT_COLLISIONS 0x0000000004000000 /* */
194 #define MEC_TXSTAT_EXDEFERRAL 0x0000000008000000 /* */
195 #define MEC_TXSTAT_COLLIDED 0x0000000010000000 /* */
196 #define MEC_TXSTAT_UNUSED 0x7fffffffe0000000ULL /* should be zero */
197 #define MEC_TXSTAT_SENT 0x8000000000000000ULL /* packet sent */
198
199 union {
200 uint64_t txptr[MEC_NTXPTR];
201 #define MEC_TXPTR_UNUSED2 0x0000000000000007 /* should be zero */
202 #define MEC_TXPTR_DMAADDR 0x00000000fffffff8 /* TX DMA address */
203 #define MEC_TXPTR_LEN 0x0000ffff00000000ULL /* buffer length */
204 #define TXPTR_LEN(x) ((uint64_t)(x) << 32)
205 #define MEC_TXPTR_UNUSED1 0xffff000000000000ULL /* should be zero */
206
207 uint8_t txbuf[MEC_TXD_BUFSIZE];
208 } txd_data;
209 #define txd_ptr txd_data.txptr
210 #define txd_buf txd_data.txbuf
211 };
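/*
 * Example of how mec_start() below builds txd_cmd for a small frame that is
 * copied entirely into txd_buf: for a 60-byte (padded) packet, buflen == 60,
 * so the MEC_TXCMD_BUFSTART field gets 128 - 60 == 68 (apparently the offset
 * of the first data byte from the top of the 128-byte descriptor) and the
 * MEC_TXCMD_DATALEN field gets len - 1 == 59.  Larger packets additionally
 * set MEC_TXCMD_PTR1..3 and point txd_ptr[] at the DMA segments.
 */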
212
213 /*
214 * Receive buffer size
215 */
216 #define MEC_NRXDESC 16
217 #define MEC_NRXDESC_MASK (MEC_NRXDESC - 1)
218 #define MEC_NEXTRX(x) (((x) + 1) & MEC_NRXDESC_MASK)
219
220 /*
221 * Receive buffer description
222 */
223 #define MEC_RXDESCSIZE 4096 /* umm, should be 4kbyte aligned */
224 #define MEC_RXD_NRXPAD 3
225 #define MEC_RXD_DMAOFFSET (1 + MEC_RXD_NRXPAD)
226 #define MEC_RXD_BUFOFFSET (MEC_RXD_DMAOFFSET * sizeof(uint64_t))
227 #define MEC_RXD_BUFSIZE (MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
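/*
 * With the values above, each 4096-byte rxdesc starts with the 8-byte status
 * word plus 3 pad words (MEC_RXD_BUFOFFSET == 32), leaving MEC_RXD_BUFSIZE ==
 * 4064 bytes for the received frame.  MEC_RXD_DMAOFFSET (counted in 64-bit
 * words) is what mec_init() programs into MEC_DMA_CONTROL below.
 */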
228
229 struct mec_rxdesc {
230 volatile uint64_t rxd_stat;
231 #define MEC_RXSTAT_LEN 0x000000000000ffff /* data length */
232 #define MEC_RXSTAT_VIOLATION 0x0000000000010000 /* code violation (?) */
233 #define MEC_RXSTAT_UNUSED2 0x0000000000020000 /* unknown (?) */
234 #define MEC_RXSTAT_CRCERROR 0x0000000000040000 /* CRC error */
235 #define MEC_RXSTAT_MULTICAST 0x0000000000080000 /* multicast packet */
236 #define MEC_RXSTAT_BROADCAST 0x0000000000100000 /* broadcast packet */
237 #define MEC_RXSTAT_INVALID 0x0000000000200000 /* invalid preamble */
238 #define MEC_RXSTAT_LONGEVENT 0x0000000000400000 /* long packet */
239 #define MEC_RXSTAT_BADPACKET 0x0000000000800000 /* bad packet */
240 #define MEC_RXSTAT_CAREVENT 0x0000000001000000 /* carrier event */
241 #define MEC_RXSTAT_MATCHMCAST 0x0000000002000000 /* match multicast */
242 #define MEC_RXSTAT_MATCHMAC 0x0000000004000000 /* match MAC */
243 #define MEC_RXSTAT_SEQNUM 0x00000000f8000000 /* sequence number */
244 #define MEC_RXSTAT_CKSUM 0x0000ffff00000000ULL /* IP checksum */
245 #define RXSTAT_CKSUM(x) (((uint64_t)(x) & MEC_RXSTAT_CKSUM) >> 32)
246 #define MEC_RXSTAT_UNUSED1 0x7fff000000000000ULL /* should be zero */
247 #define MEC_RXSTAT_RECEIVED 0x8000000000000000ULL /* set to 1 on RX */
248 uint64_t rxd_pad1[MEC_RXD_NRXPAD];
249 uint8_t rxd_buf[MEC_RXD_BUFSIZE];
250 };
251
252 /*
253 * control structures for DMA ops
254 */
255 struct mec_control_data {
256 /*
257 * TX descriptors and buffers
258 */
259 struct mec_txdesc mcd_txdesc[MEC_NTXDESC];
260
261 /*
262 * RX descriptors and buffers
263 */
264 struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
265 };
266
267 /*
 * It _seems_ there are some restrictions on descriptor addresses:
269 *
270 * - Base address of txdescs should be 8kbyte aligned
271 * - Each txdesc should be 128byte aligned
272 * - Each rxdesc should be 4kbyte aligned
273 *
 * So we should specify 8 KB alignment to allocate the txdescs.
 * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
 * so the rxdescs are also allocated 4 KB aligned.
277 */
278 #define MEC_CONTROL_DATA_ALIGN (8 * 1024)
279
280 #define MEC_CDOFF(x) offsetof(struct mec_control_data, x)
281 #define MEC_CDTXOFF(x) MEC_CDOFF(mcd_txdesc[(x)])
282 #define MEC_CDRXOFF(x) MEC_CDOFF(mcd_rxdesc[(x)])
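/*
 * For example, MEC_CDTXOFF(1) == 128 and MEC_CDRXOFF(0) == 64 * 128 == 8192,
 * so with the 8 KB-aligned base required above every rxdesc also ends up
 * 4 KB aligned.
 */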
283
284 /*
285 * software state per device
286 */
287 struct mec_softc {
288 device_t sc_dev; /* generic device structures */
289
290 bus_space_tag_t sc_st; /* bus_space tag */
291 bus_space_handle_t sc_sh; /* bus_space handle */
292 bus_dma_tag_t sc_dmat; /* bus_dma tag */
293
294 struct ethercom sc_ethercom; /* Ethernet common part */
295
296 struct mii_data sc_mii; /* MII/media information */
297 int sc_phyaddr; /* MII address */
298 struct callout sc_tick_ch; /* tick callout */
299
300 uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */
301
302 bus_dmamap_t sc_cddmamap; /* bus_dma map for control data */
303 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
304
305 /* pointer to allocated control data */
306 struct mec_control_data *sc_control_data;
307 #define sc_txdesc sc_control_data->mcd_txdesc
308 #define sc_rxdesc sc_control_data->mcd_rxdesc
309
310 /* software state for TX descs */
311 struct mec_txsoft sc_txsoft[MEC_NTXDESC];
312
313 int sc_txpending; /* number of TX requests pending */
314 int sc_txdirty; /* first dirty TX descriptor */
315 int sc_txlast; /* last used TX descriptor */
316
317 int sc_rxptr; /* next ready RX buffer */
318
319 #if NRND > 0
320 rndsource_element_t sc_rnd_source; /* random source */
321 #endif
322 #ifdef MEC_EVENT_COUNTERS
323 struct evcnt sc_ev_txpkts; /* TX packets queued total */
324 struct evcnt sc_ev_txdpad; /* TX packets padded in txdesc buf */
325 struct evcnt sc_ev_txdbuf; /* TX packets copied to txdesc buf */
326 struct evcnt sc_ev_txptr1; /* TX packets using concat ptr1 */
327 struct evcnt sc_ev_txptr1a; /* TX packets w/ptr1 ~160bytes */
328 struct evcnt sc_ev_txptr1b; /* TX packets w/ptr1 ~256bytes */
329 struct evcnt sc_ev_txptr1c; /* TX packets w/ptr1 ~512bytes */
330 struct evcnt sc_ev_txptr1d; /* TX packets w/ptr1 ~1024bytes */
331 struct evcnt sc_ev_txptr1e; /* TX packets w/ptr1 >1024bytes */
332 struct evcnt sc_ev_txptr2; /* TX packets using concat ptr1,2 */
333 struct evcnt sc_ev_txptr2a; /* TX packets w/ptr2 ~160bytes */
334 struct evcnt sc_ev_txptr2b; /* TX packets w/ptr2 ~256bytes */
335 struct evcnt sc_ev_txptr2c; /* TX packets w/ptr2 ~512bytes */
336 struct evcnt sc_ev_txptr2d; /* TX packets w/ptr2 ~1024bytes */
337 struct evcnt sc_ev_txptr2e; /* TX packets w/ptr2 >1024bytes */
338 struct evcnt sc_ev_txptr3; /* TX packets using concat ptr1,2,3 */
339 struct evcnt sc_ev_txptr3a; /* TX packets w/ptr3 ~160bytes */
340 struct evcnt sc_ev_txptr3b; /* TX packets w/ptr3 ~256bytes */
341 struct evcnt sc_ev_txptr3c; /* TX packets w/ptr3 ~512bytes */
342 struct evcnt sc_ev_txptr3d; /* TX packets w/ptr3 ~1024bytes */
343 struct evcnt sc_ev_txptr3e; /* TX packets w/ptr3 >1024bytes */
344 struct evcnt sc_ev_txmbuf; /* TX packets copied to new mbufs */
345 struct evcnt sc_ev_txmbufa; /* TX packets w/mbuf ~160bytes */
346 struct evcnt sc_ev_txmbufb; /* TX packets w/mbuf ~256bytes */
347 struct evcnt sc_ev_txmbufc; /* TX packets w/mbuf ~512bytes */
348 struct evcnt sc_ev_txmbufd; /* TX packets w/mbuf ~1024bytes */
349 struct evcnt sc_ev_txmbufe; /* TX packets w/mbuf >1024bytes */
350 struct evcnt sc_ev_txptrs; /* TX packets using ptrs total */
351 struct evcnt sc_ev_txptrc0; /* TX packets w/ptrs no hdr chain */
352 struct evcnt sc_ev_txptrc1; /* TX packets w/ptrs 1 hdr chain */
353 struct evcnt sc_ev_txptrc2; /* TX packets w/ptrs 2 hdr chains */
354 struct evcnt sc_ev_txptrc3; /* TX packets w/ptrs 3 hdr chains */
355 struct evcnt sc_ev_txptrc4; /* TX packets w/ptrs 4 hdr chains */
356 struct evcnt sc_ev_txptrc5; /* TX packets w/ptrs 5 hdr chains */
357 struct evcnt sc_ev_txptrc6; /* TX packets w/ptrs >5 hdr chains */
358 struct evcnt sc_ev_txptrh0; /* TX packets w/ptrs ~8bytes hdr */
359 struct evcnt sc_ev_txptrh1; /* TX packets w/ptrs ~16bytes hdr */
360 struct evcnt sc_ev_txptrh2; /* TX packets w/ptrs ~32bytes hdr */
361 struct evcnt sc_ev_txptrh3; /* TX packets w/ptrs ~64bytes hdr */
362 struct evcnt sc_ev_txptrh4; /* TX packets w/ptrs ~80bytes hdr */
363 struct evcnt sc_ev_txptrh5; /* TX packets w/ptrs ~96bytes hdr */
364 struct evcnt sc_ev_txdstall; /* TX stalled due to no txdesc */
365 struct evcnt sc_ev_txempty; /* TX empty interrupts */
366 struct evcnt sc_ev_txsent; /* TX sent interrupts */
367 #endif
368 };
369
370 #define MEC_CDTXADDR(sc, x) ((sc)->sc_cddma + MEC_CDTXOFF(x))
371 #define MEC_CDRXADDR(sc, x) ((sc)->sc_cddma + MEC_CDRXOFF(x))
372
373 #define MEC_TXDESCSYNC(sc, x, ops) \
374 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
375 MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
376 #define MEC_TXCMDSYNC(sc, x, ops) \
377 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
378 MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
379
380 #define MEC_RXSTATSYNC(sc, x, ops) \
381 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
382 MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
383 #define MEC_RXBUFSYNC(sc, x, len, ops) \
384 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
385 MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET, \
386 MEC_ETHER_ALIGN + (len), (ops))
387
388 /* XXX these values should be moved to <net/if_ether.h> ? */
389 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
390 #define MEC_ETHER_ALIGN 2
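/*
 * ETHER_PAD_LEN evaluates to 60 (the 64-byte minimum frame minus the 4-byte
 * CRC), the smallest payload handed to the chip.  The 2-byte MEC_ETHER_ALIGN
 * offset keeps the IP header 32-bit aligned behind the 14-byte Ethernet
 * header.
 */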
391
392 static int mec_match(device_t, cfdata_t, void *);
393 static void mec_attach(device_t, device_t, void *);
394
395 static int mec_mii_readreg(device_t, int, int);
396 static void mec_mii_writereg(device_t, int, int, int);
397 static int mec_mii_wait(struct mec_softc *);
398 static void mec_statchg(device_t);
399
400 static void enaddr_aton(const char *, uint8_t *);
401
402 static int mec_init(struct ifnet * ifp);
403 static void mec_start(struct ifnet *);
404 static void mec_watchdog(struct ifnet *);
405 static void mec_tick(void *);
406 static int mec_ioctl(struct ifnet *, u_long, void *);
407 static void mec_reset(struct mec_softc *);
408 static void mec_setfilter(struct mec_softc *);
409 static int mec_intr(void *arg);
410 static void mec_stop(struct ifnet *, int);
411 static void mec_rxintr(struct mec_softc *);
412 static void mec_rxcsum(struct mec_softc *, struct mbuf *, uint16_t,
413 uint32_t);
414 static void mec_txintr(struct mec_softc *, uint32_t);
415 static bool mec_shutdown(device_t, int);
416
417 CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc),
418 mec_match, mec_attach, NULL, NULL);
419
420 static int mec_matched = 0;
421
422 static int
423 mec_match(device_t parent, cfdata_t cf, void *aux)
424 {
425
426 /* allow only one device */
427 if (mec_matched)
428 return 0;
429
430 mec_matched = 1;
431 return 1;
432 }
433
434 static void
435 mec_attach(device_t parent, device_t self, void *aux)
436 {
437 struct mec_softc *sc = device_private(self);
438 struct mace_attach_args *maa = aux;
439 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
440 uint64_t address, command;
441 const char *macaddr;
442 struct mii_softc *child;
443 bus_dma_segment_t seg;
444 int i, err, rseg;
445 bool mac_is_fake;
446
447 sc->sc_dev = self;
448 sc->sc_st = maa->maa_st;
449 if (bus_space_subregion(sc->sc_st, maa->maa_sh,
450 maa->maa_offset, 0, &sc->sc_sh) != 0) {
451 aprint_error(": can't map i/o space\n");
452 return;
453 }
454
455 /* set up DMA structures */
456 sc->sc_dmat = maa->maa_dmat;
457
458 /*
459 * Allocate the control data structures, and create and load the
460 * DMA map for it.
461 */
462 if ((err = bus_dmamem_alloc(sc->sc_dmat,
463 sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
464 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
465 aprint_error(": unable to allocate control data, error = %d\n",
466 err);
467 goto fail_0;
468 }
469 /*
470 * XXX needs re-think...
 * The control data structure contains all of the RX data buffers, so
 * BUS_DMA_COHERENT (which disables caching) may cause a performance
 * penalty when copying data from the RX buffers to mbufs in normal memory,
 * though without it we have to make sure all bus_dmamap_sync(9) ops
 * are called properly.
476 */
477 if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
478 sizeof(struct mec_control_data),
479 (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
480 aprint_error(": unable to map control data, error = %d\n", err);
481 goto fail_1;
482 }
483 memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));
484
485 if ((err = bus_dmamap_create(sc->sc_dmat,
486 sizeof(struct mec_control_data), 1,
487 sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
488 aprint_error(": unable to create control data DMA map,"
489 " error = %d\n", err);
490 goto fail_2;
491 }
492 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
493 sc->sc_control_data, sizeof(struct mec_control_data), NULL,
494 BUS_DMA_NOWAIT)) != 0) {
495 aprint_error(": unable to load control data DMA map,"
496 " error = %d\n", err);
497 goto fail_3;
498 }
499
500 /* create TX buffer DMA maps */
501 for (i = 0; i < MEC_NTXDESC; i++) {
502 if ((err = bus_dmamap_create(sc->sc_dmat,
503 MCLBYTES, MEC_NTXSEG, MCLBYTES, PAGE_SIZE, 0,
504 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
505 aprint_error(": unable to create tx DMA map %d,"
506 " error = %d\n", i, err);
507 goto fail_4;
508 }
509 }
510
511 callout_init(&sc->sc_tick_ch, 0);
512
513 /* get Ethernet address from ARCBIOS */
514 if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
515 aprint_error(": unable to get MAC address!\n");
516 goto fail_4;
517 }
518 /*
 * On some machines the DS2502 chip storing the serial number /
 * MAC address is on the PCI riser board - if this board is
 * missing, ARCBIOS will not know a good Ethernet address (but
 * otherwise the machine will work fine).
523 */
524 mac_is_fake = false;
525 if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) {
526 uint32_t ui = 0;
527 const char * netaddr =
528 ARCBIOS->GetEnvironmentVariable("netaddr");
529
530 /*
531 * Create a MAC address by abusing the "netaddr" env var
532 */
533 sc->sc_enaddr[0] = 0xf2;
534 sc->sc_enaddr[1] = 0x0b;
535 sc->sc_enaddr[2] = 0xa4;
536 if (netaddr) {
537 mac_is_fake = true;
538 while (*netaddr) {
539 int v = 0;
540 while (*netaddr && *netaddr != '.') {
541 if (*netaddr >= '0' && *netaddr <= '9')
542 v = v*10 + (*netaddr - '0');
543 netaddr++;
544 }
545 ui <<= 8;
546 ui |= v;
547 if (*netaddr == '.')
548 netaddr++;
549 }
550 }
551 memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3);
552 }
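	/*
	 * For example, with netaddr "192.168.1.10" the loop above yields
	 * ui == 0xc0a8010a and (since sgimips is big-endian) a locally
	 * administered address of f2:0b:a4:a8:01:0a, i.e. the fixed prefix
	 * plus the low three octets of the IP address.
	 */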
553 if (!mac_is_fake)
554 enaddr_aton(macaddr, sc->sc_enaddr);
555
556 /* set the Ethernet address */
557 address = 0;
558 for (i = 0; i < ETHER_ADDR_LEN; i++) {
559 address = address << 8;
560 address |= sc->sc_enaddr[i];
561 }
562 bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address);
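	/*
	 * The loop above packs the six octets into the low 48 bits of a
	 * 64-bit value, e.g. 08:00:69:12:34:56 becomes 0x0000080069123456,
	 * which is what the MEC_STATION register appears to expect.
	 */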
563
564 /* reset device */
565 mec_reset(sc);
566
567 command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);
568
569 aprint_normal(": MAC-110 Ethernet, rev %u\n",
570 (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT));
571
572 if (mac_is_fake)
573 aprint_normal_dev(self,
574 "could not get ethernet address from firmware"
575 " - generated one from the \"netaddr\" environment"
576 " variable\n");
577 aprint_normal_dev(self, "Ethernet address %s\n",
578 ether_sprintf(sc->sc_enaddr));
579
580 /* Done, now attach everything */
581
582 sc->sc_mii.mii_ifp = ifp;
583 sc->sc_mii.mii_readreg = mec_mii_readreg;
584 sc->sc_mii.mii_writereg = mec_mii_writereg;
585 sc->sc_mii.mii_statchg = mec_statchg;
586
587 /* Set up PHY properties */
588 sc->sc_ethercom.ec_mii = &sc->sc_mii;
589 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
590 ether_mediastatus);
591 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
592 MII_OFFSET_ANY, 0);
593
594 child = LIST_FIRST(&sc->sc_mii.mii_phys);
595 if (child == NULL) {
596 /* No PHY attached */
597 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
598 0, NULL);
599 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
600 } else {
601 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
602 sc->sc_phyaddr = child->mii_phy;
603 }
604
605 strcpy(ifp->if_xname, device_xname(self));
606 ifp->if_softc = sc;
607 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
608 ifp->if_ioctl = mec_ioctl;
609 ifp->if_start = mec_start;
610 ifp->if_watchdog = mec_watchdog;
611 ifp->if_init = mec_init;
612 ifp->if_stop = mec_stop;
613 ifp->if_mtu = ETHERMTU;
614 IFQ_SET_READY(&ifp->if_snd);
615
616 /* mec has dumb RX cksum support */
617 ifp->if_capabilities = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx;
618
619 /* We can support 802.1Q VLAN-sized frames. */
620 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
621
622 /* attach the interface */
623 if_attach(ifp);
624 ether_ifattach(ifp, sc->sc_enaddr);
625
626 /* establish interrupt */
627 cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);
628
629 #if NRND > 0
630 rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
631 RND_TYPE_NET, 0);
632 #endif
633
634 #ifdef MEC_EVENT_COUNTERS
635 evcnt_attach_dynamic(&sc->sc_ev_txpkts , EVCNT_TYPE_MISC,
636 NULL, device_xname(self), "TX pkts queued total");
637 evcnt_attach_dynamic(&sc->sc_ev_txdpad , EVCNT_TYPE_MISC,
638 NULL, device_xname(self), "TX pkts padded in txdesc buf");
639 evcnt_attach_dynamic(&sc->sc_ev_txdbuf , EVCNT_TYPE_MISC,
640 NULL, device_xname(self), "TX pkts copied to txdesc buf");
641 evcnt_attach_dynamic(&sc->sc_ev_txptr1 , EVCNT_TYPE_MISC,
642 NULL, device_xname(self), "TX pkts using concat ptr1");
643 evcnt_attach_dynamic(&sc->sc_ev_txptr1a , EVCNT_TYPE_MISC,
644 NULL, device_xname(self), "TX pkts w/ptr1 ~160bytes");
645 evcnt_attach_dynamic(&sc->sc_ev_txptr1b , EVCNT_TYPE_MISC,
646 NULL, device_xname(self), "TX pkts w/ptr1 ~256bytes");
647 evcnt_attach_dynamic(&sc->sc_ev_txptr1c , EVCNT_TYPE_MISC,
648 NULL, device_xname(self), "TX pkts w/ptr1 ~512bytes");
649 evcnt_attach_dynamic(&sc->sc_ev_txptr1d , EVCNT_TYPE_MISC,
650 NULL, device_xname(self), "TX pkts w/ptr1 ~1024bytes");
651 evcnt_attach_dynamic(&sc->sc_ev_txptr1e , EVCNT_TYPE_MISC,
652 NULL, device_xname(self), "TX pkts w/ptr1 >1024bytes");
653 evcnt_attach_dynamic(&sc->sc_ev_txptr2 , EVCNT_TYPE_MISC,
654 NULL, device_xname(self), "TX pkts using concat ptr1,2");
655 evcnt_attach_dynamic(&sc->sc_ev_txptr2a , EVCNT_TYPE_MISC,
656 NULL, device_xname(self), "TX pkts w/ptr2 ~160bytes");
657 evcnt_attach_dynamic(&sc->sc_ev_txptr2b , EVCNT_TYPE_MISC,
658 NULL, device_xname(self), "TX pkts w/ptr2 ~256bytes");
659 evcnt_attach_dynamic(&sc->sc_ev_txptr2c , EVCNT_TYPE_MISC,
660 NULL, device_xname(self), "TX pkts w/ptr2 ~512bytes");
661 evcnt_attach_dynamic(&sc->sc_ev_txptr2d , EVCNT_TYPE_MISC,
662 NULL, device_xname(self), "TX pkts w/ptr2 ~1024bytes");
663 evcnt_attach_dynamic(&sc->sc_ev_txptr2e , EVCNT_TYPE_MISC,
664 NULL, device_xname(self), "TX pkts w/ptr2 >1024bytes");
665 evcnt_attach_dynamic(&sc->sc_ev_txptr3 , EVCNT_TYPE_MISC,
666 NULL, device_xname(self), "TX pkts using concat ptr1,2,3");
667 evcnt_attach_dynamic(&sc->sc_ev_txptr3a , EVCNT_TYPE_MISC,
668 NULL, device_xname(self), "TX pkts w/ptr3 ~160bytes");
669 evcnt_attach_dynamic(&sc->sc_ev_txptr3b , EVCNT_TYPE_MISC,
670 NULL, device_xname(self), "TX pkts w/ptr3 ~256bytes");
671 evcnt_attach_dynamic(&sc->sc_ev_txptr3c , EVCNT_TYPE_MISC,
672 NULL, device_xname(self), "TX pkts w/ptr3 ~512bytes");
673 evcnt_attach_dynamic(&sc->sc_ev_txptr3d , EVCNT_TYPE_MISC,
674 NULL, device_xname(self), "TX pkts w/ptr3 ~1024bytes");
675 evcnt_attach_dynamic(&sc->sc_ev_txptr3e , EVCNT_TYPE_MISC,
676 NULL, device_xname(self), "TX pkts w/ptr3 >1024bytes");
677 evcnt_attach_dynamic(&sc->sc_ev_txmbuf , EVCNT_TYPE_MISC,
678 NULL, device_xname(self), "TX pkts copied to new mbufs");
679 evcnt_attach_dynamic(&sc->sc_ev_txmbufa , EVCNT_TYPE_MISC,
680 NULL, device_xname(self), "TX pkts w/mbuf ~160bytes");
681 evcnt_attach_dynamic(&sc->sc_ev_txmbufb , EVCNT_TYPE_MISC,
682 NULL, device_xname(self), "TX pkts w/mbuf ~256bytes");
683 evcnt_attach_dynamic(&sc->sc_ev_txmbufc , EVCNT_TYPE_MISC,
684 NULL, device_xname(self), "TX pkts w/mbuf ~512bytes");
685 evcnt_attach_dynamic(&sc->sc_ev_txmbufd , EVCNT_TYPE_MISC,
686 NULL, device_xname(self), "TX pkts w/mbuf ~1024bytes");
687 evcnt_attach_dynamic(&sc->sc_ev_txmbufe , EVCNT_TYPE_MISC,
688 NULL, device_xname(self), "TX pkts w/mbuf >1024bytes");
689 evcnt_attach_dynamic(&sc->sc_ev_txptrs , EVCNT_TYPE_MISC,
690 NULL, device_xname(self), "TX pkts using ptrs total");
691 evcnt_attach_dynamic(&sc->sc_ev_txptrc0 , EVCNT_TYPE_MISC,
692 NULL, device_xname(self), "TX pkts w/ptrs no hdr chain");
693 evcnt_attach_dynamic(&sc->sc_ev_txptrc1 , EVCNT_TYPE_MISC,
694 NULL, device_xname(self), "TX pkts w/ptrs 1 hdr chain");
695 evcnt_attach_dynamic(&sc->sc_ev_txptrc2 , EVCNT_TYPE_MISC,
696 NULL, device_xname(self), "TX pkts w/ptrs 2 hdr chains");
697 evcnt_attach_dynamic(&sc->sc_ev_txptrc3 , EVCNT_TYPE_MISC,
698 NULL, device_xname(self), "TX pkts w/ptrs 3 hdr chains");
699 evcnt_attach_dynamic(&sc->sc_ev_txptrc4 , EVCNT_TYPE_MISC,
700 NULL, device_xname(self), "TX pkts w/ptrs 4 hdr chains");
701 evcnt_attach_dynamic(&sc->sc_ev_txptrc5 , EVCNT_TYPE_MISC,
702 NULL, device_xname(self), "TX pkts w/ptrs 5 hdr chains");
703 evcnt_attach_dynamic(&sc->sc_ev_txptrc6 , EVCNT_TYPE_MISC,
704 NULL, device_xname(self), "TX pkts w/ptrs >5 hdr chains");
705 evcnt_attach_dynamic(&sc->sc_ev_txptrh0 , EVCNT_TYPE_MISC,
706 NULL, device_xname(self), "TX pkts w/ptrs ~8bytes hdr");
707 evcnt_attach_dynamic(&sc->sc_ev_txptrh1 , EVCNT_TYPE_MISC,
708 NULL, device_xname(self), "TX pkts w/ptrs ~16bytes hdr");
709 evcnt_attach_dynamic(&sc->sc_ev_txptrh2 , EVCNT_TYPE_MISC,
710 NULL, device_xname(self), "TX pkts w/ptrs ~32bytes hdr");
711 evcnt_attach_dynamic(&sc->sc_ev_txptrh3 , EVCNT_TYPE_MISC,
712 NULL, device_xname(self), "TX pkts w/ptrs ~64bytes hdr");
713 evcnt_attach_dynamic(&sc->sc_ev_txptrh4 , EVCNT_TYPE_MISC,
714 NULL, device_xname(self), "TX pkts w/ptrs ~80bytes hdr");
715 evcnt_attach_dynamic(&sc->sc_ev_txptrh5 , EVCNT_TYPE_MISC,
716 NULL, device_xname(self), "TX pkts w/ptrs ~96bytes hdr");
717 evcnt_attach_dynamic(&sc->sc_ev_txdstall , EVCNT_TYPE_MISC,
718 NULL, device_xname(self), "TX stalled due to no txdesc");
719 evcnt_attach_dynamic(&sc->sc_ev_txempty , EVCNT_TYPE_MISC,
720 NULL, device_xname(self), "TX empty interrupts");
721 evcnt_attach_dynamic(&sc->sc_ev_txsent , EVCNT_TYPE_MISC,
722 NULL, device_xname(self), "TX sent interrupts");
723 #endif
724
725 /* set shutdown hook to reset interface on powerdown */
726 if (pmf_device_register1(self, NULL, NULL, mec_shutdown))
727 pmf_class_network_register(self, ifp);
728 else
729 aprint_error_dev(self, "couldn't establish power handler\n");
730
731 return;
732
733 /*
734 * Free any resources we've allocated during the failed attach
 * attempt. Do this in reverse order and fall through.
736 */
737 fail_4:
738 for (i = 0; i < MEC_NTXDESC; i++) {
739 if (sc->sc_txsoft[i].txs_dmamap != NULL)
740 bus_dmamap_destroy(sc->sc_dmat,
741 sc->sc_txsoft[i].txs_dmamap);
742 }
743 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
744 fail_3:
745 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
746 fail_2:
747 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
748 sizeof(struct mec_control_data));
749 fail_1:
750 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
751 fail_0:
752 return;
753 }
754
755 static int
756 mec_mii_readreg(device_t self, int phy, int reg)
757 {
758 struct mec_softc *sc = device_private(self);
759 bus_space_tag_t st = sc->sc_st;
760 bus_space_handle_t sh = sc->sc_sh;
761 uint64_t val;
762 int i;
763
764 if (mec_mii_wait(sc) != 0)
765 return 0;
766
767 bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
768 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
769 delay(25);
770 bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
771 delay(25);
772 mec_mii_wait(sc);
773
774 for (i = 0; i < 20; i++) {
775 delay(30);
776
777 val = bus_space_read_8(st, sh, MEC_PHY_DATA);
778
779 if ((val & MEC_PHY_DATA_BUSY) == 0)
780 return val & MEC_PHY_DATA_VALUE;
781 }
782 return 0;
783 }
784
785 static void
786 mec_mii_writereg(device_t self, int phy, int reg, int val)
787 {
788 struct mec_softc *sc = device_private(self);
789 bus_space_tag_t st = sc->sc_st;
790 bus_space_handle_t sh = sc->sc_sh;
791
792 if (mec_mii_wait(sc) != 0) {
793 printf("timed out writing %x: %x\n", reg, val);
794 return;
795 }
796
797 bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
798 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
799
800 delay(60);
801
802 bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);
803
804 delay(60);
805
806 mec_mii_wait(sc);
807 }
808
809 static int
810 mec_mii_wait(struct mec_softc *sc)
811 {
812 uint32_t busy;
813 int i, s;
814
815 for (i = 0; i < 100; i++) {
816 delay(30);
817
818 s = splhigh();
819 busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
820 splx(s);
821
822 if ((busy & MEC_PHY_DATA_BUSY) == 0)
823 return 0;
824 #if 0
825 if (busy == 0xffff) /* XXX ? */
826 return 0;
827 #endif
828 }
829
830 printf("%s: MII timed out\n", device_xname(sc->sc_dev));
831 return 1;
832 }
833
834 static void
835 mec_statchg(device_t self)
836 {
837 struct mec_softc *sc = device_private(self);
838 bus_space_tag_t st = sc->sc_st;
839 bus_space_handle_t sh = sc->sc_sh;
840 uint32_t control;
841
842 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
843 control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
844 MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);
845
	/* the IPG must also be set here as part of the duplex handling ... */
847 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
848 control |= MEC_MAC_FULL_DUPLEX;
849 } else {
850 /* set IPG */
851 control |= MEC_MAC_IPG_DEFAULT;
852 }
853
854 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
855 }
856
857 /*
858 * XXX
 * Maybe this function should be moved to a common place
 * (sgimips/machdep.c or elsewhere) shared by all on-board network devices.
861 */
862 static void
863 enaddr_aton(const char *str, uint8_t *eaddr)
864 {
865 int i;
866 char c;
867
868 for (i = 0; i < ETHER_ADDR_LEN; i++) {
869 if (*str == ':')
870 str++;
871
872 c = *str++;
873 if (isdigit(c)) {
874 eaddr[i] = (c - '0');
875 } else if (isxdigit(c)) {
876 eaddr[i] = (toupper(c) + 10 - 'A');
877 }
878 c = *str++;
879 if (isdigit(c)) {
880 eaddr[i] = (eaddr[i] << 4) | (c - '0');
881 } else if (isxdigit(c)) {
882 eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
883 }
884 }
885 }
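/*
 * Example: enaddr_aton("08:00:69:0e:05:36", eaddr) fills eaddr[] with
 * { 0x08, 0x00, 0x69, 0x0e, 0x05, 0x36 }.  Note the parser assumes a
 * well-formed "xx:xx:xx:xx:xx:xx" string from ARCBIOS and does no error
 * checking.
 */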
886
887 static int
888 mec_init(struct ifnet *ifp)
889 {
890 struct mec_softc *sc = ifp->if_softc;
891 bus_space_tag_t st = sc->sc_st;
892 bus_space_handle_t sh = sc->sc_sh;
893 struct mec_rxdesc *rxd;
894 int i, rc;
895
896 /* cancel any pending I/O */
897 mec_stop(ifp, 0);
898
899 /* reset device */
900 mec_reset(sc);
901
902 /* setup filter for multicast or promisc mode */
903 mec_setfilter(sc);
904
905 /* set the TX ring pointer to the base address */
906 bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));
907
908 sc->sc_txpending = 0;
909 sc->sc_txdirty = 0;
910 sc->sc_txlast = MEC_NTXDESC - 1;
911
912 /* put RX buffers into FIFO */
913 for (i = 0; i < MEC_NRXDESC; i++) {
914 rxd = &sc->sc_rxdesc[i];
915 rxd->rxd_stat = 0;
916 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
917 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
918 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
919 }
920 sc->sc_rxptr = 0;
921
922 #if 0 /* XXX no info */
923 bus_space_write_8(st, sh, MEC_TIMER, 0);
924 #endif
925
926 /*
 * MEC_DMA_TX_INT_ENABLE will be set later; otherwise it causes
 * spurious interrupts while the TX buffers are empty.
929 */
930 bus_space_write_8(st, sh, MEC_DMA_CONTROL,
931 (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
932 (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
933 MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
934 MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);
935
936 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
937
938 if ((rc = ether_mediachange(ifp)) != 0)
939 return rc;
940
941 ifp->if_flags |= IFF_RUNNING;
942 ifp->if_flags &= ~IFF_OACTIVE;
943 mec_start(ifp);
944
945 return 0;
946 }
947
948 static void
949 mec_reset(struct mec_softc *sc)
950 {
951 bus_space_tag_t st = sc->sc_st;
952 bus_space_handle_t sh = sc->sc_sh;
953 uint64_t control;
954
955 /* stop DMA first */
956 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
957
958 /* reset chip */
959 bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
960 delay(1000);
961 bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
962 delay(1000);
963
964 /* Default to 100/half and let auto-negotiation work its magic */
965 control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
966 MEC_MAC_IPG_DEFAULT;
967
968 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
969 /* stop DMA again for sanity */
970 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
971
972 DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
973 bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
974 }
975
976 static void
977 mec_start(struct ifnet *ifp)
978 {
979 struct mec_softc *sc = ifp->if_softc;
980 struct mbuf *m0, *m;
981 struct mec_txdesc *txd;
982 struct mec_txsoft *txs;
983 bus_dmamap_t dmamap;
984 bus_space_tag_t st = sc->sc_st;
985 bus_space_handle_t sh = sc->sc_sh;
986 int error, firsttx, nexttx, opending;
987 int len, bufoff, buflen, nsegs, align, resid, pseg, nptr, slen, i;
988 uint32_t txdcmd;
989
990 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
991 return;
992
993 /*
994 * Remember the previous txpending and the first transmit descriptor.
995 */
996 opending = sc->sc_txpending;
997 firsttx = MEC_NEXTTX(sc->sc_txlast);
998
999 DPRINTF(MEC_DEBUG_START,
1000 ("%s: opending = %d, firsttx = %d\n", __func__, opending, firsttx));
1001
1002 while (sc->sc_txpending < MEC_NTXDESC - 1) {
1003 /* Grab a packet off the queue. */
1004 IFQ_POLL(&ifp->if_snd, m0);
1005 if (m0 == NULL)
1006 break;
1007 m = NULL;
1008
1009 /*
1010 * Get the next available transmit descriptor.
1011 */
1012 nexttx = MEC_NEXTTX(sc->sc_txlast);
1013 txd = &sc->sc_txdesc[nexttx];
1014 txs = &sc->sc_txsoft[nexttx];
1015 dmamap = txs->txs_dmamap;
1016 txs->txs_flags = 0;
1017
1018 buflen = 0;
1019 bufoff = 0;
1020 resid = 0;
1021 nptr = 0; /* XXX gcc */
1022 pseg = 0; /* XXX gcc */
1023
1024 len = m0->m_pkthdr.len;
1025
1026 DPRINTF(MEC_DEBUG_START,
1027 ("%s: len = %d, nexttx = %d, txpending = %d\n",
1028 __func__, len, nexttx, sc->sc_txpending));
1029
1030 if (len <= MEC_TXD_BUFSIZE) {
1031 /*
 * If a TX packet fits into the small txdesc buffer,
 * just copy it in there; that may well be faster than
 * checking alignment and calling bus_dma(9), etc.
1035 */
1036 DPRINTF(MEC_DEBUG_START, ("%s: short packet\n",
1037 __func__));
1038 IFQ_DEQUEUE(&ifp->if_snd, m0);
1039
1040 /*
1041 * I don't know if MEC chip does auto padding,
1042 * but do it manually for safety.
1043 */
1044 if (len < ETHER_PAD_LEN) {
1045 MEC_EVCNT_INCR(&sc->sc_ev_txdpad);
1046 bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
1047 m_copydata(m0, 0, len, txd->txd_buf + bufoff);
1048 memset(txd->txd_buf + bufoff + len, 0,
1049 ETHER_PAD_LEN - len);
1050 len = buflen = ETHER_PAD_LEN;
1051 } else {
1052 MEC_EVCNT_INCR(&sc->sc_ev_txdbuf);
1053 bufoff = MEC_TXD_BUFSTART(len);
1054 m_copydata(m0, 0, len, txd->txd_buf + bufoff);
1055 buflen = len;
1056 }
1057 } else {
1058 /*
1059 * If the packet won't fit the static buffer in txdesc,
1060 * we have to use the concatenate pointers to handle it.
1061 */
1062 DPRINTF(MEC_DEBUG_START, ("%s: long packet\n",
1063 __func__));
1064 txs->txs_flags = MEC_TXS_TXDPTR;
1065
1066 /*
1067 * Call bus_dmamap_load_mbuf(9) first to see
1068 * how many chains the TX mbuf has.
1069 */
1070 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1071 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1072 if (error == 0) {
1073 /*
 * Check the chains which might contain headers.
 * They might be heavily fragmented, and it's
 * better to copy them into the txdesc buffer
 * since they should be small enough.
1078 */
1079 nsegs = dmamap->dm_nsegs;
1080 for (pseg = 0; pseg < nsegs; pseg++) {
1081 slen = dmamap->dm_segs[pseg].ds_len;
1082 if (buflen + slen >
1083 MEC_TXD_BUFSIZE1 - MEC_TXD_ALIGN)
1084 break;
1085 buflen += slen;
1086 }
1087 /*
 * Check if the remaining chains can fit into
 * the concatenate pointers.
1090 */
1091 align = dmamap->dm_segs[pseg].ds_addr &
1092 MEC_TXD_ALIGNMASK;
1093 if (align > 0) {
1094 /*
1095 * If the first chain isn't uint64_t
1096 * aligned, append the unaligned part
1097 * into txdesc buffer too.
1098 */
1099 resid = MEC_TXD_ALIGN - align;
1100 buflen += resid;
1101 for (; pseg < nsegs; pseg++) {
1102 slen =
1103 dmamap->dm_segs[pseg].ds_len;
1104 if (slen > resid)
1105 break;
1106 resid -= slen;
1107 }
1108 } else if (pseg == 0) {
1109 /*
 * In this case the first chain is
 * uint64_t aligned but too long to fit
 * into the txdesc buffer.  We still
 * have to put some data into the
 * txdesc buffer, so put MEC_TXD_ALIGN
 * bytes there.
1116 */
1117 buflen = resid = MEC_TXD_ALIGN;
1118 }
1119 nptr = nsegs - pseg;
1120 if (nptr <= MEC_NTXPTR) {
1121 bufoff = MEC_TXD_BUFSTART(buflen);
1122
1123 /*
 * Check if all the remaining chains are
1125 * uint64_t aligned.
1126 */
1127 align = 0;
1128 for (i = pseg + 1; i < nsegs; i++)
1129 align |=
1130 dmamap->dm_segs[i].ds_addr
1131 & MEC_TXD_ALIGNMASK;
1132 if (align != 0) {
1133 /* chains are not aligned */
1134 error = -1;
1135 }
1136 } else {
 /* The TX mbuf chains don't fit. */
1138 error = -1;
1139 }
1140 if (error == -1)
1141 bus_dmamap_unload(sc->sc_dmat, dmamap);
1142 }
1143 if (error != 0) {
1144 /*
 * The TX mbuf chains can't be put into
 * the concatenate buffers. In this case,
 * we have to allocate a new contiguous mbuf
 * and copy the data into it.
 *
 * Even in this case, the Ethernet header in
 * the TX mbuf might be unaligned and the
 * trailing data might be word aligned, so put
 * 2 bytes of padding (MEC_ETHER_ALIGN) at the
 * top of the allocated mbuf and copy the TX
 * packet there.  The first 6 bytes
 * (MEC_TXD_ALIGN - MEC_ETHER_ALIGN) of the new
 * mbuf won't be uint64_t aligned, but we have
 * to put some data into the txdesc buffer
 * anyway even if the buffer is uint64_t aligned.
1160 */
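			/*
			 * With the values here (MEC_ETHER_ALIGN == 2,
			 * MEC_TXD_ALIGN == 8), buflen below becomes 6: the
			 * first 6 bytes of the new mbuf are copied into the
			 * txdesc buffer and the concatenate pointer starts at
			 * m->m_data + 6, i.e. 8 bytes past the (presumably
			 * uint64_t aligned) start of the mbuf storage.
			 */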
1161 DPRINTF(MEC_DEBUG_START|MEC_DEBUG_TXSEGS,
1162 ("%s: re-allocating mbuf\n", __func__));
1163
1164 MGETHDR(m, M_DONTWAIT, MT_DATA);
1165 if (m == NULL) {
1166 printf("%s: unable to allocate "
1167 "TX mbuf\n",
1168 device_xname(sc->sc_dev));
1169 break;
1170 }
1171 if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1172 MCLGET(m, M_DONTWAIT);
1173 if ((m->m_flags & M_EXT) == 0) {
1174 printf("%s: unable to allocate "
1175 "TX cluster\n",
1176 device_xname(sc->sc_dev));
1177 m_freem(m);
1178 break;
1179 }
1180 }
1181 m->m_data += MEC_ETHER_ALIGN;
1182
1183 /*
 * Copy all of the data (including the unaligned part)
 * so that the bpf_mtap() below sees the whole packet.
1186 */
1187 m_copydata(m0, 0, len, mtod(m, void *));
1188 m->m_pkthdr.len = m->m_len = len;
1189 error = bus_dmamap_load_mbuf(sc->sc_dmat,
1190 dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1191 if (dmamap->dm_nsegs > 1) {
1192 /* should not happen, but for sanity */
1193 bus_dmamap_unload(sc->sc_dmat, dmamap);
1194 error = -1;
1195 }
1196 if (error != 0) {
1197 printf("%s: unable to load TX buffer, "
1198 "error = %d\n",
1199 device_xname(sc->sc_dev), error);
1200 m_freem(m);
1201 break;
1202 }
1203 /*
1204 * Only the first segment should be put into
 * the concatenate pointer in this case.
1206 */
1207 pseg = 0;
1208 nptr = 1;
1209
1210 /*
 * Set the length of the unaligned part which will
 * be copied into the txdesc buffer.
1213 */
1214 buflen = MEC_TXD_ALIGN - MEC_ETHER_ALIGN;
1215 bufoff = MEC_TXD_BUFSTART(buflen);
1216 resid = buflen;
1217 #ifdef MEC_EVENT_COUNTERS
1218 MEC_EVCNT_INCR(&sc->sc_ev_txmbuf);
1219 if (len <= 160)
1220 MEC_EVCNT_INCR(&sc->sc_ev_txmbufa);
1221 else if (len <= 256)
1222 MEC_EVCNT_INCR(&sc->sc_ev_txmbufb);
1223 else if (len <= 512)
1224 MEC_EVCNT_INCR(&sc->sc_ev_txmbufc);
1225 else if (len <= 1024)
1226 MEC_EVCNT_INCR(&sc->sc_ev_txmbufd);
1227 else
1228 MEC_EVCNT_INCR(&sc->sc_ev_txmbufe);
1229 #endif
1230 }
1231 #ifdef MEC_EVENT_COUNTERS
1232 else {
1233 MEC_EVCNT_INCR(&sc->sc_ev_txptrs);
1234 if (nptr == 1) {
1235 MEC_EVCNT_INCR(&sc->sc_ev_txptr1);
1236 if (len <= 160)
1237 MEC_EVCNT_INCR(
1238 &sc->sc_ev_txptr1a);
1239 else if (len <= 256)
1240 MEC_EVCNT_INCR(
1241 &sc->sc_ev_txptr1b);
1242 else if (len <= 512)
1243 MEC_EVCNT_INCR(
1244 &sc->sc_ev_txptr1c);
1245 else if (len <= 1024)
1246 MEC_EVCNT_INCR(
1247 &sc->sc_ev_txptr1d);
1248 else
1249 MEC_EVCNT_INCR(
1250 &sc->sc_ev_txptr1e);
1251 } else if (nptr == 2) {
1252 MEC_EVCNT_INCR(&sc->sc_ev_txptr2);
1253 if (len <= 160)
1254 MEC_EVCNT_INCR(
1255 &sc->sc_ev_txptr2a);
1256 else if (len <= 256)
1257 MEC_EVCNT_INCR(
1258 &sc->sc_ev_txptr2b);
1259 else if (len <= 512)
1260 MEC_EVCNT_INCR(
1261 &sc->sc_ev_txptr2c);
1262 else if (len <= 1024)
1263 MEC_EVCNT_INCR(
1264 &sc->sc_ev_txptr2d);
1265 else
1266 MEC_EVCNT_INCR(
1267 &sc->sc_ev_txptr2e);
1268 } else if (nptr == 3) {
1269 MEC_EVCNT_INCR(&sc->sc_ev_txptr3);
1270 if (len <= 160)
1271 MEC_EVCNT_INCR(
1272 &sc->sc_ev_txptr3a);
1273 else if (len <= 256)
1274 MEC_EVCNT_INCR(
1275 &sc->sc_ev_txptr3b);
1276 else if (len <= 512)
1277 MEC_EVCNT_INCR(
1278 &sc->sc_ev_txptr3c);
1279 else if (len <= 1024)
1280 MEC_EVCNT_INCR(
1281 &sc->sc_ev_txptr3d);
1282 else
1283 MEC_EVCNT_INCR(
1284 &sc->sc_ev_txptr3e);
1285 }
1286 if (pseg == 0)
1287 MEC_EVCNT_INCR(&sc->sc_ev_txptrc0);
1288 else if (pseg == 1)
1289 MEC_EVCNT_INCR(&sc->sc_ev_txptrc1);
1290 else if (pseg == 2)
1291 MEC_EVCNT_INCR(&sc->sc_ev_txptrc2);
1292 else if (pseg == 3)
1293 MEC_EVCNT_INCR(&sc->sc_ev_txptrc3);
1294 else if (pseg == 4)
1295 MEC_EVCNT_INCR(&sc->sc_ev_txptrc4);
1296 else if (pseg == 5)
1297 MEC_EVCNT_INCR(&sc->sc_ev_txptrc5);
1298 else
1299 MEC_EVCNT_INCR(&sc->sc_ev_txptrc6);
1300 if (buflen <= 8)
1301 MEC_EVCNT_INCR(&sc->sc_ev_txptrh0);
1302 else if (buflen <= 16)
1303 MEC_EVCNT_INCR(&sc->sc_ev_txptrh1);
1304 else if (buflen <= 32)
1305 MEC_EVCNT_INCR(&sc->sc_ev_txptrh2);
1306 else if (buflen <= 64)
1307 MEC_EVCNT_INCR(&sc->sc_ev_txptrh3);
1308 else if (buflen <= 80)
1309 MEC_EVCNT_INCR(&sc->sc_ev_txptrh4);
1310 else
1311 MEC_EVCNT_INCR(&sc->sc_ev_txptrh5);
1312 }
1313 #endif
1314 m_copydata(m0, 0, buflen, txd->txd_buf + bufoff);
1315
1316 IFQ_DEQUEUE(&ifp->if_snd, m0);
1317 if (m != NULL) {
1318 m_freem(m0);
1319 m0 = m;
1320 }
1321
1322 /*
1323 * sync the DMA map for TX mbuf
1324 */
1325 bus_dmamap_sync(sc->sc_dmat, dmamap, buflen,
1326 len - buflen, BUS_DMASYNC_PREWRITE);
1327 }
1328
1329 /*
1330 * Pass packet to bpf if there is a listener.
1331 */
1332 bpf_mtap(ifp, m0);
1333 MEC_EVCNT_INCR(&sc->sc_ev_txpkts);
1334
1335 /*
1336 * setup the transmit descriptor.
1337 */
1338 txdcmd = TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen) | (len - 1);
1339
1340 /*
 * Set MEC_TXCMD_TXINT every MEC_NTXDESC_INTR packets
 * if more than half of the txdescs have been queued,
 * because TX_EMPTY interrupts will rarely happen
 * while the TX queue is so backed up.
1345 */
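		/*
		 * With the defaults above (MEC_NTXDESC == 64 and
		 * MEC_NTXDESC_INTR == 8), this requests a TX interrupt on
		 * every 8th descriptor once more than 32 transmits are
		 * pending.
		 */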
1346 if (sc->sc_txpending > (MEC_NTXDESC / 2) &&
1347 (nexttx & (MEC_NTXDESC_INTR - 1)) == 0)
1348 txdcmd |= MEC_TXCMD_TXINT;
1349
1350 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1351 bus_dma_segment_t *segs = dmamap->dm_segs;
1352
1353 DPRINTF(MEC_DEBUG_TXSEGS,
1354 ("%s: nsegs = %d, pseg = %d, nptr = %d\n",
1355 __func__, dmamap->dm_nsegs, pseg, nptr));
1356
1357 switch (nptr) {
1358 case 3:
1359 KASSERT((segs[pseg + 2].ds_addr &
1360 MEC_TXD_ALIGNMASK) == 0);
1361 txdcmd |= MEC_TXCMD_PTR3;
1362 txd->txd_ptr[2] =
1363 TXPTR_LEN(segs[pseg + 2].ds_len - 1) |
1364 segs[pseg + 2].ds_addr;
1365 /* FALLTHROUGH */
1366 case 2:
1367 KASSERT((segs[pseg + 1].ds_addr &
1368 MEC_TXD_ALIGNMASK) == 0);
1369 txdcmd |= MEC_TXCMD_PTR2;
1370 txd->txd_ptr[1] =
1371 TXPTR_LEN(segs[pseg + 1].ds_len - 1) |
1372 segs[pseg + 1].ds_addr;
1373 /* FALLTHROUGH */
1374 case 1:
1375 txdcmd |= MEC_TXCMD_PTR1;
1376 txd->txd_ptr[0] =
1377 TXPTR_LEN(segs[pseg].ds_len - resid - 1) |
1378 (segs[pseg].ds_addr + resid);
1379 break;
1380 default:
1381 panic("%s: impossible nptr in %s",
1382 device_xname(sc->sc_dev), __func__);
1383 /* NOTREACHED */
1384 }
1385 /*
1386 * Store a pointer to the packet so we can
1387 * free it later.
1388 */
1389 txs->txs_mbuf = m0;
1390 } else {
1391 /*
 * In this case all data have been copied into the txdesc
 * buffer, so we can free the TX mbuf here.
1394 */
1395 m_freem(m0);
1396 }
1397 txd->txd_cmd = txdcmd;
1398
1399 DPRINTF(MEC_DEBUG_START,
1400 ("%s: txd_cmd = 0x%016llx\n",
1401 __func__, txd->txd_cmd));
1402 DPRINTF(MEC_DEBUG_START,
1403 ("%s: txd_ptr[0] = 0x%016llx\n",
1404 __func__, txd->txd_ptr[0]));
1405 DPRINTF(MEC_DEBUG_START,
1406 ("%s: txd_ptr[1] = 0x%016llx\n",
1407 __func__, txd->txd_ptr[1]));
1408 DPRINTF(MEC_DEBUG_START,
1409 ("%s: txd_ptr[2] = 0x%016llx\n",
1410 __func__, txd->txd_ptr[2]));
1411 DPRINTF(MEC_DEBUG_START,
1412 ("%s: len = %d (0x%04x), buflen = %d (0x%02x)\n",
1413 __func__, len, len, buflen, buflen));
1414
1415 /* sync TX descriptor */
1416 MEC_TXDESCSYNC(sc, nexttx,
1417 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1418
1419 /* start TX */
1420 bus_space_write_8(st, sh, MEC_TX_RING_PTR, MEC_NEXTTX(nexttx));
1421
1422 /* advance the TX pointer. */
1423 sc->sc_txpending++;
1424 sc->sc_txlast = nexttx;
1425 }
1426
1427 if (sc->sc_txpending == MEC_NTXDESC - 1) {
1428 /* No more slots; notify upper layer. */
1429 MEC_EVCNT_INCR(&sc->sc_ev_txdstall);
1430 ifp->if_flags |= IFF_OACTIVE;
1431 }
1432
1433 if (sc->sc_txpending != opending) {
1434 /*
1435 * If the transmitter was idle,
1436 * reset the txdirty pointer and re-enable TX interrupt.
1437 */
1438 if (opending == 0) {
1439 sc->sc_txdirty = firsttx;
1440 bus_space_write_8(st, sh, MEC_TX_ALIAS,
1441 MEC_TX_ALIAS_INT_ENABLE);
1442 }
1443
1444 /* Set a watchdog timer in case the chip flakes out. */
1445 ifp->if_timer = 5;
1446 }
1447 }
1448
1449 static void
1450 mec_stop(struct ifnet *ifp, int disable)
1451 {
1452 struct mec_softc *sc = ifp->if_softc;
1453 struct mec_txsoft *txs;
1454 int i;
1455
1456 DPRINTF(MEC_DEBUG_STOP, ("%s\n", __func__));
1457
1458 ifp->if_timer = 0;
1459 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1460
1461 callout_stop(&sc->sc_tick_ch);
1462 mii_down(&sc->sc_mii);
1463
1464 /* release any TX buffers */
1465 for (i = 0; i < MEC_NTXDESC; i++) {
1466 txs = &sc->sc_txsoft[i];
1467 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1468 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1469 m_freem(txs->txs_mbuf);
1470 txs->txs_mbuf = NULL;
1471 }
1472 }
1473 }
1474
1475 static int
1476 mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1477 {
1478 int s, error;
1479
1480 s = splnet();
1481
1482 error = ether_ioctl(ifp, cmd, data);
1483 if (error == ENETRESET) {
1484 /*
1485 * Multicast list has changed; set the hardware filter
1486 * accordingly.
1487 */
1488 if (ifp->if_flags & IFF_RUNNING)
1489 error = mec_init(ifp);
1490 else
1491 error = 0;
1492 }
1493
1494 /* Try to get more packets going. */
1495 mec_start(ifp);
1496
1497 splx(s);
1498 return error;
1499 }
1500
1501 static void
1502 mec_watchdog(struct ifnet *ifp)
1503 {
1504 struct mec_softc *sc = ifp->if_softc;
1505
1506 printf("%s: device timeout\n", device_xname(sc->sc_dev));
1507 ifp->if_oerrors++;
1508
1509 mec_init(ifp);
1510 }
1511
1512 static void
1513 mec_tick(void *arg)
1514 {
1515 struct mec_softc *sc = arg;
1516 int s;
1517
1518 s = splnet();
1519 mii_tick(&sc->sc_mii);
1520 splx(s);
1521
1522 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
1523 }
1524
1525 static void
1526 mec_setfilter(struct mec_softc *sc)
1527 {
1528 struct ethercom *ec = &sc->sc_ethercom;
1529 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1530 struct ether_multi *enm;
1531 struct ether_multistep step;
1532 bus_space_tag_t st = sc->sc_st;
1533 bus_space_handle_t sh = sc->sc_sh;
1534 uint64_t mchash;
1535 uint32_t control, hash;
1536 int mcnt;
1537
1538 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1539 control &= ~MEC_MAC_FILTER_MASK;
1540
1541 if (ifp->if_flags & IFF_PROMISC) {
1542 control |= MEC_MAC_FILTER_PROMISC;
1543 bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1544 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1545 return;
1546 }
1547
1548 mcnt = 0;
1549 mchash = 0;
1550 ETHER_FIRST_MULTI(step, ec, enm);
1551 while (enm != NULL) {
1552 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1553 /* set allmulti for a range of multicast addresses */
1554 control |= MEC_MAC_FILTER_ALLMULTI;
1555 bus_space_write_8(st, sh, MEC_MULTICAST,
1556 0xffffffffffffffffULL);
1557 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1558 return;
1559 }
1560
1561 #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
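/*
 * The hash is the top 6 bits of the big-endian CRC32 of the address and
 * selects one of the 64 bits in the MEC_MULTICAST filter register; for
 * example, a CRC of 0xf8000000 selects bit 62.
 */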
1562
1563 hash = mec_calchash(enm->enm_addrlo);
		mchash |= 1ULL << hash;	/* hash is 0..63, so a 64-bit shift is needed */
1565 mcnt++;
1566 ETHER_NEXT_MULTI(step, enm);
1567 }
1568
1569 ifp->if_flags &= ~IFF_ALLMULTI;
1570
1571 if (mcnt > 0)
1572 control |= MEC_MAC_FILTER_MATCHMULTI;
1573
1574 bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1575 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1576 }
1577
1578 static int
1579 mec_intr(void *arg)
1580 {
1581 struct mec_softc *sc = arg;
1582 bus_space_tag_t st = sc->sc_st;
1583 bus_space_handle_t sh = sc->sc_sh;
1584 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1585 uint32_t statreg, statack, txptr;
1586 int handled, sent;
1587
1588 DPRINTF(MEC_DEBUG_INTR, ("%s: called\n", __func__));
1589
1590 handled = sent = 0;
1591
1592 for (;;) {
1593 statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);
1594
1595 DPRINTF(MEC_DEBUG_INTR,
1596 ("%s: INT_STAT = 0x%08x\n", __func__, statreg));
1597
1598 statack = statreg & MEC_INT_STATUS_MASK;
1599 if (statack == 0)
1600 break;
1601 bus_space_write_8(st, sh, MEC_INT_STATUS, statack);
1602
1603 handled = 1;
1604
1605 if (statack &
1606 (MEC_INT_RX_THRESHOLD |
1607 MEC_INT_RX_FIFO_UNDERFLOW)) {
1608 mec_rxintr(sc);
1609 }
1610
1611 if (statack &
1612 (MEC_INT_TX_EMPTY |
1613 MEC_INT_TX_PACKET_SENT |
1614 MEC_INT_TX_ABORT)) {
1615 txptr = (statreg & MEC_INT_TX_RING_BUFFER_ALIAS)
1616 >> MEC_INT_TX_RING_BUFFER_SHIFT;
1617 mec_txintr(sc, txptr);
1618 sent = 1;
1619 if ((statack & MEC_INT_TX_EMPTY) != 0) {
1620 /*
1621 * disable TX interrupt to stop
1622 * TX empty interrupt
1623 */
1624 bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
1625 DPRINTF(MEC_DEBUG_INTR,
1626 ("%s: disable TX_INT\n", __func__));
1627 }
1628 #ifdef MEC_EVENT_COUNTERS
1629 if ((statack & MEC_INT_TX_EMPTY) != 0)
1630 MEC_EVCNT_INCR(&sc->sc_ev_txempty);
1631 if ((statack & MEC_INT_TX_PACKET_SENT) != 0)
1632 MEC_EVCNT_INCR(&sc->sc_ev_txsent);
1633 #endif
1634 }
1635
1636 if (statack &
1637 (MEC_INT_TX_LINK_FAIL |
1638 MEC_INT_TX_MEM_ERROR |
1639 MEC_INT_TX_ABORT |
1640 MEC_INT_RX_FIFO_UNDERFLOW |
1641 MEC_INT_RX_DMA_UNDERFLOW)) {
1642 printf("%s: %s: interrupt status = 0x%08x\n",
1643 device_xname(sc->sc_dev), __func__, statreg);
1644 mec_init(ifp);
1645 break;
1646 }
1647 }
1648
1649 if (sent && !IFQ_IS_EMPTY(&ifp->if_snd)) {
1650 /* try to get more packets going */
1651 mec_start(ifp);
1652 }
1653
1654 #if NRND > 0
1655 if (handled)
1656 rnd_add_uint32(&sc->sc_rnd_source, statreg);
1657 #endif
1658
1659 return handled;
1660 }
1661
1662 static void
1663 mec_rxintr(struct mec_softc *sc)
1664 {
1665 bus_space_tag_t st = sc->sc_st;
1666 bus_space_handle_t sh = sc->sc_sh;
1667 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1668 struct mbuf *m;
1669 struct mec_rxdesc *rxd;
1670 uint64_t rxstat;
1671 u_int len;
1672 int i;
1673 uint32_t crc;
1674
1675 DPRINTF(MEC_DEBUG_RXINTR, ("%s: called\n", __func__));
1676
1677 for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
1678 rxd = &sc->sc_rxdesc[i];
1679
1680 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
1681 rxstat = rxd->rxd_stat;
1682
1683 DPRINTF(MEC_DEBUG_RXINTR,
1684 ("%s: rxstat = 0x%016llx, rxptr = %d\n",
1685 __func__, rxstat, i));
1686 DPRINTF(MEC_DEBUG_RXINTR, ("%s: rxfifo = 0x%08x\n",
1687 __func__, (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));
1688
1689 if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
1690 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1691 break;
1692 }
1693
1694 len = rxstat & MEC_RXSTAT_LEN;
1695
1696 if (len < ETHER_MIN_LEN ||
1697 len > (MCLBYTES - MEC_ETHER_ALIGN)) {
1698 /* invalid length packet; drop it. */
1699 DPRINTF(MEC_DEBUG_RXINTR,
1700 ("%s: wrong packet\n", __func__));
1701 dropit:
1702 ifp->if_ierrors++;
1703 rxd->rxd_stat = 0;
1704 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1705 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
1706 MEC_CDRXADDR(sc, i));
1707 continue;
1708 }
1709
1710 /*
1711 * If 802.1Q VLAN MTU is enabled, ignore the bad packet error.
1712 */
1713 if ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) != 0)
1714 rxstat &= ~MEC_RXSTAT_BADPACKET;
1715
1716 if (rxstat &
1717 (MEC_RXSTAT_BADPACKET |
1718 MEC_RXSTAT_LONGEVENT |
1719 MEC_RXSTAT_INVALID |
1720 MEC_RXSTAT_CRCERROR |
1721 MEC_RXSTAT_VIOLATION)) {
1722 printf("%s: mec_rxintr: status = 0x%016"PRIx64"\n",
1723 device_xname(sc->sc_dev), rxstat);
1724 goto dropit;
1725 }
1726
1727 /*
1728 * The MEC includes the CRC with every packet. Trim
1729 * it off here.
1730 */
1731 len -= ETHER_CRC_LEN;
1732
1733 /*
1734 * now allocate an mbuf (and possibly a cluster) to hold
1735 * the received packet.
1736 */
1737 MGETHDR(m, M_DONTWAIT, MT_DATA);
1738 if (m == NULL) {
1739 printf("%s: unable to allocate RX mbuf\n",
1740 device_xname(sc->sc_dev));
1741 goto dropit;
1742 }
1743 if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1744 MCLGET(m, M_DONTWAIT);
1745 if ((m->m_flags & M_EXT) == 0) {
1746 printf("%s: unable to allocate RX cluster\n",
1747 device_xname(sc->sc_dev));
1748 m_freem(m);
1749 m = NULL;
1750 goto dropit;
1751 }
1752 }
1753
1754 /*
 * Note the MEC chip seems to insert 2 bytes of padding at the top of
 * the RX buffer, but we copy the whole buffer to avoid an unaligned copy.
1757 */
1758 MEC_RXBUFSYNC(sc, i, len + ETHER_CRC_LEN, BUS_DMASYNC_POSTREAD);
1759 memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
1760 crc = be32dec(rxd->rxd_buf + MEC_ETHER_ALIGN + len);
1761 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
1762 m->m_data += MEC_ETHER_ALIGN;
1763
1764 /* put RX buffer into FIFO again */
1765 rxd->rxd_stat = 0;
1766 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1767 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
1768
1769 m->m_pkthdr.rcvif = ifp;
1770 m->m_pkthdr.len = m->m_len = len;
1771 if ((ifp->if_csum_flags_rx & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0)
1772 mec_rxcsum(sc, m, RXSTAT_CKSUM(rxstat), crc);
1773
1774 ifp->if_ipackets++;
1775
1776 /*
1777 * Pass this up to any BPF listeners, but only
1778 * pass it up the stack if it's for us.
1779 */
1780 bpf_mtap(ifp, m);
1781
1782 /* Pass it on. */
1783 (*ifp->if_input)(ifp, m);
1784 }
1785
1786 /* update RX pointer */
1787 sc->sc_rxptr = i;
1788 }
1789
1790 static void
1791 mec_rxcsum(struct mec_softc *sc, struct mbuf *m, uint16_t rxcsum, uint32_t crc)
1792 {
1793 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1794 struct ether_header *eh;
1795 struct ip *ip;
1796 struct udphdr *uh;
1797 u_int len, pktlen, hlen;
1798 uint32_t csum_data, dsum;
1799 int csum_flags;
1800 const uint16_t *dp;
1801
1802 csum_data = 0;
1803 csum_flags = 0;
1804
1805 len = m->m_len;
1806 if (len < ETHER_HDR_LEN + sizeof(struct ip))
1807 goto out;
1808 pktlen = len - ETHER_HDR_LEN;
1809 eh = mtod(m, struct ether_header *);
1810 if (ntohs(eh->ether_type) != ETHERTYPE_IP)
1811 goto out;
1812 ip = (struct ip *)((uint8_t *)eh + ETHER_HDR_LEN);
1813 if (ip->ip_v != IPVERSION)
1814 goto out;
1815
1816 hlen = ip->ip_hl << 2;
1817 if (hlen < sizeof(struct ip))
1818 goto out;
1819
1820 /*
 * Bail out if the packet is too short, has random trailing garbage,
 * is truncated, is a fragment, or has Ethernet padding.
1823 */
1824 if (ntohs(ip->ip_len) < hlen ||
1825 ntohs(ip->ip_len) != pktlen ||
1826 (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0)
1827 goto out;
1828
1829 switch (ip->ip_p) {
1830 case IPPROTO_TCP:
1831 if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0 ||
1832 pktlen < (hlen + sizeof(struct tcphdr)))
1833 goto out;
1834 csum_flags = M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
1835 break;
1836 case IPPROTO_UDP:
1837 if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0 ||
1838 pktlen < (hlen + sizeof(struct udphdr)))
1839 goto out;
1840 uh = (struct udphdr *)((uint8_t *)ip + hlen);
1841 if (uh->uh_sum == 0)
1842 goto out; /* no checksum */
1843 csum_flags = M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
1844 break;
1845 default:
1846 goto out;
1847 }
1848
1849 /*
 * The computed checksum includes the Ethernet header, IP header,
 * and CRC, so we have to deduct them.
 * Note the IP header sums to 0xffff (thanks to its own checksum
 * field), so we don't have to deduct it.
1854 */
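	/*
	 * In ones'-complement arithmetic adding ~dsum is equivalent to
	 * subtracting dsum, so the code below effectively computes
	 * rxcsum - (sum of the Ethernet header and CRC words), folding
	 * carries back in after each step.  The "len & 1" case handles the
	 * CRC starting on an odd byte boundary, which byte-swaps its
	 * contribution to the 16-bit sum.
	 */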
1855 dsum = 0;
1856
1857 /* deduct Ethernet header */
1858 dp = (const uint16_t *)eh;
1859 for (hlen = 0; hlen < (ETHER_HDR_LEN / sizeof(uint16_t)); hlen++)
1860 dsum += ntohs(*dp++);
1861
1862 /* deduct CRC */
1863 if (len & 1) {
1864 dsum += (crc >> 24) & 0x00ff;
1865 dsum += (crc >> 8) & 0xffff;
1866 dsum += (crc << 8) & 0xff00;
1867 } else {
1868 dsum += (crc >> 16) & 0xffff;
1869 dsum += (crc >> 0) & 0xffff;
1870 }
1871 while (dsum >> 16)
1872 dsum = (dsum >> 16) + (dsum & 0xffff);
1873
1874 csum_data = rxcsum;
1875 csum_data += (uint16_t)~dsum;
1876
1877 while (csum_data >> 16)
1878 csum_data = (csum_data >> 16) + (csum_data & 0xffff);
1879
1880 out:
1881 m->m_pkthdr.csum_flags = csum_flags;
1882 m->m_pkthdr.csum_data = csum_data;
1883 }
1884
1885 static void
1886 mec_txintr(struct mec_softc *sc, uint32_t txptr)
1887 {
1888 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1889 struct mec_txdesc *txd;
1890 struct mec_txsoft *txs;
1891 bus_dmamap_t dmamap;
1892 uint64_t txstat;
1893 int i;
1894 u_int col;
1895
1896 DPRINTF(MEC_DEBUG_TXINTR, ("%s: called\n", __func__));
1897
1898 for (i = sc->sc_txdirty; i != txptr && sc->sc_txpending != 0;
1899 i = MEC_NEXTTX(i), sc->sc_txpending--) {
1900 txd = &sc->sc_txdesc[i];
1901
1902 MEC_TXCMDSYNC(sc, i,
1903 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1904
1905 txstat = txd->txd_stat;
1906 DPRINTF(MEC_DEBUG_TXINTR,
1907 ("%s: dirty = %d, txstat = 0x%016llx\n",
1908 __func__, i, txstat));
1909 if ((txstat & MEC_TXSTAT_SENT) == 0) {
1910 MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
1911 break;
1912 }
1913
1914 txs = &sc->sc_txsoft[i];
1915 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1916 dmamap = txs->txs_dmamap;
1917 bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
1918 dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1919 bus_dmamap_unload(sc->sc_dmat, dmamap);
1920 m_freem(txs->txs_mbuf);
1921 txs->txs_mbuf = NULL;
1922 }
1923
1924 col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
1925 ifp->if_collisions += col;
1926
1927 if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
1928 printf("%s: TX error: txstat = 0x%016"PRIx64"\n",
1929 device_xname(sc->sc_dev), txstat);
1930 ifp->if_oerrors++;
1931 } else
1932 ifp->if_opackets++;
1933 }
1934
1935 /* update the dirty TX buffer pointer */
1936 sc->sc_txdirty = i;
1937 DPRINTF(MEC_DEBUG_INTR,
1938 ("%s: sc_txdirty = %2d, sc_txpending = %2d\n",
1939 __func__, sc->sc_txdirty, sc->sc_txpending));
1940
1941 /* cancel the watchdog timer if there are no pending TX packets */
1942 if (sc->sc_txpending == 0)
1943 ifp->if_timer = 0;
1944 if (sc->sc_txpending < MEC_NTXDESC - MEC_NTXDESC_RSVD)
1945 ifp->if_flags &= ~IFF_OACTIVE;
1946 }
1947
1948 static bool
1949 mec_shutdown(device_t self, int howto)
1950 {
1951 struct mec_softc *sc = device_private(self);
1952
1953 mec_stop(&sc->sc_ethercom.ec_if, 1);
1954 /* make sure to stop DMA etc. */
1955 mec_reset(sc);
1956
1957 return true;
1958 }
1959