/*	$NetBSD: if_mec.c,v 1.62 2019/12/26 04:53:11 msaitoh Exp $	*/
2
3 /*-
4 * Copyright (c) 2004, 2008 Izumi Tsutsui. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*
28 * Copyright (c) 2003 Christopher SEKIYA
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 * This product includes software developed for the
42 * NetBSD Project. See http://www.NetBSD.org/ for
43 * information about NetBSD.
44 * 4. The name of the author may not be used to endorse or promote products
45 * derived from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */
58
59 /*
60 * MACE MAC-110 Ethernet driver
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.62 2019/12/26 04:53:11 msaitoh Exp $");
65
66 #include "opt_ddb.h"
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/device.h>
71 #include <sys/callout.h>
72 #include <sys/mbuf.h>
73 #include <sys/malloc.h>
74 #include <sys/kernel.h>
75 #include <sys/socket.h>
76 #include <sys/ioctl.h>
77 #include <sys/errno.h>
78
79 #include <sys/rndsource.h>
80
81 #include <net/if.h>
82 #include <net/if_dl.h>
83 #include <net/if_media.h>
84 #include <net/if_ether.h>
85
86 #include <netinet/in.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/ip.h>
89 #include <netinet/tcp.h>
90 #include <netinet/udp.h>
91
92 #include <net/bpf.h>
93
94 #include <sys/bus.h>
95 #include <machine/intr.h>
96 #include <machine/machtype.h>
97
98 #include <dev/mii/mii.h>
99 #include <dev/mii/miivar.h>
100
101 #include <sgimips/mace/macevar.h>
102 #include <sgimips/mace/if_mecreg.h>
103
104 #include <dev/arcbios/arcbios.h>
105 #include <dev/arcbios/arcbiosvar.h>
106
/* #define MEC_DEBUG */

#ifdef MEC_DEBUG
/*
 * Debug category bits; OR the wanted MEC_DEBUG_* values into the
 * run-time variable mec_debug to enable the corresponding DPRINTF()s.
 */
#define MEC_DEBUG_RESET		0x01
#define MEC_DEBUG_START		0x02
#define MEC_DEBUG_STOP		0x04
#define MEC_DEBUG_INTR		0x08
#define MEC_DEBUG_RXINTR	0x10
#define MEC_DEBUG_TXINTR	0x20
#define MEC_DEBUG_TXSEGS	0x40
uint32_t mec_debug = 0;		/* enabled MEC_DEBUG_* categories */
#define DPRINTF(x, y)	if (mec_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif

/* #define MEC_EVENT_COUNTERS */

#ifdef MEC_EVENT_COUNTERS
#define MEC_EVCNT_INCR(ev)	(ev)->ev_count++
#else
/* no-op form that still swallows the trailing semicolon */
#define MEC_EVCNT_INCR(ev)	do {} while (/* CONSTCOND */ 0)
#endif
130
/*
 * Transmit descriptor list size
 */
#define MEC_NTXDESC		64	/* power of two: ring indexed via mask */
#define MEC_NTXDESC_MASK	(MEC_NTXDESC - 1)
#define MEC_NEXTTX(x)		(((x) + 1) & MEC_NTXDESC_MASK)
/* NOTE(review): used by the TX path outside this chunk; presumably
 * descriptors kept in reserve and the TX-interrupt interval — confirm
 * against mec_start()/mec_txintr(). */
#define MEC_NTXDESC_RSVD	4
#define MEC_NTXDESC_INTR	8

/*
 * software state for TX
 */
struct mec_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	uint32_t txs_flags;
#define MEC_TXS_BUFLEN_MASK	0x0000007f	/* data len in txd_buf */
#define MEC_TXS_TXDPTR		0x00000080	/* concat txd_ptr is used */
};
150
/*
 * Transmit buffer descriptor
 *
 * Layout is dictated by the MAC-110 hardware: a 64-bit command/status
 * word followed by either up to MEC_NTXPTR external buffer pointers,
 * an inline data buffer, or a mix of both (the pointers overlay the
 * start of the inline buffer via the union below).
 */
#define MEC_TXDESCSIZE		128
#define MEC_NTXPTR		3
/* inline buffer offset when no txd_ptr slot is used (just past txd_cmd) */
#define MEC_TXD_BUFOFFSET	sizeof(uint64_t)
/* inline buffer offset when all MEC_NTXPTR txd_ptr slots are in use */
#define MEC_TXD_BUFOFFSET1	\
	(sizeof(uint64_t) + sizeof(uint64_t) * MEC_NTXPTR)
#define MEC_TXD_BUFSIZE		(MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
#define MEC_TXD_BUFSIZE1	(MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET1)
/* inline data is stored at the END of txd_buf; see MEC_TXCMD_BUFSTART */
#define MEC_TXD_BUFSTART(len)	(MEC_TXD_BUFSIZE - (len))
#define MEC_TXD_ALIGN		8
#define MEC_TXD_ALIGNMASK	(MEC_TXD_ALIGN - 1)
/* round a DMA address up to the 8-byte boundary required by txd_ptr */
#define MEC_TXD_ROUNDUP(addr)	\
	(((addr) + MEC_TXD_ALIGNMASK) & ~(uint64_t)MEC_TXD_ALIGNMASK)
#define MEC_NTXSEG		16

struct mec_txdesc {
	volatile uint64_t txd_cmd;	/* command before TX, status after */
#define MEC_TXCMD_DATALEN	0x000000000000ffff	/* data length */
#define MEC_TXCMD_BUFSTART	0x00000000007f0000	/* start byte offset */
#define  TXCMD_BUFSTART(x)	((x) << 16)
#define MEC_TXCMD_TERMDMA	0x0000000000800000	/* stop DMA on abort */
#define MEC_TXCMD_TXINT		0x0000000001000000	/* INT after TX done */
#define MEC_TXCMD_PTR1		0x0000000002000000	/* valid 1st txd_ptr */
#define MEC_TXCMD_PTR2		0x0000000004000000	/* valid 2nd txd_ptr */
#define MEC_TXCMD_PTR3		0x0000000008000000	/* valid 3rd txd_ptr */
#define MEC_TXCMD_UNUSED	0xfffffffff0000000ULL	/* should be zero */

/* after transmission the same 64-bit word holds status */
#define txd_stat	txd_cmd
#define MEC_TXSTAT_LEN		0x000000000000ffff	/* TX length */
#define MEC_TXSTAT_COLCNT	0x00000000000f0000	/* collision count */
#define MEC_TXSTAT_COLCNT_SHIFT	16
#define MEC_TXSTAT_LATE_COL	0x0000000000100000	/* late collision */
#define MEC_TXSTAT_CRCERROR	0x0000000000200000	/* */
#define MEC_TXSTAT_DEFERRED	0x0000000000400000	/* */
#define MEC_TXSTAT_SUCCESS	0x0000000000800000	/* TX complete */
#define MEC_TXSTAT_TOOBIG	0x0000000001000000	/* */
#define MEC_TXSTAT_UNDERRUN	0x0000000002000000	/* */
#define MEC_TXSTAT_COLLISIONS	0x0000000004000000	/* */
#define MEC_TXSTAT_EXDEFERRAL	0x0000000008000000	/* */
#define MEC_TXSTAT_COLLIDED	0x0000000010000000	/* */
#define MEC_TXSTAT_UNUSED	0x7fffffffe0000000ULL	/* should be zero */
#define MEC_TXSTAT_SENT		0x8000000000000000ULL	/* packet sent */

	union {
		uint64_t txptr[MEC_NTXPTR];	/* external buffer pointers */
#define MEC_TXPTR_UNUSED2	0x0000000000000007	/* should be zero */
#define MEC_TXPTR_DMAADDR	0x00000000fffffff8	/* TX DMA address */
#define MEC_TXPTR_LEN		0x0000ffff00000000ULL	/* buffer length */
#define  TXPTR_LEN(x)		((uint64_t)(x) << 32)
#define MEC_TXPTR_UNUSED1	0xffff000000000000ULL	/* should be zero */

		uint8_t txbuf[MEC_TXD_BUFSIZE];	/* inline data buffer */
	} txd_data;
#define txd_ptr		txd_data.txptr
#define txd_buf		txd_data.txbuf
};
209
/*
 * Receive buffer size
 */
#define MEC_NRXDESC		16	/* power of two: ring indexed via mask */
#define MEC_NRXDESC_MASK	(MEC_NRXDESC - 1)
#define MEC_NEXTRX(x)		(((x) + 1) & MEC_NRXDESC_MASK)

/*
 * Receive buffer description
 */
#define MEC_RXDESCSIZE		4096	/* umm, should be 4kbyte aligned */
#define MEC_RXD_NRXPAD		3
/* DMA offset in 64-bit words (stat word plus pad); programmed into
 * the DMA control register by mec_init() */
#define MEC_RXD_DMAOFFSET	(1 + MEC_RXD_NRXPAD)
#define MEC_RXD_BUFOFFSET	(MEC_RXD_DMAOFFSET * sizeof(uint64_t))
#define MEC_RXD_BUFSIZE		(MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)

struct mec_rxdesc {
	volatile uint64_t rxd_stat;	/* written by hardware on RX */
#define MEC_RXSTAT_LEN		0x000000000000ffff	/* data length */
#define MEC_RXSTAT_VIOLATION	0x0000000000010000	/* code violation (?) */
#define MEC_RXSTAT_UNUSED2	0x0000000000020000	/* unknown (?) */
#define MEC_RXSTAT_CRCERROR	0x0000000000040000	/* CRC error */
#define MEC_RXSTAT_MULTICAST	0x0000000000080000	/* multicast packet */
#define MEC_RXSTAT_BROADCAST	0x0000000000100000	/* broadcast packet */
#define MEC_RXSTAT_INVALID	0x0000000000200000	/* invalid preamble */
#define MEC_RXSTAT_LONGEVENT	0x0000000000400000	/* long packet */
#define MEC_RXSTAT_BADPACKET	0x0000000000800000	/* bad packet */
#define MEC_RXSTAT_CAREVENT	0x0000000001000000	/* carrier event */
#define MEC_RXSTAT_MATCHMCAST	0x0000000002000000	/* match multicast */
#define MEC_RXSTAT_MATCHMAC	0x0000000004000000	/* match MAC */
#define MEC_RXSTAT_SEQNUM	0x00000000f8000000	/* sequence number */
#define MEC_RXSTAT_CKSUM	0x0000ffff00000000ULL	/* IP checksum */
#define  RXSTAT_CKSUM(x)	(((uint64_t)(x) & MEC_RXSTAT_CKSUM) >> 32)
#define MEC_RXSTAT_UNUSED1	0x7fff000000000000ULL	/* should be zero */
#define MEC_RXSTAT_RECEIVED	0x8000000000000000ULL	/* set to 1 on RX */
	uint64_t rxd_pad1[MEC_RXD_NRXPAD];	/* pad stat word out to
						 * MEC_RXD_BUFOFFSET */
	uint8_t rxd_buf[MEC_RXD_BUFSIZE];	/* received frame data */
};
248
/*
 * control structures for DMA ops
 *
 * One contiguous, 8 KB-aligned allocation holding both descriptor
 * rings (including their inline data buffers); see mec_attach().
 */
struct mec_control_data {
	/*
	 * TX descriptors and buffers
	 */
	struct mec_txdesc mcd_txdesc[MEC_NTXDESC];

	/*
	 * RX descriptors and buffers
	 */
	struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
};

/*
 * It _seems_ there are some restrictions on descriptor address:
 *
 *  - Base address of txdescs should be 8kbyte aligned
 *  - Each txdesc should be 128byte aligned
 *  - Each rxdesc should be 4kbyte aligned
 *
 * So we should specify 8k align to allocate txdescs.
 * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192
 * so rxdescs are also allocated at 4kbyte aligned.
 */
#define MEC_CONTROL_DATA_ALIGN	(8 * 1024)

/* byte offsets of individual descriptors within the control data area */
#define MEC_CDOFF(x)	offsetof(struct mec_control_data, x)
#define MEC_CDTXOFF(x)	MEC_CDOFF(mcd_txdesc[(x)])
#define MEC_CDRXOFF(x)	MEC_CDOFF(mcd_rxdesc[(x)])
280
/*
 * software state per device
 */
struct mec_softc {
	device_t sc_dev;		/* generic device structures */

	bus_space_tag_t sc_st;		/* bus_space tag */
	bus_space_handle_t sc_sh;	/* bus_space handle */
	bus_dma_tag_t sc_dmat;		/* bus_dma tag */

	struct ethercom sc_ethercom;	/* Ethernet common part */

	struct mii_data sc_mii;		/* MII/media information */
	int sc_phyaddr;			/* MII address */
	struct callout sc_tick_ch;	/* tick callout */

	uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */

	bus_dmamap_t sc_cddmamap;	/* bus_dma map for control data */
/* shorthand for the bus (DMA) address of the control data area */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* pointer to allocated control data */
	struct mec_control_data *sc_control_data;
#define sc_txdesc	sc_control_data->mcd_txdesc
#define sc_rxdesc	sc_control_data->mcd_rxdesc

	/* software state for TX descs */
	struct mec_txsoft sc_txsoft[MEC_NTXDESC];

	int sc_txpending;		/* number of TX requests pending */
	int sc_txdirty;			/* first dirty TX descriptor */
	int sc_txlast;			/* last used TX descriptor */

	int sc_rxptr;			/* next ready RX buffer */

	krndsource_t sc_rnd_source;	/* random source */
#ifdef MEC_EVENT_COUNTERS
	/* statistics counters; attached dynamically in mec_attach() */
	struct evcnt sc_ev_txpkts;	/* TX packets queued total */
	struct evcnt sc_ev_txdpad;	/* TX packets padded in txdesc buf */
	struct evcnt sc_ev_txdbuf;	/* TX packets copied to txdesc buf */
	struct evcnt sc_ev_txptr1;	/* TX packets using concat ptr1 */
	struct evcnt sc_ev_txptr1a;	/* TX packets w/ptr1 ~160bytes */
	struct evcnt sc_ev_txptr1b;	/* TX packets w/ptr1 ~256bytes */
	struct evcnt sc_ev_txptr1c;	/* TX packets w/ptr1 ~512bytes */
	struct evcnt sc_ev_txptr1d;	/* TX packets w/ptr1 ~1024bytes */
	struct evcnt sc_ev_txptr1e;	/* TX packets w/ptr1 >1024bytes */
	struct evcnt sc_ev_txptr2;	/* TX packets using concat ptr1,2 */
	struct evcnt sc_ev_txptr2a;	/* TX packets w/ptr2 ~160bytes */
	struct evcnt sc_ev_txptr2b;	/* TX packets w/ptr2 ~256bytes */
	struct evcnt sc_ev_txptr2c;	/* TX packets w/ptr2 ~512bytes */
	struct evcnt sc_ev_txptr2d;	/* TX packets w/ptr2 ~1024bytes */
	struct evcnt sc_ev_txptr2e;	/* TX packets w/ptr2 >1024bytes */
	struct evcnt sc_ev_txptr3;	/* TX packets using concat ptr1,2,3 */
	struct evcnt sc_ev_txptr3a;	/* TX packets w/ptr3 ~160bytes */
	struct evcnt sc_ev_txptr3b;	/* TX packets w/ptr3 ~256bytes */
	struct evcnt sc_ev_txptr3c;	/* TX packets w/ptr3 ~512bytes */
	struct evcnt sc_ev_txptr3d;	/* TX packets w/ptr3 ~1024bytes */
	struct evcnt sc_ev_txptr3e;	/* TX packets w/ptr3 >1024bytes */
	struct evcnt sc_ev_txmbuf;	/* TX packets copied to new mbufs */
	struct evcnt sc_ev_txmbufa;	/* TX packets w/mbuf ~160bytes */
	struct evcnt sc_ev_txmbufb;	/* TX packets w/mbuf ~256bytes */
	struct evcnt sc_ev_txmbufc;	/* TX packets w/mbuf ~512bytes */
	struct evcnt sc_ev_txmbufd;	/* TX packets w/mbuf ~1024bytes */
	struct evcnt sc_ev_txmbufe;	/* TX packets w/mbuf >1024bytes */
	struct evcnt sc_ev_txptrs;	/* TX packets using ptrs total */
	struct evcnt sc_ev_txptrc0;	/* TX packets w/ptrs no hdr chain */
	struct evcnt sc_ev_txptrc1;	/* TX packets w/ptrs 1 hdr chain */
	struct evcnt sc_ev_txptrc2;	/* TX packets w/ptrs 2 hdr chains */
	struct evcnt sc_ev_txptrc3;	/* TX packets w/ptrs 3 hdr chains */
	struct evcnt sc_ev_txptrc4;	/* TX packets w/ptrs 4 hdr chains */
	struct evcnt sc_ev_txptrc5;	/* TX packets w/ptrs 5 hdr chains */
	struct evcnt sc_ev_txptrc6;	/* TX packets w/ptrs >5 hdr chains */
	struct evcnt sc_ev_txptrh0;	/* TX packets w/ptrs ~8bytes hdr */
	struct evcnt sc_ev_txptrh1;	/* TX packets w/ptrs ~16bytes hdr */
	struct evcnt sc_ev_txptrh2;	/* TX packets w/ptrs ~32bytes hdr */
	struct evcnt sc_ev_txptrh3;	/* TX packets w/ptrs ~64bytes hdr */
	struct evcnt sc_ev_txptrh4;	/* TX packets w/ptrs ~80bytes hdr */
	struct evcnt sc_ev_txptrh5;	/* TX packets w/ptrs ~96bytes hdr */
	struct evcnt sc_ev_txdstall;	/* TX stalled due to no txdesc */
	struct evcnt sc_ev_txempty;	/* TX empty interrupts */
	struct evcnt sc_ev_txsent;	/* TX sent interrupts */
#endif
};
364
/* bus (DMA) address of TX/RX descriptor x */
#define MEC_CDTXADDR(sc, x)	((sc)->sc_cddma + MEC_CDTXOFF(x))
#define MEC_CDRXADDR(sc, x)	((sc)->sc_cddma + MEC_CDRXOFF(x))

/* sync an entire TX descriptor (command word, pointers and inline buf) */
#define MEC_TXDESCSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
/* sync only the 64-bit txd_cmd/txd_stat word */
#define MEC_TXCMDSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    MEC_CDTXOFF(x), sizeof(uint64_t), (ops))

/* sync only the 64-bit rxd_stat word */
#define MEC_RXSTATSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
/* sync len bytes of RX data plus the 2-byte alignment slack */
#define MEC_RXBUFSYNC(sc, x, len, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET,				\
	    MEC_ETHER_ALIGN + (len), (ops))

/* XXX these values should be moved to <net/if_ether.h> ? */
#define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
#define MEC_ETHER_ALIGN	2
386
/* autoconf / MII / ifnet entry points (definitions below) */
static int	mec_match(device_t, cfdata_t, void *);
static void	mec_attach(device_t, device_t, void *);

static int	mec_mii_readreg(device_t, int, int, uint16_t *);
static int	mec_mii_writereg(device_t, int, int, uint16_t);
static int	mec_mii_wait(struct mec_softc *);
static void	mec_statchg(struct ifnet *);

static int	mec_init(struct ifnet * ifp);
static void	mec_start(struct ifnet *);
static void	mec_watchdog(struct ifnet *);
static void	mec_tick(void *);
static int	mec_ioctl(struct ifnet *, u_long, void *);
static void	mec_reset(struct mec_softc *);
static void	mec_setfilter(struct mec_softc *);
static int	mec_intr(void *arg);
static void	mec_stop(struct ifnet *, int);
static void	mec_rxintr(struct mec_softc *);
static void	mec_rxcsum(struct mec_softc *, struct mbuf *, uint16_t,
		    uint32_t);
static void	mec_txintr(struct mec_softc *, uint32_t);
static bool	mec_shutdown(device_t, int);

CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc),
    mec_match, mec_attach, NULL, NULL);

/* set once the single MAC-110 instance has matched */
static int mec_matched = 0;
414
415 static int
416 mec_match(device_t parent, cfdata_t cf, void *aux)
417 {
418
419 /* allow only one device */
420 if (mec_matched)
421 return 0;
422
423 mec_matched = 1;
424 return 1;
425 }
426
/*
 * Attach the MAC-110: map registers, allocate/load the DMA'able
 * control area (TX/RX descriptor rings), recover the Ethernet address
 * from ARCBIOS (synthesizing one from "netaddr" if the DS2502 is
 * missing), then attach MII/media, the network interface, the
 * interrupt handler and the power-management hooks.
 */
static void
mec_attach(device_t parent, device_t self, void *aux)
{
	struct mec_softc *sc = device_private(self);
	struct mace_attach_args *maa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	uint64_t address, command;
	const char *macaddr;
	struct mii_softc *child;
	bus_dma_segment_t seg;
	int i, err, rseg;
	bool mac_is_fake;

	sc->sc_dev = self;
	sc->sc_st = maa->maa_st;
	if (bus_space_subregion(sc->sc_st, maa->maa_sh,
	    maa->maa_offset, 0, &sc->sc_sh) != 0) {
		aprint_error(": can't map i/o space\n");
		return;
	}

	/* set up DMA structures */
	sc->sc_dmat = maa->maa_dmat;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((err = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": unable to allocate control data, error = %d\n",
		    err);
		goto fail_0;
	}
	/*
	 * XXX needs re-think...
	 * control data structures contain whole RX data buffer, so
	 * BUS_DMA_COHERENT (which disables cache) may cause some performance
	 * issue on copying data from the RX buffer to mbuf on normal memory,
	 * though we have to make sure all bus_dmamap_sync(9) ops are called
	 * properly in that case.
	 */
	if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mec_control_data),
	    (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
		aprint_error(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}
	memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));

	if ((err = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct mec_control_data), 1,
	    sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error(": unable to create control data DMA map,"
		    " error = %d\n", err);
		goto fail_2;
	}
	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct mec_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": unable to load control data DMA map,"
		    " error = %d\n", err);
		goto fail_3;
	}

	/* create TX buffer DMA maps */
	for (i = 0; i < MEC_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, MEC_NTXSEG, MCLBYTES, PAGE_SIZE, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error(": unable to create tx DMA map %d,"
			    " error = %d\n", i, err);
			goto fail_4;
		}
	}

	callout_init(&sc->sc_tick_ch, 0);

	/* get Ethernet address from ARCBIOS */
	if ((macaddr = arcbios_GetEnvironmentVariable("eaddr")) == NULL) {
		aprint_error(": unable to get MAC address!\n");
		goto fail_4;
	}
	/*
	 * On some machines the DS2502 chip storing the serial number/
	 * mac address is on the pci riser board - if this board is
	 * missing, ARCBIOS will not know a good ethernet address (but
	 * otherwise the machine will work fine).
	 */
	mac_is_fake = false;
	if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) {
		uint32_t ui = 0;
		const char * netaddr =
			arcbios_GetEnvironmentVariable("netaddr");

		/*
		 * Create a MAC address by abusing the "netaddr" env var
		 */
		sc->sc_enaddr[0] = 0xf2;
		sc->sc_enaddr[1] = 0x0b;
		sc->sc_enaddr[2] = 0xa4;
		if (netaddr) {
			mac_is_fake = true;
			/* parse dotted-decimal "netaddr" into ui,
			 * one byte per component */
			while (*netaddr) {
				int v = 0;
				while (*netaddr && *netaddr != '.') {
					if (*netaddr >= '0' && *netaddr <= '9')
						v = v*10 + (*netaddr - '0');
					netaddr++;
				}
				ui <<= 8;
				ui |= v;
				if (*netaddr == '.')
					netaddr++;
			}
		}
		/* use 3 middle/low bytes of ui as the NIC-specific part
		 * (NOTE(review): relies on big-endian byte order of the
		 * sgimips platform) */
		memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3);
	}
	if (!mac_is_fake)
		ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr);

	/* set the Ethernet address */
	address = 0;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		address = address << 8;
		address |= sc->sc_enaddr[i];
	}
	bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address);

	/* reset device */
	mec_reset(sc);

	command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);

	aprint_normal(": MAC-110 Ethernet, rev %u\n",
	    (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT));

	if (mac_is_fake)
		aprint_normal_dev(self,
		    "could not get ethernet address from firmware"
		    " - generated one from the \"netaddr\" environment"
		    " variable\n");
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* Done, now attach everything */

	mii->mii_ifp = ifp;
	mii->mii_readreg = mec_mii_readreg;
	mii->mii_writereg = mec_mii_writereg;
	mii->mii_statchg = mec_statchg;

	/* Set up PHY properties */
	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
		sc->sc_phyaddr = child->mii_phy;
	}

	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mec_ioctl;
	ifp->if_start = mec_start;
	ifp->if_watchdog = mec_watchdog;
	ifp->if_init = mec_init;
	ifp->if_stop = mec_stop;
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_READY(&ifp->if_snd);

	/* mec has dumb RX cksum support */
	ifp->if_capabilities = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx;

	/* We can support 802.1Q VLAN-sized frames. */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* attach the interface */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);

	/* establish interrupt */
	cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

#ifdef MEC_EVENT_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_txpkts , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts queued total");
	evcnt_attach_dynamic(&sc->sc_ev_txdpad , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts padded in txdesc buf");
	evcnt_attach_dynamic(&sc->sc_ev_txdbuf , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts copied to txdesc buf");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts using concat ptr1");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1a , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr1 ~160bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1b , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr1 ~256bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1c , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr1 ~512bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1d , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr1 ~1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr1e , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr1 >1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts using concat ptr1,2");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2a , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr2 ~160bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2b , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr2 ~256bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2c , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr2 ~512bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2d , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr2 ~1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr2e , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr2 >1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts using concat ptr1,2,3");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3a , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr3 ~160bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3b , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr3 ~256bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3c , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr3 ~512bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3d , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr3 ~1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptr3e , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptr3 >1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txmbuf , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts copied to new mbufs");
	evcnt_attach_dynamic(&sc->sc_ev_txmbufa , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/mbuf ~160bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txmbufb , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/mbuf ~256bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txmbufc , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/mbuf ~512bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txmbufd , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/mbuf ~1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txmbufe , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/mbuf >1024bytes");
	evcnt_attach_dynamic(&sc->sc_ev_txptrs , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts using ptrs total");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc0 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs no hdr chain");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc1 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs 1 hdr chain");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc2 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs 2 hdr chains");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc3 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs 3 hdr chains");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc4 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs 4 hdr chains");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc5 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs 5 hdr chains");
	evcnt_attach_dynamic(&sc->sc_ev_txptrc6 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs >5 hdr chains");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh0 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~8bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh1 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~16bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh2 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~32bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh3 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~64bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh4 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~80bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txptrh5 , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX pkts w/ptrs ~96bytes hdr");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX stalled due to no txdesc");
	evcnt_attach_dynamic(&sc->sc_ev_txempty , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX empty interrupts");
	evcnt_attach_dynamic(&sc->sc_ev_txsent , EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "TX sent interrupts");
#endif

	/* set shutdown hook to reset interface on powerdown */
	if (pmf_device_register1(self, NULL, NULL, mec_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_4:
	for (i = 0; i < MEC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct mec_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
745
746 static int
747 mec_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
748 {
749 struct mec_softc *sc = device_private(self);
750 bus_space_tag_t st = sc->sc_st;
751 bus_space_handle_t sh = sc->sc_sh;
752 uint64_t data;
753 int i, rv;
754
755 if ((rv = mec_mii_wait(sc)) != 0)
756 return rv;
757
758 bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
759 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
760 delay(25);
761 bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
762 delay(25);
763 mec_mii_wait(sc);
764
765 for (i = 0; i < 20; i++) {
766 delay(30);
767
768 data = bus_space_read_8(st, sh, MEC_PHY_DATA);
769
770 if ((data & MEC_PHY_DATA_BUSY) == 0) {
771 *val = data & MEC_PHY_DATA_VALUE;
772 return 0;
773 }
774 }
775 return -1;
776 }
777
/*
 * Write a 16-bit value to a PHY register through the MEC MII
 * interface.  Returns 0, or an errno (ETIMEDOUT) if the interface
 * was busy before the write could be started.
 */
static int
mec_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct mec_softc *sc = device_private(self);
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	int rv;

	if ((rv = mec_mii_wait(sc)) != 0) {
		printf("timed out writing %x: %hx\n", reg, val);
		return rv;
	}

	/* select PHY and register */
	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));

	delay(60);

	/* writing MEC_PHY_DATA starts the write cycle */
	bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);

	delay(60);

	/*
	 * NOTE(review): the result of this final wait is discarded,
	 * so a write that never completes still returns 0.
	 */
	mec_mii_wait(sc);

	return 0;
}
804
805 static int
806 mec_mii_wait(struct mec_softc *sc)
807 {
808 uint32_t busy;
809 int i, s;
810
811 for (i = 0; i < 100; i++) {
812 delay(30);
813
814 s = splhigh();
815 busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
816 splx(s);
817
818 if ((busy & MEC_PHY_DATA_BUSY) == 0)
819 return 0;
820 #if 0
821 if (busy == 0xffff) /* XXX ? */
822 return 0;
823 #endif
824 }
825
826 printf("%s: MII timed out\n", device_xname(sc->sc_dev));
827 return ETIMEDOUT;
828 }
829
830 static void
831 mec_statchg(struct ifnet *ifp)
832 {
833 struct mec_softc *sc = ifp->if_softc;
834 bus_space_tag_t st = sc->sc_st;
835 bus_space_handle_t sh = sc->sc_sh;
836 uint32_t control;
837
838 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
839 control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
840 MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);
841
842 /* must also set IPG here for duplex stuff ... */
843 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
844 control |= MEC_MAC_FULL_DUPLEX;
845 } else {
846 /* set IPG */
847 control |= MEC_MAC_IPG_DEFAULT;
848 }
849
850 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
851 }
852
/*
 * if_init handler: stop any pending I/O, reset the chip, reprogram
 * the RX filter and the TX/RX rings, re-enable DMA and interrupts,
 * and mark the interface running.  Returns 0 on success or the
 * error from ether_mediachange().
 */
static int
mec_init(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct mec_rxdesc *rxd;
	int i, rc;

	/* cancel any pending I/O */
	mec_stop(ifp, 0);

	/* reset device */
	mec_reset(sc);

	/* setup filter for multicast or promisc mode */
	mec_setfilter(sc);

	/* set the TX ring pointer to the base address */
	bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));

	/* empty TX ring: dirty == next free slot, last == ring end */
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = MEC_NTXDESC - 1;

	/* put RX buffers into FIFO */
	for (i = 0; i < MEC_NRXDESC; i++) {
		rxd = &sc->sc_rxdesc[i];
		rxd->rxd_stat = 0;
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
	}
	sc->sc_rxptr = 0;

#if 0	/* XXX no info */
	bus_space_write_8(st, sh, MEC_TIMER, 0);
#endif

	/*
	 * MEC_DMA_TX_INT_ENABLE will be set later otherwise it causes
	 * spurious interrupts when TX buffers are empty
	 */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL,
	    (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
	    MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
	    MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);

	/* start the one-second MII tick */
	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);

	if ((rc = ether_mediachange(ifp)) != 0)
		return rc;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	mec_start(ifp);

	return 0;
}
913
/*
 * Hardware reset: halt DMA, pulse the MAC core-reset bit, then
 * program an initial MAC control value.  Leaves DMA disabled;
 * mec_init() re-enables it.
 */
static void
mec_reset(struct mec_softc *sc)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t control;

	/* stop DMA first */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);

	/* reset chip */
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
	delay(1000);
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
	delay(1000);

	/* Default to 100/half and let auto-negotiation work its magic */
	control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
	    MEC_MAC_IPG_DEFAULT;

	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
	/* stop DMA again for sanity */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);

	DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
	    bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
}
941
/*
 * if_start handler: queue as many packets from if_snd into the TX
 * ring as will fit.
 *
 * Short packets (<= MEC_TXD_BUFSIZE) are copied directly into the
 * txdesc buffer.  Longer packets are DMA-mapped and described via the
 * txdesc concatenate pointers; any leading part that is not uint64_t
 * aligned is copied into the txdesc buffer so the pointers stay
 * aligned.  If a packet's mbuf chain is too fragmented or misaligned
 * for the (up to three) concatenate pointers, it is first copied into
 * a freshly allocated contiguous mbuf.
 */
static void
mec_start(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	int error, firsttx, nexttx, opending;
	int len, bufoff, buflen, nsegs, align, resid, pseg, nptr, slen, i;
	uint32_t txdcmd;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the first transmit descriptor.
	 */
	opending = sc->sc_txpending;
	firsttx = MEC_NEXTTX(sc->sc_txlast);

	DPRINTF(MEC_DEBUG_START,
	    ("%s: opending = %d, firsttx = %d\n", __func__, opending, firsttx));

	while (sc->sc_txpending < MEC_NTXDESC - 1) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = MEC_NEXTTX(sc->sc_txlast);
		txd = &sc->sc_txdesc[nexttx];
		txs = &sc->sc_txsoft[nexttx];
		dmamap = txs->txs_dmamap;
		txs->txs_flags = 0;

		buflen = 0;
		bufoff = 0;
		resid = 0;
		nptr = 0;	/* XXX gcc */
		pseg = 0;	/* XXX gcc */

		len = m0->m_pkthdr.len;

		DPRINTF(MEC_DEBUG_START,
		    ("%s: len = %d, nexttx = %d, txpending = %d\n",
		    __func__, len, nexttx, sc->sc_txpending));

		if (len <= MEC_TXD_BUFSIZE) {
			/*
			 * If a TX packet will fit into small txdesc buffer,
			 * just copy it into there. Maybe it's faster than
			 * checking alignment and calling bus_dma(9) etc.
			 */
			DPRINTF(MEC_DEBUG_START, ("%s: short packet\n",
			    __func__));
			IFQ_DEQUEUE(&ifp->if_snd, m0);

			/*
			 * I don't know if MEC chip does auto padding,
			 * but do it manually for safety.
			 */
			if (len < ETHER_PAD_LEN) {
				MEC_EVCNT_INCR(&sc->sc_ev_txdpad);
				bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
				memset(txd->txd_buf + bufoff + len, 0,
				    ETHER_PAD_LEN - len);
				len = buflen = ETHER_PAD_LEN;
			} else {
				MEC_EVCNT_INCR(&sc->sc_ev_txdbuf);
				bufoff = MEC_TXD_BUFSTART(len);
				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
				buflen = len;
			}
		} else {
			/*
			 * If the packet won't fit the static buffer in txdesc,
			 * we have to use the concatenate pointers to handle it.
			 */
			DPRINTF(MEC_DEBUG_START, ("%s: long packet\n",
			    __func__));
			txs->txs_flags = MEC_TXS_TXDPTR;

			/*
			 * Call bus_dmamap_load_mbuf(9) first to see
			 * how many chains the TX mbuf has.
			 */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
			    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error == 0) {
				/*
				 * Check chains which might contain headers.
				 * They might be so much fragmented and
				 * it's better to copy them into txdesc buffer
				 * since they would be small enough.
				 */
				nsegs = dmamap->dm_nsegs;
				for (pseg = 0; pseg < nsegs; pseg++) {
					slen = dmamap->dm_segs[pseg].ds_len;
					if (buflen + slen >
					    MEC_TXD_BUFSIZE1 - MEC_TXD_ALIGN)
						break;
					buflen += slen;
				}
				/*
				 * Check if the rest chains can be fit into
				 * the concatenate pointers.
				 */
				align = dmamap->dm_segs[pseg].ds_addr &
				    MEC_TXD_ALIGNMASK;
				if (align > 0) {
					/*
					 * If the first chain isn't uint64_t
					 * aligned, append the unaligned part
					 * into txdesc buffer too.
					 */
					resid = MEC_TXD_ALIGN - align;
					buflen += resid;
					for (; pseg < nsegs; pseg++) {
						slen =
						  dmamap->dm_segs[pseg].ds_len;
						if (slen > resid)
							break;
						resid -= slen;
					}
				} else if (pseg == 0) {
					/*
					 * In this case, the first chain is
					 * uint64_t aligned but it's too long
					 * to put into txdesc buf.
					 * We have to put some data into
					 * txdesc buf even in this case,
					 * so put MEC_TXD_ALIGN bytes there.
					 */
					buflen = resid = MEC_TXD_ALIGN;
				}
				nptr = nsegs - pseg;
				if (nptr <= MEC_NTXPTR) {
					bufoff = MEC_TXD_BUFSTART(buflen);

					/*
					 * Check if all the rest chains are
					 * uint64_t aligned.
					 */
					align = 0;
					for (i = pseg + 1; i < nsegs; i++)
						align |=
						    dmamap->dm_segs[i].ds_addr
						    & MEC_TXD_ALIGNMASK;
					if (align != 0) {
						/* chains are not aligned */
						error = -1;
					}
				} else {
					/* The TX mbuf chains doesn't fit. */
					error = -1;
				}
				if (error == -1)
					bus_dmamap_unload(sc->sc_dmat, dmamap);
			}
			if (error != 0) {
				/*
				 * The TX mbuf chains can't be put into
				 * the concatenate buffers. In this case,
				 * we have to allocate a new contiguous mbuf
				 * and copy data into it.
				 *
				 * Even in this case, the Ethernet header in
				 * the TX mbuf might be unaligned and trailing
				 * data might be word aligned, so put 2 byte
				 * (MEC_ETHER_ALIGN) padding at the top of the
				 * allocated mbuf and copy TX packets.
				 * 6 bytes (MEC_ALIGN_BYTES - MEC_ETHER_ALIGN)
				 * at the top of the new mbuf won't be uint64_t
				 * alignd, but we have to put some data into
				 * txdesc buffer anyway even if the buffer
				 * is uint64_t aligned.
				 */
				DPRINTF(MEC_DEBUG_START | MEC_DEBUG_TXSEGS,
				    ("%s: re-allocating mbuf\n", __func__));

				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					printf("%s: unable to allocate "
					    "TX mbuf\n",
					    device_xname(sc->sc_dev));
					break;
				}
				if (len > (MHLEN - MEC_ETHER_ALIGN)) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						printf("%s: unable to allocate "
						    "TX cluster\n",
						    device_xname(sc->sc_dev));
						m_freem(m);
						break;
					}
				}
				m->m_data += MEC_ETHER_ALIGN;

				/*
				 * Copy whole data (including unaligned part)
				 * for following bpf_mtap().
				 */
				m_copydata(m0, 0, len, mtod(m, void *));
				m->m_pkthdr.len = m->m_len = len;
				error = bus_dmamap_load_mbuf(sc->sc_dmat,
				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
				if (dmamap->dm_nsegs > 1) {
					/* should not happen, but for sanity */
					bus_dmamap_unload(sc->sc_dmat, dmamap);
					error = -1;
				}
				if (error != 0) {
					printf("%s: unable to load TX buffer, "
					    "error = %d\n",
					    device_xname(sc->sc_dev), error);
					m_freem(m);
					break;
				}
				/*
				 * Only the first segment should be put into
				 * the concatenate pointer in this case.
				 */
				pseg = 0;
				nptr = 1;

				/*
				 * Set length of unaligned part which will be
				 * copied into txdesc buffer.
				 */
				buflen = MEC_TXD_ALIGN - MEC_ETHER_ALIGN;
				bufoff = MEC_TXD_BUFSTART(buflen);
				resid = buflen;
#ifdef MEC_EVENT_COUNTERS
				MEC_EVCNT_INCR(&sc->sc_ev_txmbuf);
				if (len <= 160)
					MEC_EVCNT_INCR(&sc->sc_ev_txmbufa);
				else if (len <= 256)
					MEC_EVCNT_INCR(&sc->sc_ev_txmbufb);
				else if (len <= 512)
					MEC_EVCNT_INCR(&sc->sc_ev_txmbufc);
				else if (len <= 1024)
					MEC_EVCNT_INCR(&sc->sc_ev_txmbufd);
				else
					MEC_EVCNT_INCR(&sc->sc_ev_txmbufe);
#endif
			}
#ifdef MEC_EVENT_COUNTERS
			else {
				MEC_EVCNT_INCR(&sc->sc_ev_txptrs);
				if (nptr == 1) {
					MEC_EVCNT_INCR(&sc->sc_ev_txptr1);
					if (len <= 160)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr1a);
					else if (len <= 256)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr1b);
					else if (len <= 512)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr1c);
					else if (len <= 1024)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr1d);
					else
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr1e);
				} else if (nptr == 2) {
					MEC_EVCNT_INCR(&sc->sc_ev_txptr2);
					if (len <= 160)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr2a);
					else if (len <= 256)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr2b);
					else if (len <= 512)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr2c);
					else if (len <= 1024)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr2d);
					else
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr2e);
				} else if (nptr == 3) {
					MEC_EVCNT_INCR(&sc->sc_ev_txptr3);
					if (len <= 160)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr3a);
					else if (len <= 256)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr3b);
					else if (len <= 512)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr3c);
					else if (len <= 1024)
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr3d);
					else
						MEC_EVCNT_INCR(
						    &sc->sc_ev_txptr3e);
				}
				if (pseg == 0)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc0);
				else if (pseg == 1)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc1);
				else if (pseg == 2)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc2);
				else if (pseg == 3)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc3);
				else if (pseg == 4)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc4);
				else if (pseg == 5)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc5);
				else
					MEC_EVCNT_INCR(&sc->sc_ev_txptrc6);
				if (buflen <= 8)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh0);
				else if (buflen <= 16)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh1);
				else if (buflen <= 32)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh2);
				else if (buflen <= 64)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh3);
				else if (buflen <= 80)
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh4);
				else
					MEC_EVCNT_INCR(&sc->sc_ev_txptrh5);
			}
#endif
			/* copy the unaligned leading part into the txdesc */
			m_copydata(m0, 0, buflen, txd->txd_buf + bufoff);

			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m != NULL) {
				/* the re-allocated mbuf replaces the original */
				m_freem(m0);
				m0 = m;
			}

			/*
			 * sync the DMA map for TX mbuf
			 */
			bus_dmamap_sync(sc->sc_dmat, dmamap, buflen,
			    len - buflen, BUS_DMASYNC_PREWRITE);
		}

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
		MEC_EVCNT_INCR(&sc->sc_ev_txpkts);

		/*
		 * setup the transmit descriptor.
		 */
		txdcmd = TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen) | (len - 1);

		/*
		 * Set MEC_TXCMD_TXINT every MEC_NTXDESC_INTR packets
		 * if more than half txdescs have been queued
		 * because TX_EMPTY interrupts will rarely happen
		 * if TX queue is so stacked.
		 */
		if (sc->sc_txpending > (MEC_NTXDESC / 2) &&
		    (nexttx & (MEC_NTXDESC_INTR - 1)) == 0)
			txdcmd |= MEC_TXCMD_TXINT;

		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
			bus_dma_segment_t *segs = dmamap->dm_segs;

			DPRINTF(MEC_DEBUG_TXSEGS,
			    ("%s: nsegs = %d, pseg = %d, nptr = %d\n",
			    __func__, dmamap->dm_nsegs, pseg, nptr));

			switch (nptr) {
			case 3:
				KASSERT((segs[pseg + 2].ds_addr &
				    MEC_TXD_ALIGNMASK) == 0);
				txdcmd |= MEC_TXCMD_PTR3;
				txd->txd_ptr[2] =
				    TXPTR_LEN(segs[pseg + 2].ds_len - 1) |
				    segs[pseg + 2].ds_addr;
				/* FALLTHROUGH */
			case 2:
				KASSERT((segs[pseg + 1].ds_addr &
				    MEC_TXD_ALIGNMASK) == 0);
				txdcmd |= MEC_TXCMD_PTR2;
				txd->txd_ptr[1] =
				    TXPTR_LEN(segs[pseg + 1].ds_len - 1) |
				    segs[pseg + 1].ds_addr;
				/* FALLTHROUGH */
			case 1:
				txdcmd |= MEC_TXCMD_PTR1;
				txd->txd_ptr[0] =
				    TXPTR_LEN(segs[pseg].ds_len - resid - 1) |
				    (segs[pseg].ds_addr + resid);
				break;
			default:
				panic("%s: impossible nptr in %s",
				    device_xname(sc->sc_dev), __func__);
				/* NOTREACHED */
			}
			/*
			 * Store a pointer to the packet so we can
			 * free it later.
			 */
			txs->txs_mbuf = m0;
		} else {
			/*
			 * In this case all data are copied to buffer in txdesc,
			 * we can free TX mbuf here.
			 */
			m_freem(m0);
		}
		txd->txd_cmd = txdcmd;

		DPRINTF(MEC_DEBUG_START,
		    ("%s: txd_cmd = 0x%016llx\n",
		    __func__, txd->txd_cmd));
		DPRINTF(MEC_DEBUG_START,
		    ("%s: txd_ptr[0] = 0x%016llx\n",
		    __func__, txd->txd_ptr[0]));
		DPRINTF(MEC_DEBUG_START,
		    ("%s: txd_ptr[1] = 0x%016llx\n",
		    __func__, txd->txd_ptr[1]));
		DPRINTF(MEC_DEBUG_START,
		    ("%s: txd_ptr[2] = 0x%016llx\n",
		    __func__, txd->txd_ptr[2]));
		DPRINTF(MEC_DEBUG_START,
		    ("%s: len = %d (0x%04x), buflen = %d (0x%02x)\n",
		    __func__, len, len, buflen, buflen));

		/* sync TX descriptor */
		MEC_TXDESCSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* start TX */
		bus_space_write_8(st, sh, MEC_TX_RING_PTR, MEC_NEXTTX(nexttx));

		/* advance the TX pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;
	}

	if (sc->sc_txpending == MEC_NTXDESC - 1) {
		/* No more slots; notify upper layer. */
		MEC_EVCNT_INCR(&sc->sc_ev_txdstall);
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * If the transmitter was idle,
		 * reset the txdirty pointer and re-enable TX interrupt.
		 */
		if (opending == 0) {
			sc->sc_txdirty = firsttx;
			bus_space_write_8(st, sh, MEC_TX_ALIAS,
			    MEC_TX_ALIAS_INT_ENABLE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1414
1415 static void
1416 mec_stop(struct ifnet *ifp, int disable)
1417 {
1418 struct mec_softc *sc = ifp->if_softc;
1419 struct mec_txsoft *txs;
1420 int i;
1421
1422 DPRINTF(MEC_DEBUG_STOP, ("%s\n", __func__));
1423
1424 ifp->if_timer = 0;
1425 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1426
1427 callout_stop(&sc->sc_tick_ch);
1428 mii_down(&sc->sc_mii);
1429
1430 /* release any TX buffers */
1431 for (i = 0; i < MEC_NTXDESC; i++) {
1432 txs = &sc->sc_txsoft[i];
1433 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1434 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1435 m_freem(txs->txs_mbuf);
1436 txs->txs_mbuf = NULL;
1437 }
1438 }
1439 }
1440
1441 static int
1442 mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1443 {
1444 int s, error;
1445
1446 s = splnet();
1447
1448 error = ether_ioctl(ifp, cmd, data);
1449 if (error == ENETRESET) {
1450 /*
1451 * Multicast list has changed; set the hardware filter
1452 * accordingly.
1453 */
1454 if (ifp->if_flags & IFF_RUNNING)
1455 error = mec_init(ifp);
1456 else
1457 error = 0;
1458 }
1459
1460 /* Try to get more packets going. */
1461 mec_start(ifp);
1462
1463 splx(s);
1464 return error;
1465 }
1466
1467 static void
1468 mec_watchdog(struct ifnet *ifp)
1469 {
1470 struct mec_softc *sc = ifp->if_softc;
1471
1472 printf("%s: device timeout\n", device_xname(sc->sc_dev));
1473 ifp->if_oerrors++;
1474
1475 mec_init(ifp);
1476 }
1477
1478 static void
1479 mec_tick(void *arg)
1480 {
1481 struct mec_softc *sc = arg;
1482 int s;
1483
1484 s = splnet();
1485 mii_tick(&sc->sc_mii);
1486 splx(s);
1487
1488 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
1489 }
1490
1491 static void
1492 mec_setfilter(struct mec_softc *sc)
1493 {
1494 struct ethercom *ec = &sc->sc_ethercom;
1495 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1496 struct ether_multi *enm;
1497 struct ether_multistep step;
1498 bus_space_tag_t st = sc->sc_st;
1499 bus_space_handle_t sh = sc->sc_sh;
1500 uint64_t mchash;
1501 uint32_t control, hash;
1502 int mcnt;
1503
1504 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1505 control &= ~MEC_MAC_FILTER_MASK;
1506
1507 if (ifp->if_flags & IFF_PROMISC) {
1508 control |= MEC_MAC_FILTER_PROMISC;
1509 bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1510 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1511 return;
1512 }
1513
1514 mcnt = 0;
1515 mchash = 0;
1516 ETHER_LOCK(ec);
1517 ETHER_FIRST_MULTI(step, ec, enm);
1518 while (enm != NULL) {
1519 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1520 /* set allmulti for a range of multicast addresses */
1521 control |= MEC_MAC_FILTER_ALLMULTI;
1522 bus_space_write_8(st, sh, MEC_MULTICAST,
1523 0xffffffffffffffffULL);
1524 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1525 ETHER_UNLOCK(ec);
1526 return;
1527 }
1528
1529 #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
1530
1531 hash = mec_calchash(enm->enm_addrlo);
1532 mchash |= 1 << hash;
1533 mcnt++;
1534 ETHER_NEXT_MULTI(step, enm);
1535 }
1536 ETHER_UNLOCK(ec);
1537
1538 ifp->if_flags &= ~IFF_ALLMULTI;
1539
1540 if (mcnt > 0)
1541 control |= MEC_MAC_FILTER_MATCHMULTI;
1542
1543 bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1544 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1545 }
1546
/*
 * Interrupt handler: read, acknowledge, and dispatch all pending MEC
 * interrupt causes, looping until the status register is clean.
 * Returns nonzero if any interrupt was ours.
 */
static int
mec_intr(void *arg)
{
	struct mec_softc *sc = arg;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t statreg, statack, txptr;
	int handled, sent;

	DPRINTF(MEC_DEBUG_INTR, ("%s: called\n", __func__));

	handled = sent = 0;

	for (;;) {
		statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);

		DPRINTF(MEC_DEBUG_INTR,
		    ("%s: INT_STAT = 0x%08x\n", __func__, statreg));

		statack = statreg & MEC_INT_STATUS_MASK;
		if (statack == 0)
			break;
		/* ack the causes we are about to service */
		bus_space_write_8(st, sh, MEC_INT_STATUS, statack);

		handled = 1;

		if (statack &
		    (MEC_INT_RX_THRESHOLD |
		     MEC_INT_RX_FIFO_UNDERFLOW)) {
			mec_rxintr(sc);
		}

		if (statack &
		    (MEC_INT_TX_EMPTY |
		     MEC_INT_TX_PACKET_SENT |
		     MEC_INT_TX_ABORT)) {
			/* the chip's TX ring pointer rides in the status */
			txptr = (statreg & MEC_INT_TX_RING_BUFFER_ALIAS)
			    >> MEC_INT_TX_RING_BUFFER_SHIFT;
			mec_txintr(sc, txptr);
			sent = 1;
			if ((statack & MEC_INT_TX_EMPTY) != 0) {
				/*
				 * disable TX interrupt to stop
				 * TX empty interrupt
				 */
				bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
				DPRINTF(MEC_DEBUG_INTR,
				    ("%s: disable TX_INT\n", __func__));
			}
#ifdef MEC_EVENT_COUNTERS
			if ((statack & MEC_INT_TX_EMPTY) != 0)
				MEC_EVCNT_INCR(&sc->sc_ev_txempty);
			if ((statack & MEC_INT_TX_PACKET_SENT) != 0)
				MEC_EVCNT_INCR(&sc->sc_ev_txsent);
#endif
		}

		/* fatal errors: log and reinitialize the chip */
		if (statack &
		    (MEC_INT_TX_LINK_FAIL |
		     MEC_INT_TX_MEM_ERROR |
		     MEC_INT_TX_ABORT |
		     MEC_INT_RX_DMA_UNDERFLOW)) {
			printf("%s: %s: interrupt status = 0x%08x\n",
			    device_xname(sc->sc_dev), __func__, statreg);
			mec_init(ifp);
			break;
		}
	}

	if (sent) {
		/* try to get more packets going */
		if_schedule_deferred_start(ifp);
	}

	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, statreg);

	return handled;
}
1627
/*
 * RX interrupt service: drain received frames from the RX ring,
 * copy each into a freshly allocated mbuf and pass it up the stack,
 * then return the DMA buffer to the chip's RX FIFO.
 */
static void
mec_rxintr(struct mec_softc *sc)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mec_rxdesc *rxd;
	uint64_t rxstat;
	u_int len;
	int i;
	uint32_t crc;

	DPRINTF(MEC_DEBUG_RXINTR, ("%s: called\n", __func__));

	for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
		rxd = &sc->sc_rxdesc[i];

		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
		rxstat = rxd->rxd_stat;

		DPRINTF(MEC_DEBUG_RXINTR,
		    ("%s: rxstat = 0x%016llx, rxptr = %d\n",
		    __func__, rxstat, i));
		DPRINTF(MEC_DEBUG_RXINTR, ("%s: rxfifo = 0x%08x\n",
		    __func__, (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));

		if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
			/* no more received frames; re-arm the status */
			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		len = rxstat & MEC_RXSTAT_LEN;

		if (len < ETHER_MIN_LEN ||
		    len > (MCLBYTES - MEC_ETHER_ALIGN)) {
			/* invalid length packet; drop it. */
			DPRINTF(MEC_DEBUG_RXINTR,
			    ("%s: wrong packet\n", __func__));
 dropit:
			ifp->if_ierrors++;
			rxd->rxd_stat = 0;
			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
			/* hand the buffer back to the chip */
			bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
			    MEC_CDRXADDR(sc, i));
			continue;
		}

		/*
		 * If 802.1Q VLAN MTU is enabled, ignore the bad packet error.
		 */
		if ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) != 0)
			rxstat &= ~MEC_RXSTAT_BADPACKET;

		if (rxstat &
		    (MEC_RXSTAT_BADPACKET |
		     MEC_RXSTAT_LONGEVENT |
		     MEC_RXSTAT_INVALID |
		     MEC_RXSTAT_CRCERROR |
		     MEC_RXSTAT_VIOLATION)) {
			printf("%s: mec_rxintr: status = 0x%016"PRIx64"\n",
			    device_xname(sc->sc_dev), rxstat);
			goto dropit;
		}

		/*
		 * The MEC includes the CRC with every packet. Trim
		 * it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * now allocate an mbuf (and possibly a cluster) to hold
		 * the received packet.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate RX mbuf\n",
			    device_xname(sc->sc_dev));
			goto dropit;
		}
		if (len > (MHLEN - MEC_ETHER_ALIGN)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate RX cluster\n",
				    device_xname(sc->sc_dev));
				m_freem(m);
				m = NULL;
				goto dropit;
			}
		}

		/*
		 * Note MEC chip seems to insert 2 byte padding at the top of
		 * RX buffer, but we copy whole buffer to avoid unaligned copy.
		 */
		MEC_RXBUFSYNC(sc, i, len + ETHER_CRC_LEN, BUS_DMASYNC_POSTREAD);
		memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
		/* keep the trailing CRC; mec_rxcsum() deducts it */
		crc = be32dec(rxd->rxd_buf + MEC_ETHER_ALIGN + len);
		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
		m->m_data += MEC_ETHER_ALIGN;

		/* put RX buffer into FIFO again */
		rxd->rxd_stat = 0;
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;
		if ((ifp->if_csum_flags_rx & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0)
			mec_rxcsum(sc, m, RXSTAT_CKSUM(rxstat), crc);

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* update RX pointer */
	sc->sc_rxptr = i;
}
1747
/*
 * Fix up the hardware RX checksum for a received IPv4 TCP/UDP packet.
 *
 * The MEC checksums the whole frame (Ethernet header, IP header,
 * payload, and CRC), so the Ethernet header and CRC contributions are
 * deducted here before storing the result in the mbuf for the stack
 * to verify (M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR).  Packets this can't
 * handle (non-IPv4, fragments, padded/truncated frames) fall through
 * with csum_flags 0 so the stack checksums in software.
 */
static void
mec_rxcsum(struct mec_softc *sc, struct mbuf *m, uint16_t rxcsum, uint32_t crc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	u_int len, pktlen, hlen;
	uint32_t csum_data, dsum;
	int csum_flags;
	const uint16_t *dp;

	csum_data = 0;
	csum_flags = 0;

	len = m->m_len;
	if (len < ETHER_HDR_LEN + sizeof(struct ip))
		goto out;
	pktlen = len - ETHER_HDR_LEN;
	eh = mtod(m, struct ether_header *);
	if (ntohs(eh->ether_type) != ETHERTYPE_IP)
		goto out;
	ip = (struct ip *)((uint8_t *)eh + ETHER_HDR_LEN);
	if (ip->ip_v != IPVERSION)
		goto out;

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip))
		goto out;

	/*
	 * Bail if too short, has random trailing garbage, truncated,
	 * fragment, or has ethernet pad.
	 */
	if (ntohs(ip->ip_len) < hlen ||
	    ntohs(ip->ip_len) != pktlen ||
	    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0)
		goto out;

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0 ||
		    pktlen < (hlen + sizeof(struct tcphdr)))
			goto out;
		csum_flags = M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
		break;
	case IPPROTO_UDP:
		if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0 ||
		    pktlen < (hlen + sizeof(struct udphdr)))
			goto out;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			goto out;	/* no checksum */
		csum_flags = M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
		break;
	default:
		goto out;
	}

	/*
	 * The computed checksum includes Ethernet header, IP headers,
	 * and CRC, so we have to deduct them.
	 * Note IP header cksum should be 0xffff so we don't have to
	 * deduct them.
	 */
	dsum = 0;

	/* deduct Ethernet header */
	dp = (const uint16_t *)eh;
	for (hlen = 0; hlen < (ETHER_HDR_LEN / sizeof(uint16_t)); hlen++)
		dsum += ntohs(*dp++);

	/* deduct CRC; byte alignment of the CRC depends on frame parity */
	if (len & 1) {
		dsum += (crc >> 24) & 0x00ff;
		dsum += (crc >> 8) & 0xffff;
		dsum += (crc << 8) & 0xff00;
	} else {
		dsum += (crc >> 16) & 0xffff;
		dsum += (crc >> 0) & 0xffff;
	}
	/* fold the deduction to 16 bits */
	while (dsum >> 16)
		dsum = (dsum >> 16) + (dsum & 0xffff);

	/* subtract via ones-complement addition of ~dsum */
	csum_data = rxcsum;
	csum_data += (uint16_t)~dsum;

	while (csum_data >> 16)
		csum_data = (csum_data >> 16) + (csum_data & 0xffff);

 out:
	m->m_pkthdr.csum_flags = csum_flags;
	m->m_pkthdr.csum_data = csum_data;
}
1842
/*
 * TX completion service: reclaim finished descriptors from sc_txdirty
 * up to the chip's current ring pointer, unloading DMA maps, freeing
 * mbufs, and updating error/collision statistics.
 */
static void
mec_txintr(struct mec_softc *sc, uint32_t txptr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	uint64_t txstat;
	int i;
	u_int col;

	DPRINTF(MEC_DEBUG_TXINTR, ("%s: called\n", __func__));

	for (i = sc->sc_txdirty; i != txptr && sc->sc_txpending != 0;
	    i = MEC_NEXTTX(i), sc->sc_txpending--) {
		txd = &sc->sc_txdesc[i];

		MEC_TXCMDSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = txd->txd_stat;
		DPRINTF(MEC_DEBUG_TXINTR,
		    ("%s: dirty = %d, txstat = 0x%016llx\n",
		    __func__, i, txstat));
		if ((txstat & MEC_TXSTAT_SENT) == 0) {
			/* not completed yet; stop here */
			MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		txs = &sc->sc_txsoft[i];
		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
			/* DMA-mapped packet: release map and mbuf */
			dmamap = txs->txs_dmamap;
			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
		ifp->if_collisions += col;

		if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
			printf("%s: TX error: txstat = 0x%016"PRIx64"\n",
			    device_xname(sc->sc_dev), txstat);
			ifp->if_oerrors++;
		} else
			ifp->if_opackets++;
	}

	/* update the dirty TX buffer pointer */
	sc->sc_txdirty = i;
	DPRINTF(MEC_DEBUG_INTR,
	    ("%s: sc_txdirty = %2d, sc_txpending = %2d\n",
	    __func__, sc->sc_txdirty, sc->sc_txpending));

	/* cancel the watchdog timer if there are no pending TX packets */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
	if (sc->sc_txpending < MEC_NTXDESC - MEC_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
1905
1906 static bool
1907 mec_shutdown(device_t self, int howto)
1908 {
1909 struct mec_softc *sc = device_private(self);
1910
1911 mec_stop(&sc->sc_ethercom.ec_if, 1);
1912 /* make sure to stop DMA etc. */
1913 mec_reset(sc);
1914
1915 return true;
1916 }
1917