1 /* $NetBSD: if_mec.c,v 1.19 2008/04/29 15:50:39 tsutsui Exp $ */
2
3 /*
4 * Copyright (c) 2004 Izumi Tsutsui.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * Copyright (c) 2003 Christopher SEKIYA
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. All advertising materials mentioning features or use of this software
43 * must display the following acknowledgement:
44 * This product includes software developed for the
45 * NetBSD Project. See http://www.NetBSD.org/ for
46 * information about NetBSD.
47 * 4. The name of the author may not be used to endorse or promote products
48 * derived from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
51 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
52 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
53 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
54 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
55 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
59 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 */
61
62 /*
63 * MACE MAC-110 Ethernet driver
64 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.19 2008/04/29 15:50:39 tsutsui Exp $");
68
69 #include "opt_ddb.h"
70 #include "bpfilter.h"
71 #include "rnd.h"
72
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/device.h>
76 #include <sys/callout.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/socket.h>
81 #include <sys/ioctl.h>
82 #include <sys/errno.h>
83
84 #if NRND > 0
85 #include <sys/rnd.h>
86 #endif
87
88 #include <net/if.h>
89 #include <net/if_dl.h>
90 #include <net/if_media.h>
91 #include <net/if_ether.h>
92
93 #if NBPFILTER > 0
94 #include <net/bpf.h>
95 #endif
96
97 #include <machine/bus.h>
98 #include <machine/intr.h>
99 #include <machine/machtype.h>
100
101 #include <dev/mii/mii.h>
102 #include <dev/mii/miivar.h>
103
104 #include <sgimips/mace/macevar.h>
105 #include <sgimips/mace/if_mecreg.h>
106
107 #include <dev/arcbios/arcbios.h>
108 #include <dev/arcbios/arcbiosvar.h>
109
110 /* #define MEC_DEBUG */
111
112 #ifdef MEC_DEBUG
113 #define MEC_DEBUG_RESET 0x01
114 #define MEC_DEBUG_START 0x02
115 #define MEC_DEBUG_STOP 0x04
116 #define MEC_DEBUG_INTR 0x08
117 #define MEC_DEBUG_RXINTR 0x10
118 #define MEC_DEBUG_TXINTR 0x20
119 uint32_t mec_debug = 0;
120 #define DPRINTF(x, y) if (mec_debug & (x)) printf y
121 #else
122 #define DPRINTF(x, y) /* nothing */
123 #endif
124
125 /*
126 * Transmit descriptor list size
127 */
128 #define MEC_NTXDESC 64
129 #define MEC_NTXDESC_MASK (MEC_NTXDESC - 1)
130 #define MEC_NEXTTX(x) (((x) + 1) & MEC_NTXDESC_MASK)
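/*
 * MEC_NTXDESC is a power of two, so MEC_NEXTTX() can wrap the ring
 * index with a simple mask; e.g. MEC_NEXTTX(63) == 0.
 */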
131
132 /*
133 * software state for TX
134 */
135 struct mec_txsoft {
136 struct mbuf *txs_mbuf; /* head of our mbuf chain */
137 bus_dmamap_t txs_dmamap; /* our DMA map */
138 uint32_t txs_flags;
139 #define MEC_TXS_BUFLEN_MASK 0x0000007f /* data len in txd_buf */
140 #define MEC_TXS_TXDBUF 0x00000080 /* txd_buf is used */
141 #define MEC_TXS_TXDPTR1 0x00000100 /* txd_ptr[0] is used */
142 };
143
144 /*
145 * Transmit buffer descriptor
146 */
147 #define MEC_TXDESCSIZE 128
148 #define MEC_NTXPTR 3
149 #define MEC_TXD_BUFOFFSET \
150 (sizeof(uint64_t) + MEC_NTXPTR * sizeof(uint64_t))
151 #define MEC_TXD_BUFSIZE (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
152 #define MEC_TXD_BUFSTART(len) (MEC_TXD_BUFSIZE - (len))
153 #define MEC_TXD_ALIGN 8
154 #define MEC_TXD_ROUNDUP(addr) \
155 (((addr) + (MEC_TXD_ALIGN - 1)) & ~((uint64_t)MEC_TXD_ALIGN - 1))
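/*
 * Layout note: each 128-byte txdesc is an 8-byte command/status word,
 * three 8-byte concatenate pointers and a 96-byte inline buffer
 * (MEC_TXD_BUFSIZE).  Inline data is placed at the *end* of txd_buf,
 * so MEC_TXD_BUFSTART(len) gives its starting offset within the
 * buffer; e.g. MEC_TXD_BUFSTART(60) == 36 for a padded minimum frame.
 */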
156
157 struct mec_txdesc {
158 volatile uint64_t txd_cmd;
159 #define MEC_TXCMD_DATALEN 0x000000000000ffff /* data length */
160 #define MEC_TXCMD_BUFSTART 0x00000000007f0000 /* start byte offset */
161 #define TXCMD_BUFSTART(x) ((x) << 16)
162 #define MEC_TXCMD_TERMDMA 0x0000000000800000 /* stop DMA on abort */
163 #define MEC_TXCMD_TXINT 0x0000000001000000 /* INT after TX done */
164 #define MEC_TXCMD_PTR1 0x0000000002000000 /* valid 1st txd_ptr */
165 #define MEC_TXCMD_PTR2 0x0000000004000000 /* valid 2nd txd_ptr */
166 #define MEC_TXCMD_PTR3 0x0000000008000000 /* valid 3rd txd_ptr */
167 #define MEC_TXCMD_UNUSED 0xfffffffff0000000ULL /* should be zero */
168
169 #define txd_stat txd_cmd
170 #define MEC_TXSTAT_LEN 0x000000000000ffff /* TX length */
171 #define MEC_TXSTAT_COLCNT 0x00000000000f0000 /* collision count */
172 #define MEC_TXSTAT_COLCNT_SHIFT 16
173 #define MEC_TXSTAT_LATE_COL 0x0000000000100000 /* late collision */
174 #define MEC_TXSTAT_CRCERROR 0x0000000000200000 /* */
175 #define MEC_TXSTAT_DEFERRED 0x0000000000400000 /* */
176 #define MEC_TXSTAT_SUCCESS 0x0000000000800000 /* TX complete */
177 #define MEC_TXSTAT_TOOBIG 0x0000000001000000 /* */
178 #define MEC_TXSTAT_UNDERRUN 0x0000000002000000 /* */
179 #define MEC_TXSTAT_COLLISIONS 0x0000000004000000 /* */
180 #define MEC_TXSTAT_EXDEFERRAL 0x0000000008000000 /* */
181 #define MEC_TXSTAT_COLLIDED 0x0000000010000000 /* */
182 #define MEC_TXSTAT_UNUSED 0x7fffffffe0000000ULL /* should be zero */
183 #define MEC_TXSTAT_SENT 0x8000000000000000ULL /* packet sent */
184
185 uint64_t txd_ptr[MEC_NTXPTR];
186 #define MEC_TXPTR_UNUSED2 0x0000000000000007 /* should be zero */
187 #define MEC_TXPTR_DMAADDR 0x00000000fffffff8 /* TX DMA address */
188 #define MEC_TXPTR_LEN 0x0000ffff00000000ULL /* buffer length */
189 #define TXPTR_LEN(x) ((uint64_t)(x) << 32)
190 #define MEC_TXPTR_UNUSED1 0xffff000000000000ULL /* should be zero */
191
192 uint8_t txd_buf[MEC_TXD_BUFSIZE];
193 };
194
195 /*
196 * Receive buffer size
197 */
198 #define MEC_NRXDESC 16
199 #define MEC_NRXDESC_MASK (MEC_NRXDESC - 1)
200 #define MEC_NEXTRX(x) (((x) + 1) & MEC_NRXDESC_MASK)
201
202 /*
203 * Receive buffer description
204 */
205 #define MEC_RXDESCSIZE 4096 /* umm, should be 4kbyte aligned */
206 #define MEC_RXD_NRXPAD 3
207 #define MEC_RXD_DMAOFFSET (1 + MEC_RXD_NRXPAD)
208 #define MEC_RXD_BUFOFFSET (MEC_RXD_DMAOFFSET * sizeof(uint64_t))
209 #define MEC_RXD_BUFSIZE (MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
210
211 struct mec_rxdesc {
212 volatile uint64_t rxd_stat;
213 #define MEC_RXSTAT_LEN 0x000000000000ffff /* data length */
214 #define MEC_RXSTAT_VIOLATION 0x0000000000010000 /* code violation (?) */
215 #define MEC_RXSTAT_UNUSED2 0x0000000000020000 /* unknown (?) */
216 #define MEC_RXSTAT_CRCERROR 0x0000000000040000 /* CRC error */
217 #define MEC_RXSTAT_MULTICAST 0x0000000000080000 /* multicast packet */
218 #define MEC_RXSTAT_BROADCAST 0x0000000000100000 /* broadcast packet */
219 #define MEC_RXSTAT_INVALID 0x0000000000200000 /* invalid preamble */
220 #define MEC_RXSTAT_LONGEVENT 0x0000000000400000 /* long packet */
221 #define MEC_RXSTAT_BADPACKET 0x0000000000800000 /* bad packet */
222 #define MEC_RXSTAT_CAREVENT 0x0000000001000000 /* carrier event */
223 #define MEC_RXSTAT_MATCHMCAST 0x0000000002000000 /* match multicast */
224 #define MEC_RXSTAT_MATCHMAC 0x0000000004000000 /* match MAC */
225 #define MEC_RXSTAT_SEQNUM 0x00000000f8000000 /* sequence number */
226 #define MEC_RXSTAT_CKSUM 0x0000ffff00000000ULL /* IP checksum */
227 #define MEC_RXSTAT_UNUSED1 0x7fff000000000000ULL /* should be zero */
228 #define MEC_RXSTAT_RECEIVED 0x8000000000000000ULL /* set to 1 on RX */
229 uint64_t rxd_pad1[MEC_RXD_NRXPAD];
230 uint8_t rxd_buf[MEC_RXD_BUFSIZE];
231 };
232
233 /*
234 * control structures for DMA ops
235 */
236 struct mec_control_data {
237 /*
238 * TX descriptors and buffers
239 */
240 struct mec_txdesc mcd_txdesc[MEC_NTXDESC];
241
242 /*
243 * RX descriptors and buffers
244 */
245 struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
246 };
247
248 /*
249 * It _seems_ there are some restrictions on descriptor address:
250 *
251 * - Base address of txdescs should be 8kbyte aligned
252 * - Each txdesc should be 128byte aligned
253 * - Each rxdesc should be 4kbyte aligned
254 *
255 * So we should specify 8kbyte alignment to allocate the txdescs.
256 * In that case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
257 * so the rxdescs that follow are also 4kbyte aligned.
258 */
259 #define MEC_CONTROL_DATA_ALIGN (8 * 1024)
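/*
 * With the above sizes the control data block is laid out as
 * 64 * 128 bytes of txdescs (offsets 0-8191) followed by
 * 16 * 4096 bytes of rxdescs; with the block itself 8kbyte aligned,
 * every rxdesc naturally ends up 4kbyte aligned.
 */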
260
261 #define MEC_CDOFF(x) offsetof(struct mec_control_data, x)
262 #define MEC_CDTXOFF(x) MEC_CDOFF(mcd_txdesc[(x)])
263 #define MEC_CDRXOFF(x) MEC_CDOFF(mcd_rxdesc[(x)])
264
265 /*
266 * software state per device
267 */
268 struct mec_softc {
269 device_t sc_dev; /* generic device structures */
270
271 bus_space_tag_t sc_st; /* bus_space tag */
272 bus_space_handle_t sc_sh; /* bus_space handle */
273 bus_dma_tag_t sc_dmat; /* bus_dma tag */
274 void *sc_sdhook; /* shutdown hook */
275
276 struct ethercom sc_ethercom; /* Ethernet common part */
277
278 struct mii_data sc_mii; /* MII/media information */
279 int sc_phyaddr; /* MII address */
280 struct callout sc_tick_ch; /* tick callout */
281
282 uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */
283
284 bus_dmamap_t sc_cddmamap; /* bus_dma map for control data */
285 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
286
287 /* pointer to allocated control data */
288 struct mec_control_data *sc_control_data;
289 #define sc_txdesc sc_control_data->mcd_txdesc
290 #define sc_rxdesc sc_control_data->mcd_rxdesc
291
292 /* software state for TX descs */
293 struct mec_txsoft sc_txsoft[MEC_NTXDESC];
294
295 int sc_txpending; /* number of TX requests pending */
296 int sc_txdirty; /* first dirty TX descriptor */
297 int sc_txlast; /* last used TX descriptor */
298
299 int sc_rxptr; /* next ready RX buffer */
300
301 #if NRND > 0
302 rndsource_element_t sc_rnd_source; /* random source */
303 #endif
304 };
305
306 #define MEC_CDTXADDR(sc, x) ((sc)->sc_cddma + MEC_CDTXOFF(x))
307 #define MEC_CDRXADDR(sc, x) ((sc)->sc_cddma + MEC_CDRXOFF(x))
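/*
 * MEC_CDTXADDR()/MEC_CDRXADDR() yield the bus (DMA) address of a given
 * descriptor by adding its offset within the control data block to the
 * base address of the loaded sc_cddmamap.
 */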
308
309 #define MEC_TXDESCSYNC(sc, x, ops) \
310 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
311 MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
312 #define MEC_TXCMDSYNC(sc, x, ops) \
313 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
314 MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
315
316 #define MEC_RXSTATSYNC(sc, x, ops) \
317 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
318 MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
319 #define MEC_RXBUFSYNC(sc, x, len, ops) \
320 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
321 MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET, \
322 MEC_ETHER_ALIGN + (len), (ops))
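/*
 * Sync granularity: MEC_TXDESCSYNC covers a whole 128-byte txdesc,
 * MEC_TXCMDSYNC and MEC_RXSTATSYNC only the 8-byte command/status
 * word, and MEC_RXBUFSYNC the received data plus the 2-byte alignment
 * pad at the head of the RX buffer.
 */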
323
324 /* XXX these values should be moved to <net/if_ether.h> ? */
325 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
326 #define MEC_ETHER_ALIGN 2
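/*
 * ETHER_PAD_LEN (60) is the minimum frame length excluding the CRC;
 * shorter packets are zero-padded by hand in mec_start().
 * MEC_ETHER_ALIGN (2) keeps the IP header 32-bit aligned after the
 * 14-byte Ethernet header.
 */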
327
328 static int mec_match(device_t, cfdata_t, void *);
329 static void mec_attach(device_t, device_t, void *);
330
331 static int mec_mii_readreg(device_t, int, int);
332 static void mec_mii_writereg(device_t, int, int, int);
333 static int mec_mii_wait(struct mec_softc *);
334 static void mec_statchg(device_t);
335
336 static void enaddr_aton(const char *, uint8_t *);
337
338 static int mec_init(struct ifnet * ifp);
339 static void mec_start(struct ifnet *);
340 static void mec_watchdog(struct ifnet *);
341 static void mec_tick(void *);
342 static int mec_ioctl(struct ifnet *, u_long, void *);
343 static void mec_reset(struct mec_softc *);
344 static void mec_setfilter(struct mec_softc *);
345 static int mec_intr(void *arg);
346 static void mec_stop(struct ifnet *, int);
347 static void mec_rxintr(struct mec_softc *);
348 static void mec_txintr(struct mec_softc *);
349 static void mec_shutdown(void *);
350
351 CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc),
352 mec_match, mec_attach, NULL, NULL);
353
354 static int mec_matched = 0;
355
356 static int
357 mec_match(device_t parent, cfdata_t cf, void *aux)
358 {
359
360 /* allow only one device */
361 if (mec_matched)
362 return 0;
363
364 mec_matched = 1;
365 return 1;
366 }
367
368 static void
369 mec_attach(device_t parent, device_t self, void *aux)
370 {
371 struct mec_softc *sc = device_private(self);
372 struct mace_attach_args *maa = aux;
373 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
374 uint64_t address, command;
375 const char *macaddr;
376 struct mii_softc *child;
377 bus_dma_segment_t seg;
378 int i, err, rseg;
379 bool mac_is_fake;
380
381 sc->sc_dev = self;
382 sc->sc_st = maa->maa_st;
383 if (bus_space_subregion(sc->sc_st, maa->maa_sh,
384 maa->maa_offset, 0, &sc->sc_sh) != 0) {
385 aprint_error(": can't map i/o space\n");
386 return;
387 }
388
389 /* set up DMA structures */
390 sc->sc_dmat = maa->maa_dmat;
391
392 /*
393 * Allocate the control data structures, and create and load the
394 * DMA map for it.
395 */
396 if ((err = bus_dmamem_alloc(sc->sc_dmat,
397 sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
398 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
399 aprint_error(": unable to allocate control data, error = %d\n",
400 err);
401 goto fail_0;
402 }
403 /*
404 * XXX needs re-think...
405 * The control data structures contain the whole RX data buffers, so
406 * BUS_DMA_COHERENT (which disables the cache) may cause a performance
407 * penalty when copying data from the RX buffers to mbufs in normal
408 * memory; on the other hand, without it we have to make sure all
409 * bus_dmamap_sync(9) ops are called properly.
410 */
411 if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
412 sizeof(struct mec_control_data),
413 (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
414 aprint_error(": unable to map control data, error = %d\n", err);
415 goto fail_1;
416 }
417 memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));
418
419 if ((err = bus_dmamap_create(sc->sc_dmat,
420 sizeof(struct mec_control_data), 1,
421 sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
422 aprint_error(": unable to create control data DMA map,"
423 " error = %d\n", err);
424 goto fail_2;
425 }
426 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
427 sc->sc_control_data, sizeof(struct mec_control_data), NULL,
428 BUS_DMA_NOWAIT)) != 0) {
429 aprint_error(": unable to load control data DMA map,"
430 " error = %d\n", err);
431 goto fail_3;
432 }
433
434 /* create TX buffer DMA maps */
435 for (i = 0; i < MEC_NTXDESC; i++) {
436 if ((err = bus_dmamap_create(sc->sc_dmat,
437 MCLBYTES, 1, MCLBYTES, 0, 0,
438 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
439 aprint_error(": unable to create tx DMA map %d,"
440 " error = %d\n", i, err);
441 goto fail_4;
442 }
443 }
444
445 callout_init(&sc->sc_tick_ch, 0);
446
447 /* get Ethernet address from ARCBIOS */
448 if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
449 aprint_error(": unable to get MAC address!\n");
450 goto fail_4;
451 }
452 /*
453 * On some machines the DS2502 chip storing the serial number/
454 * MAC address is on the PCI riser board - if this board is
455 * missing, ARCBIOS will not know a good Ethernet address (but
456 * otherwise the machine will work fine).
457 */
458 mac_is_fake = false;
459 if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) {
460 uint32_t ui = 0;
461 const char * netaddr =
462 ARCBIOS->GetEnvironmentVariable("netaddr");
463
464 /*
465 * Create a MAC address by abusing the "netaddr" env var
466 */
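	/*
	 * e.g. netaddr "192.168.1.5" should yield f2:0b:a4:a8:01:05 on
	 * the big-endian sgimips (low three bytes taken from the parsed
	 * 32-bit address).
	 */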
467 sc->sc_enaddr[0] = 0xf2;
468 sc->sc_enaddr[1] = 0x0b;
469 sc->sc_enaddr[2] = 0xa4;
470 if (netaddr) {
471 mac_is_fake = true;
472 while (*netaddr) {
473 int v = 0;
474 while (*netaddr && *netaddr != '.') {
475 if (*netaddr >= '0' && *netaddr <= '9')
476 v = v*10 + (*netaddr - '0');
477 netaddr++;
478 }
479 ui <<= 8;
480 ui |= v;
481 if (*netaddr == '.')
482 netaddr++;
483 }
484 }
485 memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3);
486 }
487 if (!mac_is_fake)
488 enaddr_aton(macaddr, sc->sc_enaddr);
489
490 /* set the Ethernet address */
491 address = 0;
492 for (i = 0; i < ETHER_ADDR_LEN; i++) {
493 address = address << 8;
494 address |= sc->sc_enaddr[i];
495 }
496 bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address);
497
498 /* reset device */
499 mec_reset(sc);
500
501 command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);
502
503 aprint_normal(": MAC-110 Ethernet, rev %u\n",
504 (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT));
505
506 if (mac_is_fake)
507 aprint_normal_dev(self,
508 "could not get ethernet address from firmware"
509 " - generated one from the \"netaddr\" environment"
510 " variable\n");
511 aprint_normal_dev(self, "Ethernet address %s\n",
512 ether_sprintf(sc->sc_enaddr));
513
514 /* Done, now attach everything */
515
516 sc->sc_mii.mii_ifp = ifp;
517 sc->sc_mii.mii_readreg = mec_mii_readreg;
518 sc->sc_mii.mii_writereg = mec_mii_writereg;
519 sc->sc_mii.mii_statchg = mec_statchg;
520
521 /* Set up PHY properties */
522 sc->sc_ethercom.ec_mii = &sc->sc_mii;
523 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
524 ether_mediastatus);
525 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
526 MII_OFFSET_ANY, 0);
527
528 child = LIST_FIRST(&sc->sc_mii.mii_phys);
529 if (child == NULL) {
530 /* No PHY attached */
531 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
532 0, NULL);
533 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
534 } else {
535 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
536 sc->sc_phyaddr = child->mii_phy;
537 }
538
539 strcpy(ifp->if_xname, device_xname(self));
540 ifp->if_softc = sc;
541 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
542 ifp->if_ioctl = mec_ioctl;
543 ifp->if_start = mec_start;
544 ifp->if_watchdog = mec_watchdog;
545 ifp->if_init = mec_init;
546 ifp->if_stop = mec_stop;
547 ifp->if_mtu = ETHERMTU;
548 IFQ_SET_READY(&ifp->if_snd);
549
550 if_attach(ifp);
551 ether_ifattach(ifp, sc->sc_enaddr);
552
553 /* establish interrupt */
554 cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);
555
556 #if NRND > 0
557 rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
558 RND_TYPE_NET, 0);
559 #endif
560
561 /* set shutdown hook to reset interface on powerdown */
562 sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc);
563
564 return;
565
566 /*
567 * Free any resources we've allocated during the failed attach
568 * attempt. Do this in reverse order and fall through.
569 */
570 fail_4:
571 for (i = 0; i < MEC_NTXDESC; i++) {
572 if (sc->sc_txsoft[i].txs_dmamap != NULL)
573 bus_dmamap_destroy(sc->sc_dmat,
574 sc->sc_txsoft[i].txs_dmamap);
575 }
576 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
577 fail_3:
578 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
579 fail_2:
580 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
581 sizeof(struct mec_control_data));
582 fail_1:
583 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
584 fail_0:
585 return;
586 }
587
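/*
 * MII access: the PHY and register numbers are written to
 * MEC_PHY_ADDRESS, a read is then triggered via MEC_PHY_READ_INITIATE,
 * and MEC_PHY_DATA is polled until its BUSY bit clears; writes go
 * straight to MEC_PHY_DATA after the address setup.
 */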
588 static int
589 mec_mii_readreg(device_t self, int phy, int reg)
590 {
591 struct mec_softc *sc = device_private(self);
592 bus_space_tag_t st = sc->sc_st;
593 bus_space_handle_t sh = sc->sc_sh;
594 uint64_t val;
595 int i;
596
597 if (mec_mii_wait(sc) != 0)
598 return 0;
599
600 bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
601 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
602 delay(25);
603 bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
604 delay(25);
605 mec_mii_wait(sc);
606
607 for (i = 0; i < 20; i++) {
608 delay(30);
609
610 val = bus_space_read_8(st, sh, MEC_PHY_DATA);
611
612 if ((val & MEC_PHY_DATA_BUSY) == 0)
613 return val & MEC_PHY_DATA_VALUE;
614 }
615 return 0;
616 }
617
618 static void
619 mec_mii_writereg(device_t self, int phy, int reg, int val)
620 {
621 struct mec_softc *sc = device_private(self);
622 bus_space_tag_t st = sc->sc_st;
623 bus_space_handle_t sh = sc->sc_sh;
624
625 if (mec_mii_wait(sc) != 0) {
626 printf("timed out writing %x: %x\n", reg, val);
627 return;
628 }
629
630 bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
631 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
632
633 delay(60);
634
635 bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);
636
637 delay(60);
638
639 mec_mii_wait(sc);
640 }
641
642 static int
643 mec_mii_wait(struct mec_softc *sc)
644 {
645 uint32_t busy;
646 int i, s;
647
648 for (i = 0; i < 100; i++) {
649 delay(30);
650
651 s = splhigh();
652 busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
653 splx(s);
654
655 if ((busy & MEC_PHY_DATA_BUSY) == 0)
656 return 0;
657 #if 0
658 if (busy == 0xffff) /* XXX ? */
659 return 0;
660 #endif
661 }
662
663 printf("%s: MII timed out\n", device_xname(sc->sc_dev));
664 return 1;
665 }
666
667 static void
668 mec_statchg(device_t self)
669 {
670 struct mec_softc *sc = device_private(self);
671 bus_space_tag_t st = sc->sc_st;
672 bus_space_handle_t sh = sc->sc_sh;
673 uint32_t control;
674
675 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
676 control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
677 MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);
678
679 /* must also set IPG here for duplex stuff ... */
680 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
681 control |= MEC_MAC_FULL_DUPLEX;
682 } else {
683 /* set IPG */
684 control |= MEC_MAC_IPG_DEFAULT;
685 }
686
687 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
688 }
689
690 /*
691 * XXX
692 * maybe this function should be moved to a common place
693 * (sgimips/machdep.c or elsewhere) for all on-board network devices.
694 */
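/*
 * Convert an ASCII "xx:xx:xx:xx:xx:xx" string (as returned by ARCBIOS
 * for "eaddr") into a binary Ethernet address.
 */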
695 static void
696 enaddr_aton(const char *str, uint8_t *eaddr)
697 {
698 int i;
699 char c;
700
701 for (i = 0; i < ETHER_ADDR_LEN; i++) {
702 if (*str == ':')
703 str++;
704
705 c = *str++;
706 if (isdigit(c)) {
707 eaddr[i] = (c - '0');
708 } else if (isxdigit(c)) {
709 eaddr[i] = (toupper(c) + 10 - 'A');
710 }
711 c = *str++;
712 if (isdigit(c)) {
713 eaddr[i] = (eaddr[i] << 4) | (c - '0');
714 } else if (isxdigit(c)) {
715 eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
716 }
717 }
718 }
719
720 static int
721 mec_init(struct ifnet *ifp)
722 {
723 struct mec_softc *sc = ifp->if_softc;
724 bus_space_tag_t st = sc->sc_st;
725 bus_space_handle_t sh = sc->sc_sh;
726 struct mec_rxdesc *rxd;
727 int i, rc;
728
729 /* cancel any pending I/O */
730 mec_stop(ifp, 0);
731
732 /* reset device */
733 mec_reset(sc);
734
735 /* setup filter for multicast or promisc mode */
736 mec_setfilter(sc);
737
738 /* set the TX ring pointer to the base address */
739 bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));
740
741 sc->sc_txpending = 0;
742 sc->sc_txdirty = 0;
743 sc->sc_txlast = MEC_NTXDESC - 1;
744
745 /* put RX buffers into FIFO */
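	/*
	 * Writing a descriptor's DMA address to MEC_MCL_RX_FIFO apparently
	 * hands that 4kbyte buffer to the chip for reception.
	 */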
746 for (i = 0; i < MEC_NRXDESC; i++) {
747 rxd = &sc->sc_rxdesc[i];
748 rxd->rxd_stat = 0;
749 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
750 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
751 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
752 }
753 sc->sc_rxptr = 0;
754
755 #if 0 /* XXX no info */
756 bus_space_write_8(st, sh, MEC_TIMER, 0);
757 #endif
758
759 /*
760 * MEC_DMA_TX_INT_ENABLE will be set later; otherwise it causes
761 * spurious interrupts when the TX buffers are empty.
762 */
763 bus_space_write_8(st, sh, MEC_DMA_CONTROL,
764 (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
765 (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
766 MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
767 MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);
768
769 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
770
771 if ((rc = ether_mediachange(ifp)) != 0)
772 return rc;
773
774 ifp->if_flags |= IFF_RUNNING;
775 ifp->if_flags &= ~IFF_OACTIVE;
776 mec_start(ifp);
777
778 return 0;
779 }
780
781 static void
782 mec_reset(struct mec_softc *sc)
783 {
784 bus_space_tag_t st = sc->sc_st;
785 bus_space_handle_t sh = sc->sc_sh;
786 uint64_t control;
787
788 /* stop DMA first */
789 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
790
791 /* reset chip */
792 bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
793 delay(1000);
794 bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
795 delay(1000);
796
797 /* Default to 100/half and let auto-negotiation work its magic */
798 control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
799 MEC_MAC_IPG_DEFAULT;
800
801 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
802 /* stop DMA again for sanity */
803 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
804
805 DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
806 bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
807 }
808
809 static void
810 mec_start(struct ifnet *ifp)
811 {
812 struct mec_softc *sc = ifp->if_softc;
813 struct mbuf *m0, *m;
814 struct mec_txdesc *txd;
815 struct mec_txsoft *txs;
816 bus_dmamap_t dmamap;
817 bus_space_tag_t st = sc->sc_st;
818 bus_space_handle_t sh = sc->sc_sh;
819 uint64_t txdaddr;
820 int error, firsttx, nexttx, opending;
821 int len, bufoff, buflen, unaligned, txdlen;
822
823 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
824 return;
825
826 /*
827 * Remember the previous txpending and the first transmit descriptor.
828 */
829 opending = sc->sc_txpending;
830 firsttx = MEC_NEXTTX(sc->sc_txlast);
831
832 DPRINTF(MEC_DEBUG_START,
833 ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx));
834
835 for (;;) {
836 /* Grab a packet off the queue. */
837 IFQ_POLL(&ifp->if_snd, m0);
838 if (m0 == NULL)
839 break;
840 m = NULL;
841
842 if (sc->sc_txpending == MEC_NTXDESC) {
843 break;
844 }
845
846 /*
847 * Get the next available transmit descriptor.
848 */
849 nexttx = MEC_NEXTTX(sc->sc_txlast);
850 txd = &sc->sc_txdesc[nexttx];
851 txs = &sc->sc_txsoft[nexttx];
852
853 buflen = 0;
854 bufoff = 0;
855 txdaddr = 0; /* XXX gcc */
856 txdlen = 0; /* XXX gcc */
857
858 len = m0->m_pkthdr.len;
859
860 DPRINTF(MEC_DEBUG_START,
861 ("mec_start: len = %d, nexttx = %d\n", len, nexttx));
862
863 if (len < ETHER_PAD_LEN) {
864 /*
865 * I don't know if the MEC chip does auto padding,
866 * so if the packet is small enough,
867 * just copy it to the buffer in the txdesc.
868 * Maybe this is the simplest way.
869 */
870 DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n"));
871
872 IFQ_DEQUEUE(&ifp->if_snd, m0);
873 bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
874 m_copydata(m0, 0, m0->m_pkthdr.len,
875 txd->txd_buf + bufoff);
876 memset(txd->txd_buf + bufoff + len, 0,
877 ETHER_PAD_LEN - len);
878 len = buflen = ETHER_PAD_LEN;
879
880 txs->txs_flags = MEC_TXS_TXDBUF | buflen;
881 } else {
882 /*
883 * If the packet won't fit in the buffer in the txdesc,
884 * we have to use a concatenate pointer to handle it.
885 * While the MEC can concatenate up to three segments,
886 * it requires the second and third segments to be
887 * 8 byte aligned.
888 * Since that is unlikely for mbuf clusters, we use
889 * only the first concatenate pointer. If the packet
890 * doesn't fit in one DMA segment, allocate a new mbuf
891 * and copy the packet to it.
892 *
893 * Besides, if the start address of the first segment
894 * is not 8 byte aligned, the unaligned part has to be
895 * copied to the txdesc buffer. (XXX see below comments)
896 */
897 DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n"));
898
899 dmamap = txs->txs_dmamap;
900 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
901 BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
902 DPRINTF(MEC_DEBUG_START,
903 ("mec_start: re-allocating mbuf\n"));
904 MGETHDR(m, M_DONTWAIT, MT_DATA);
905 if (m == NULL) {
906 printf("%s: unable to allocate "
907 "TX mbuf\n",
908 device_xname(sc->sc_dev));
909 break;
910 }
911 if (len > (MHLEN - MEC_ETHER_ALIGN)) {
912 MCLGET(m, M_DONTWAIT);
913 if ((m->m_flags & M_EXT) == 0) {
914 printf("%s: unable to allocate "
915 "TX cluster\n",
916 device_xname(sc->sc_dev));
917 m_freem(m);
918 break;
919 }
920 }
921 /*
922 * Each packet has a 14-byte Ethernet header, so if the
923 * payload after the header is to be 4-byte aligned,
924 * the header itself can't be.
925 * Thus adding a 2-byte offset before copying to the
926 * new mbuf avoids an unaligned copy of the payload and
927 * may improve performance a bit.
928 * As noted above, the unaligned part has to be
929 * copied to the txdesc buffer so this may cause
930 * extra copy ops, but for now the MEC always
931 * requires some data in the txdesc buffer,
932 * so we always have to copy some data anyway.
933 */
934 m->m_data += MEC_ETHER_ALIGN;
935 m_copydata(m0, 0, len, mtod(m, void *));
936 m->m_pkthdr.len = m->m_len = len;
937 error = bus_dmamap_load_mbuf(sc->sc_dmat,
938 dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
939 if (error) {
940 printf("%s: unable to load TX buffer, "
941 "error = %d\n",
942 device_xname(sc->sc_dev), error);
943 break;
944 }
945 }
946 IFQ_DEQUEUE(&ifp->if_snd, m0);
947 if (m != NULL) {
948 m_freem(m0);
949 m0 = m;
950 }
951
952 /* handle unaligned part */
953 txdaddr = MEC_TXD_ROUNDUP(dmamap->dm_segs[0].ds_addr);
954 txs->txs_flags = MEC_TXS_TXDPTR1;
955 unaligned =
956 dmamap->dm_segs[0].ds_addr & (MEC_TXD_ALIGN - 1);
957 DPRINTF(MEC_DEBUG_START,
958 ("mec_start: ds_addr = 0x%08x, unaligned = %d\n",
959 (u_int)dmamap->dm_segs[0].ds_addr, unaligned));
960 if (unaligned != 0) {
961 buflen = MEC_TXD_ALIGN - unaligned;
962 bufoff = MEC_TXD_BUFSTART(buflen);
963 DPRINTF(MEC_DEBUG_START,
964 ("mec_start: unaligned, "
965 "buflen = %d, bufoff = %d\n",
966 buflen, bufoff));
967 memcpy(txd->txd_buf + bufoff,
968 mtod(m0, void *), buflen);
969 txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
970 }
971 #if 1
972 else {
973 /*
974 * XXX needs hardware info XXX
975 * It seems the MEC always requires some data
976 * in txd_buf[] even if the buffer is
977 * 8-byte aligned; otherwise a DMA abort error
978 * occurs later...
979 */
980 buflen = MEC_TXD_ALIGN;
981 bufoff = MEC_TXD_BUFSTART(buflen);
982 memcpy(txd->txd_buf + bufoff,
983 mtod(m0, void *), buflen);
984 DPRINTF(MEC_DEBUG_START,
985 ("mec_start: aligned, "
986 "buflen = %d, bufoff = %d\n",
987 buflen, bufoff));
988 txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
989 txdaddr += MEC_TXD_ALIGN;
990 }
991 #endif
992 txdlen = len - buflen;
993 DPRINTF(MEC_DEBUG_START,
994 ("mec_start: txdaddr = 0x%08llx, txdlen = %d\n",
995 txdaddr, txdlen));
996
997 /*
998 * sync the DMA map for TX mbuf
999 *
1000 * XXX the unaligned part doesn't have to be sync'ed,
1001 * but doing so is harmless...
1002 */
1003 bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
1004 dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1005 }
1006
1007 #if NBPFILTER > 0
1008 /*
1009 * Pass packet to bpf if there is a listener.
1010 */
1011 if (ifp->if_bpf)
1012 bpf_mtap(ifp->if_bpf, m0);
1013 #endif
1014
1015 /*
1016 * setup the transmit descriptor.
1017 */
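		/*
		 * txd_cmd packs the DMA length minus one (MEC_TXCMD_DATALEN),
		 * the start offset of any inline data within the 128-byte
		 * descriptor (TXCMD_BUFSTART), the PTR1-valid flag when a
		 * concatenate pointer is used, and the TXINT request bit.
		 */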
1018
1019 /* TXINT bit will be set later on the last packet */
1020 txd->txd_cmd = (len - 1);
1021 /* but also set the TXINT bit halfway through the TX ring */
1022 if (sc->sc_txpending == (MEC_NTXDESC / 2))
1023 txd->txd_cmd |= MEC_TXCMD_TXINT;
1024
1025 if (txs->txs_flags & MEC_TXS_TXDBUF)
1026 txd->txd_cmd |= TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen);
1027 if (txs->txs_flags & MEC_TXS_TXDPTR1) {
1028 txd->txd_cmd |= MEC_TXCMD_PTR1;
1029 txd->txd_ptr[0] = TXPTR_LEN(txdlen - 1) | txdaddr;
1030 /*
1031 * Store a pointer to the packet so we can
1032 * free it later.
1033 */
1034 txs->txs_mbuf = m0;
1035 } else {
1036 txd->txd_ptr[0] = 0;
1037 /*
1038 * In this case all data are copied to buffer in txdesc,
1039 * we can free TX mbuf here.
1040 */
1041 m_freem(m0);
1042 }
1043
1044 DPRINTF(MEC_DEBUG_START,
1045 ("mec_start: txd_cmd = 0x%016llx, txd_ptr = 0x%016llx\n",
1046 txd->txd_cmd, txd->txd_ptr[0]));
1047 DPRINTF(MEC_DEBUG_START,
1048 ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n",
1049 len, len, buflen, buflen));
1050
1051 /* sync TX descriptor */
1052 MEC_TXDESCSYNC(sc, nexttx,
1053 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1054
1055 /* advance the TX pointer. */
1056 sc->sc_txpending++;
1057 sc->sc_txlast = nexttx;
1058 }
1059
1060 if (sc->sc_txpending == MEC_NTXDESC) {
1061 /* No more slots; notify upper layer. */
1062 ifp->if_flags |= IFF_OACTIVE;
1063 }
1064
1065 if (sc->sc_txpending != opending) {
1066 /*
1067 * Cause a TX interrupt to happen on the last packet
1068 * we enqueued.
1069 */
1070 sc->sc_txdesc[sc->sc_txlast].txd_cmd |= MEC_TXCMD_TXINT;
1071 MEC_TXCMDSYNC(sc, sc->sc_txlast,
1072 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1073
1074 /* start TX */
1075 bus_space_write_8(st, sh, MEC_TX_RING_PTR,
1076 MEC_NEXTTX(sc->sc_txlast));
1077
1078 /*
1079 * If the transmitter was idle,
1080 * reset the txdirty pointer and re-enable TX interrupt.
1081 */
1082 if (opending == 0) {
1083 sc->sc_txdirty = firsttx;
1084 bus_space_write_8(st, sh, MEC_TX_ALIAS,
1085 MEC_TX_ALIAS_INT_ENABLE);
1086 }
1087
1088 /* Set a watchdog timer in case the chip flakes out. */
1089 ifp->if_timer = 5;
1090 }
1091 }
1092
1093 static void
1094 mec_stop(struct ifnet *ifp, int disable)
1095 {
1096 struct mec_softc *sc = ifp->if_softc;
1097 struct mec_txsoft *txs;
1098 int i;
1099
1100 DPRINTF(MEC_DEBUG_STOP, ("mec_stop\n"));
1101
1102 ifp->if_timer = 0;
1103 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1104
1105 callout_stop(&sc->sc_tick_ch);
1106 mii_down(&sc->sc_mii);
1107
1108 /* release any TX buffers */
1109 for (i = 0; i < MEC_NTXDESC; i++) {
1110 txs = &sc->sc_txsoft[i];
1111 if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
1112 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1113 m_freem(txs->txs_mbuf);
1114 txs->txs_mbuf = NULL;
1115 }
1116 }
1117 }
1118
1119 static int
1120 mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1121 {
1122 int s, error;
1123
1124 s = splnet();
1125
1126 error = ether_ioctl(ifp, cmd, data);
1127 if (error == ENETRESET) {
1128 /*
1129 * Multicast list has changed; set the hardware filter
1130 * accordingly.
1131 */
1132 if (ifp->if_flags & IFF_RUNNING)
1133 error = mec_init(ifp);
1134 else
1135 error = 0;
1136 }
1137
1138 /* Try to get more packets going. */
1139 mec_start(ifp);
1140
1141 splx(s);
1142 return error;
1143 }
1144
1145 static void
1146 mec_watchdog(struct ifnet *ifp)
1147 {
1148 struct mec_softc *sc = ifp->if_softc;
1149
1150 printf("%s: device timeout\n", device_xname(sc->sc_dev));
1151 ifp->if_oerrors++;
1152
1153 mec_init(ifp);
1154 }
1155
1156 static void
1157 mec_tick(void *arg)
1158 {
1159 struct mec_softc *sc = arg;
1160 int s;
1161
1162 s = splnet();
1163 mii_tick(&sc->sc_mii);
1164 splx(s);
1165
1166 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
1167 }
1168
1169 static void
1170 mec_setfilter(struct mec_softc *sc)
1171 {
1172 struct ethercom *ec = &sc->sc_ethercom;
1173 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1174 struct ether_multi *enm;
1175 struct ether_multistep step;
1176 bus_space_tag_t st = sc->sc_st;
1177 bus_space_handle_t sh = sc->sc_sh;
1178 uint64_t mchash;
1179 uint32_t control, hash;
1180 int mcnt;
1181
1182 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1183 control &= ~MEC_MAC_FILTER_MASK;
1184
1185 if (ifp->if_flags & IFF_PROMISC) {
1186 control |= MEC_MAC_FILTER_PROMISC;
1187 bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1188 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1189 return;
1190 }
1191
1192 mcnt = 0;
1193 mchash = 0;
1194 ETHER_FIRST_MULTI(step, ec, enm);
1195 while (enm != NULL) {
1196 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1197 /* set allmulti for a range of multicast addresses */
1198 control |= MEC_MAC_FILTER_ALLMULTI;
1199 bus_space_write_8(st, sh, MEC_MULTICAST,
1200 0xffffffffffffffffULL);
1201 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1202 return;
1203 }
1204
1205 #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
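/*
 * The hash is the upper 6 bits of the big-endian CRC32 of the address;
 * the matching bit in the 64-bit MEC_MULTICAST register presumably
 * enables reception of that multicast group.
 */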
1206
1207 hash = mec_calchash(enm->enm_addrlo);
1208 mchash |= 1ULL << hash;
1209 mcnt++;
1210 ETHER_NEXT_MULTI(step, enm);
1211 }
1212
1213 ifp->if_flags &= ~IFF_ALLMULTI;
1214
1215 if (mcnt > 0)
1216 control |= MEC_MAC_FILTER_MATCHMULTI;
1217
1218 bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1219 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1220 }
1221
1222 static int
1223 mec_intr(void *arg)
1224 {
1225 struct mec_softc *sc = arg;
1226 bus_space_tag_t st = sc->sc_st;
1227 bus_space_handle_t sh = sc->sc_sh;
1228 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1229 uint32_t statreg, statack, dmac;
1230 int handled, sent;
1231
1232 DPRINTF(MEC_DEBUG_INTR, ("mec_intr: called\n"));
1233
1234 handled = sent = 0;
1235
1236 for (;;) {
1237 statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);
1238
1239 DPRINTF(MEC_DEBUG_INTR,
1240 ("mec_intr: INT_STAT = 0x%08x\n", statreg));
1241
1242 statack = statreg & MEC_INT_STATUS_MASK;
1243 if (statack == 0)
1244 break;
1245 bus_space_write_8(st, sh, MEC_INT_STATUS, statack);
1246
1247 handled = 1;
1248
1249 if (statack &
1250 (MEC_INT_RX_THRESHOLD |
1251 MEC_INT_RX_FIFO_UNDERFLOW)) {
1252 mec_rxintr(sc);
1253 }
1254
1255 dmac = bus_space_read_8(st, sh, MEC_DMA_CONTROL);
1256 DPRINTF(MEC_DEBUG_INTR,
1257 ("mec_intr: DMA_CONT = 0x%08x\n", dmac));
1258
1259 if (statack &
1260 (MEC_INT_TX_EMPTY |
1261 MEC_INT_TX_PACKET_SENT |
1262 MEC_INT_TX_ABORT)) {
1263 mec_txintr(sc);
1264 sent = 1;
1265 if ((statack & MEC_INT_TX_EMPTY) != 0 &&
1266 (dmac & MEC_DMA_TX_INT_ENABLE) != 0) {
1267 /*
1268 * disable TX interrupt to stop
1269 * TX empty interrupt
1270 */
1271 bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
1272 DPRINTF(MEC_DEBUG_INTR,
1273 ("mec_intr: disable TX_INT\n"));
1274 }
1275 }
1276
1277 if (statack &
1278 (MEC_INT_TX_LINK_FAIL |
1279 MEC_INT_TX_MEM_ERROR |
1280 MEC_INT_TX_ABORT |
1281 MEC_INT_RX_FIFO_UNDERFLOW |
1282 MEC_INT_RX_DMA_UNDERFLOW)) {
1283 printf("%s: mec_intr: interrupt status = 0x%08x\n",
1284 device_xname(sc->sc_dev), statreg);
1285 }
1286 }
1287
1288 if (sent) {
1289 /* try to get more packets going */
1290 mec_start(ifp);
1291 }
1292
1293 #if NRND > 0
1294 if (handled)
1295 rnd_add_uint32(&sc->sc_rnd_source, statreg);
1296 #endif
1297
1298 return handled;
1299 }
1300
1301 static void
1302 mec_rxintr(struct mec_softc *sc)
1303 {
1304 bus_space_tag_t st = sc->sc_st;
1305 bus_space_handle_t sh = sc->sc_sh;
1306 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1307 struct mbuf *m;
1308 struct mec_rxdesc *rxd;
1309 uint64_t rxstat;
1310 u_int len;
1311 int i;
1312
1313 DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: called\n"));
1314
1315 for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
1316 rxd = &sc->sc_rxdesc[i];
1317
1318 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
1319 rxstat = rxd->rxd_stat;
1320
1321 DPRINTF(MEC_DEBUG_RXINTR,
1322 ("mec_rxintr: rxstat = 0x%016llx, rxptr = %d\n",
1323 rxstat, i));
1324 DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxfifo = 0x%08x\n",
1325 (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));
1326
1327 if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
1328 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1329 break;
1330 }
1331
1332 len = rxstat & MEC_RXSTAT_LEN;
1333
1334 if (len < ETHER_MIN_LEN ||
1335 len > (MCLBYTES - MEC_ETHER_ALIGN)) {
1336 /* invalid length packet; drop it. */
1337 DPRINTF(MEC_DEBUG_RXINTR,
1338 ("mec_rxintr: wrong packet\n"));
1339 dropit:
1340 ifp->if_ierrors++;
1341 rxd->rxd_stat = 0;
1342 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1343 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
1344 MEC_CDRXADDR(sc, i));
1345 continue;
1346 }
1347
1348 if (rxstat &
1349 (MEC_RXSTAT_BADPACKET |
1350 MEC_RXSTAT_LONGEVENT |
1351 MEC_RXSTAT_INVALID |
1352 MEC_RXSTAT_CRCERROR |
1353 MEC_RXSTAT_VIOLATION)) {
1354 printf("%s: mec_rxintr: status = 0x%016llx\n",
1355 device_xname(sc->sc_dev), rxstat);
1356 goto dropit;
1357 }
1358
1359 /*
1360 * The MEC includes the CRC with every packet. Trim
1361 * it off here.
1362 */
1363 len -= ETHER_CRC_LEN;
1364
1365 /*
1366 * now allocate an mbuf (and possibly a cluster) to hold
1367 * the received packet.
1368 */
1369 MGETHDR(m, M_DONTWAIT, MT_DATA);
1370 if (m == NULL) {
1371 printf("%s: unable to allocate RX mbuf\n",
1372 device_xname(sc->sc_dev));
1373 goto dropit;
1374 }
1375 if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1376 MCLGET(m, M_DONTWAIT);
1377 if ((m->m_flags & M_EXT) == 0) {
1378 printf("%s: unable to allocate RX cluster\n",
1379 device_xname(sc->sc_dev));
1380 m_freem(m);
1381 m = NULL;
1382 goto dropit;
1383 }
1384 }
1385
1386 /*
1387 * Note the MEC chip seems to insert 2 bytes of padding at the top of
1388 * the RX buffer, but we copy the whole buffer to avoid an unaligned copy.
1389 */
1390 MEC_RXBUFSYNC(sc, i, len, BUS_DMASYNC_POSTREAD);
1391 memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
1392 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
1393 m->m_data += MEC_ETHER_ALIGN;
1394
1395 /* put RX buffer into FIFO again */
1396 rxd->rxd_stat = 0;
1397 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1398 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
1399
1400 m->m_pkthdr.rcvif = ifp;
1401 m->m_pkthdr.len = m->m_len = len;
1402
1403 ifp->if_ipackets++;
1404
1405 #if NBPFILTER > 0
1406 /*
1407 * Pass this up to any BPF listeners, but only
1408 * pass it up the stack if it's for us.
1409 */
1410 if (ifp->if_bpf)
1411 bpf_mtap(ifp->if_bpf, m);
1412 #endif
1413
1414 /* Pass it on. */
1415 (*ifp->if_input)(ifp, m);
1416 }
1417
1418 /* update RX pointer */
1419 sc->sc_rxptr = i;
1420 }
1421
1422 static void
1423 mec_txintr(struct mec_softc *sc)
1424 {
1425 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1426 struct mec_txdesc *txd;
1427 struct mec_txsoft *txs;
1428 bus_dmamap_t dmamap;
1429 uint64_t txstat;
1430 int i;
1431 u_int col;
1432
1433 ifp->if_flags &= ~IFF_OACTIVE;
1434
1435 DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: called\n"));
1436
1437 for (i = sc->sc_txdirty; sc->sc_txpending != 0;
1438 i = MEC_NEXTTX(i), sc->sc_txpending--) {
1439 txd = &sc->sc_txdesc[i];
1440
1441 MEC_TXDESCSYNC(sc, i,
1442 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1443
1444 txstat = txd->txd_stat;
1445 DPRINTF(MEC_DEBUG_TXINTR,
1446 ("mec_txintr: dirty = %d, txstat = 0x%016llx\n",
1447 i, txstat));
1448 if ((txstat & MEC_TXSTAT_SENT) == 0) {
1449 MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
1450 break;
1451 }
1452
1453 if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
1454 printf("%s: TX error: txstat = 0x%016llx\n",
1455 device_xname(sc->sc_dev), txstat);
1456 ifp->if_oerrors++;
1457 continue;
1458 }
1459
1460 txs = &sc->sc_txsoft[i];
1461 if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
1462 dmamap = txs->txs_dmamap;
1463 bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
1464 dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1465 bus_dmamap_unload(sc->sc_dmat, dmamap);
1466 m_freem(txs->txs_mbuf);
1467 txs->txs_mbuf = NULL;
1468 }
1469
1470 col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
1471 ifp->if_collisions += col;
1472 ifp->if_opackets++;
1473 }
1474
1475 /* update the dirty TX buffer pointer */
1476 sc->sc_txdirty = i;
1477 DPRINTF(MEC_DEBUG_INTR,
1478 ("mec_txintr: sc_txdirty = %2d, sc_txpending = %2d\n",
1479 sc->sc_txdirty, sc->sc_txpending));
1480
1481 /* cancel the watchdog timer if there are no pending TX packets */
1482 if (sc->sc_txpending == 0)
1483 ifp->if_timer = 0;
1484 }
1485
1486 static void
1487 mec_shutdown(void *arg)
1488 {
1489 struct mec_softc *sc = arg;
1490
1491 mec_stop(&sc->sc_ethercom.ec_if, 1);
1492 /* make sure to stop DMA etc. */
1493 mec_reset(sc);
1494 }
1495