/*	$NetBSD: if_mec.c,v 1.29 2008/08/14 03:43:50 tsutsui Exp $	*/
2
3 /*-
4 * Copyright (c) 2004 Izumi Tsutsui. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*
28 * Copyright (c) 2003 Christopher SEKIYA
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 * This product includes software developed for the
42 * NetBSD Project. See http://www.NetBSD.org/ for
43 * information about NetBSD.
44 * 4. The name of the author may not be used to endorse or promote products
45 * derived from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */
58
59 /*
60 * MACE MAC-110 Ethernet driver
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.29 2008/08/14 03:43:50 tsutsui Exp $");
65
66 #include "opt_ddb.h"
67 #include "bpfilter.h"
68 #include "rnd.h"
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/device.h>
73 #include <sys/callout.h>
74 #include <sys/mbuf.h>
75 #include <sys/malloc.h>
76 #include <sys/kernel.h>
77 #include <sys/socket.h>
78 #include <sys/ioctl.h>
79 #include <sys/errno.h>
80
81 #if NRND > 0
82 #include <sys/rnd.h>
83 #endif
84
85 #include <net/if.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 #include <net/if_ether.h>
89
90 #if NBPFILTER > 0
91 #include <net/bpf.h>
92 #endif
93
94 #include <machine/bus.h>
95 #include <machine/intr.h>
96 #include <machine/machtype.h>
97
98 #include <dev/mii/mii.h>
99 #include <dev/mii/miivar.h>
100
101 #include <sgimips/mace/macevar.h>
102 #include <sgimips/mace/if_mecreg.h>
103
104 #include <dev/arcbios/arcbios.h>
105 #include <dev/arcbios/arcbiosvar.h>
106
107 /* #define MEC_DEBUG */
108
109 #ifdef MEC_DEBUG
110 #define MEC_DEBUG_RESET 0x01
111 #define MEC_DEBUG_START 0x02
112 #define MEC_DEBUG_STOP 0x04
113 #define MEC_DEBUG_INTR 0x08
114 #define MEC_DEBUG_RXINTR 0x10
115 #define MEC_DEBUG_TXINTR 0x20
116 uint32_t mec_debug = 0;
117 #define DPRINTF(x, y) if (mec_debug & (x)) printf y
118 #else
119 #define DPRINTF(x, y) /* nothing */
120 #endif
121
122 /*
123 * Transmit descriptor list size
124 */
125 #define MEC_NTXDESC 64
126 #define MEC_NTXDESC_MASK (MEC_NTXDESC - 1)
127 #define MEC_NEXTTX(x) (((x) + 1) & MEC_NTXDESC_MASK)
128 #define MEC_NTXDESC_RSVD 4
129 #define MEC_NTXDESC_INTR 8
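
/*
 * mec_start() asks for a TX completion interrupt every MEC_NTXDESC_INTR
 * packets once the ring is more than half full, and mec_txintr() clears
 * IFF_OACTIVE only once more than MEC_NTXDESC_RSVD descriptors are free.
 */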
130
131 /*
132 * software state for TX
133 */
134 struct mec_txsoft {
135 struct mbuf *txs_mbuf; /* head of our mbuf chain */
136 bus_dmamap_t txs_dmamap; /* our DMA map */
137 uint32_t txs_flags;
138 #define MEC_TXS_BUFLEN_MASK 0x0000007f /* data len in txd_buf */
139 #define MEC_TXS_TXDBUF 0x00000080 /* txd_buf is used */
140 #define MEC_TXS_TXDPTR1 0x00000100 /* txd_ptr[0] is used */
141 };
142
143 /*
144 * Transmit buffer descriptor
145 */
146 #define MEC_TXDESCSIZE 128
147 #define MEC_NTXPTR 3
148 #define MEC_TXD_BUFOFFSET \
149 (sizeof(uint64_t) + MEC_NTXPTR * sizeof(uint64_t))
150 #define MEC_TXD_BUFSIZE (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
151 #define MEC_TXD_BUFSTART(len) (MEC_TXD_BUFSIZE - (len))
152 #define MEC_TXD_ALIGN 8
153 #define MEC_TXD_ROUNDUP(addr) \
154 (((addr) + (MEC_TXD_ALIGN - 1)) & ~((uint64_t)MEC_TXD_ALIGN - 1))
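
/*
 * e.g. MEC_TXD_BUFSIZE is 128 - (8 + 3 * 8) = 96 bytes, so a minimum-size
 * 60 byte frame copied at offset MEC_TXD_BUFSTART(60) = 36 ends exactly at
 * the end of the 128 byte descriptor.
 */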
155
156 struct mec_txdesc {
157 volatile uint64_t txd_cmd;
158 #define MEC_TXCMD_DATALEN 0x000000000000ffff /* data length */
159 #define MEC_TXCMD_BUFSTART 0x00000000007f0000 /* start byte offset */
160 #define TXCMD_BUFSTART(x) ((x) << 16)
161 #define MEC_TXCMD_TERMDMA 0x0000000000800000 /* stop DMA on abort */
162 #define MEC_TXCMD_TXINT 0x0000000001000000 /* INT after TX done */
163 #define MEC_TXCMD_PTR1 0x0000000002000000 /* valid 1st txd_ptr */
164 #define MEC_TXCMD_PTR2 0x0000000004000000 /* valid 2nd txd_ptr */
165 #define MEC_TXCMD_PTR3 0x0000000008000000 /* valid 3rd txd_ptr */
166 #define MEC_TXCMD_UNUSED 0xfffffffff0000000ULL /* should be zero */
167
168 #define txd_stat txd_cmd
169 #define MEC_TXSTAT_LEN 0x000000000000ffff /* TX length */
170 #define MEC_TXSTAT_COLCNT 0x00000000000f0000 /* collision count */
171 #define MEC_TXSTAT_COLCNT_SHIFT 16
172 #define MEC_TXSTAT_LATE_COL 0x0000000000100000 /* late collision */
173 #define MEC_TXSTAT_CRCERROR 0x0000000000200000 /* */
174 #define MEC_TXSTAT_DEFERRED 0x0000000000400000 /* */
175 #define MEC_TXSTAT_SUCCESS 0x0000000000800000 /* TX complete */
176 #define MEC_TXSTAT_TOOBIG 0x0000000001000000 /* */
177 #define MEC_TXSTAT_UNDERRUN 0x0000000002000000 /* */
178 #define MEC_TXSTAT_COLLISIONS 0x0000000004000000 /* */
179 #define MEC_TXSTAT_EXDEFERRAL 0x0000000008000000 /* */
180 #define MEC_TXSTAT_COLLIDED 0x0000000010000000 /* */
181 #define MEC_TXSTAT_UNUSED 0x7fffffffe0000000ULL /* should be zero */
182 #define MEC_TXSTAT_SENT 0x8000000000000000ULL /* packet sent */
183
184 uint64_t txd_ptr[MEC_NTXPTR];
185 #define MEC_TXPTR_UNUSED2 0x0000000000000007 /* should be zero */
186 #define MEC_TXPTR_DMAADDR 0x00000000fffffff8 /* TX DMA address */
187 #define MEC_TXPTR_LEN 0x0000ffff00000000ULL /* buffer length */
188 #define TXPTR_LEN(x) ((uint64_t)(x) << 32)
189 #define MEC_TXPTR_UNUSED1 0xffff000000000000ULL /* should be zero */
190
191 uint8_t txd_buf[MEC_TXD_BUFSIZE];
192 };
193
194 /*
195 * Receive buffer size
196 */
197 #define MEC_NRXDESC 16
198 #define MEC_NRXDESC_MASK (MEC_NRXDESC - 1)
199 #define MEC_NEXTRX(x) (((x) + 1) & MEC_NRXDESC_MASK)
200
201 /*
202 * Receive buffer description
203 */
204 #define MEC_RXDESCSIZE 4096 /* umm, should be 4kbyte aligned */
205 #define MEC_RXD_NRXPAD 3
206 #define MEC_RXD_DMAOFFSET (1 + MEC_RXD_NRXPAD)
207 #define MEC_RXD_BUFOFFSET (MEC_RXD_DMAOFFSET * sizeof(uint64_t))
208 #define MEC_RXD_BUFSIZE (MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
209
210 struct mec_rxdesc {
211 volatile uint64_t rxd_stat;
212 #define MEC_RXSTAT_LEN 0x000000000000ffff /* data length */
213 #define MEC_RXSTAT_VIOLATION 0x0000000000010000 /* code violation (?) */
214 #define MEC_RXSTAT_UNUSED2 0x0000000000020000 /* unknown (?) */
215 #define MEC_RXSTAT_CRCERROR 0x0000000000040000 /* CRC error */
216 #define MEC_RXSTAT_MULTICAST 0x0000000000080000 /* multicast packet */
217 #define MEC_RXSTAT_BROADCAST 0x0000000000100000 /* broadcast packet */
218 #define MEC_RXSTAT_INVALID 0x0000000000200000 /* invalid preamble */
219 #define MEC_RXSTAT_LONGEVENT 0x0000000000400000 /* long packet */
220 #define MEC_RXSTAT_BADPACKET 0x0000000000800000 /* bad packet */
221 #define MEC_RXSTAT_CAREVENT 0x0000000001000000 /* carrier event */
222 #define MEC_RXSTAT_MATCHMCAST 0x0000000002000000 /* match multicast */
223 #define MEC_RXSTAT_MATCHMAC 0x0000000004000000 /* match MAC */
224 #define MEC_RXSTAT_SEQNUM 0x00000000f8000000 /* sequence number */
225 #define MEC_RXSTAT_CKSUM 0x0000ffff00000000ULL /* IP checksum */
226 #define MEC_RXSTAT_UNUSED1 0x7fff000000000000ULL /* should be zero */
227 #define MEC_RXSTAT_RECEIVED 0x8000000000000000ULL /* set to 1 on RX */
228 uint64_t rxd_pad1[MEC_RXD_NRXPAD];
229 uint8_t rxd_buf[MEC_RXD_BUFSIZE];
230 };
231
232 /*
233 * control structures for DMA ops
234 */
235 struct mec_control_data {
236 /*
237 * TX descriptors and buffers
238 */
239 struct mec_txdesc mcd_txdesc[MEC_NTXDESC];
240
241 /*
242 * RX descriptors and buffers
243 */
244 struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
245 };
246
/*
 * It _seems_ there are some restrictions on descriptor addresses:
 *
 * - The base address of the txdescs should be 8kbyte aligned
 * - Each txdesc should be 128byte aligned
 * - Each rxdesc should be 4kbyte aligned
 *
 * So we have to specify 8kbyte alignment to allocate the txdescs.
 * In that case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
 * so the rxdescs which follow them are also 4kbyte aligned.
 */
258 #define MEC_CONTROL_DATA_ALIGN (8 * 1024)
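
/*
 * Compile-time sanity check (a minimal sketch) for the layout assumptions
 * above: the negative array size forces a build error if either descriptor
 * structure drifts from its expected size.
 */
typedef char mec_txdesc_size_check[sizeof(struct mec_txdesc) == MEC_TXDESCSIZE
    ? 1 : -1];
typedef char mec_rxdesc_size_check[sizeof(struct mec_rxdesc) == MEC_RXDESCSIZE
    ? 1 : -1];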
259
260 #define MEC_CDOFF(x) offsetof(struct mec_control_data, x)
261 #define MEC_CDTXOFF(x) MEC_CDOFF(mcd_txdesc[(x)])
262 #define MEC_CDRXOFF(x) MEC_CDOFF(mcd_rxdesc[(x)])
263
264 /*
265 * software state per device
266 */
267 struct mec_softc {
268 device_t sc_dev; /* generic device structures */
269
270 bus_space_tag_t sc_st; /* bus_space tag */
271 bus_space_handle_t sc_sh; /* bus_space handle */
272 bus_dma_tag_t sc_dmat; /* bus_dma tag */
273 void *sc_sdhook; /* shutdown hook */
274
275 struct ethercom sc_ethercom; /* Ethernet common part */
276
277 struct mii_data sc_mii; /* MII/media information */
278 int sc_phyaddr; /* MII address */
279 struct callout sc_tick_ch; /* tick callout */
280
281 uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */
282
283 bus_dmamap_t sc_cddmamap; /* bus_dma map for control data */
284 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
285
286 /* pointer to allocated control data */
287 struct mec_control_data *sc_control_data;
288 #define sc_txdesc sc_control_data->mcd_txdesc
289 #define sc_rxdesc sc_control_data->mcd_rxdesc
290
291 /* software state for TX descs */
292 struct mec_txsoft sc_txsoft[MEC_NTXDESC];
293
294 int sc_txpending; /* number of TX requests pending */
295 int sc_txdirty; /* first dirty TX descriptor */
296 int sc_txlast; /* last used TX descriptor */
297
298 int sc_rxptr; /* next ready RX buffer */
299
300 #if NRND > 0
301 rndsource_element_t sc_rnd_source; /* random source */
302 #endif
303 };
304
305 #define MEC_CDTXADDR(sc, x) ((sc)->sc_cddma + MEC_CDTXOFF(x))
306 #define MEC_CDRXADDR(sc, x) ((sc)->sc_cddma + MEC_CDRXOFF(x))
307
308 #define MEC_TXDESCSYNC(sc, x, ops) \
309 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
310 MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
311 #define MEC_TXCMDSYNC(sc, x, ops) \
312 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
313 MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
314
315 #define MEC_RXSTATSYNC(sc, x, ops) \
316 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
317 MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
318 #define MEC_RXBUFSYNC(sc, x, len, ops) \
319 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
320 MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET, \
321 MEC_ETHER_ALIGN + (len), (ops))
322
323 /* XXX these values should be moved to <net/if_ether.h> ? */
324 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
325 #define MEC_ETHER_ALIGN 2
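/*
 * The 2 byte MEC_ETHER_ALIGN offset makes the IP header that follows the
 * 14 byte Ethernet header land on a 4-byte boundary.
 */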
326
327 static int mec_match(device_t, cfdata_t, void *);
328 static void mec_attach(device_t, device_t, void *);
329
330 static int mec_mii_readreg(device_t, int, int);
331 static void mec_mii_writereg(device_t, int, int, int);
332 static int mec_mii_wait(struct mec_softc *);
333 static void mec_statchg(device_t);
334
335 static void enaddr_aton(const char *, uint8_t *);
336
337 static int mec_init(struct ifnet * ifp);
338 static void mec_start(struct ifnet *);
339 static void mec_watchdog(struct ifnet *);
340 static void mec_tick(void *);
341 static int mec_ioctl(struct ifnet *, u_long, void *);
342 static void mec_reset(struct mec_softc *);
343 static void mec_setfilter(struct mec_softc *);
344 static int mec_intr(void *arg);
345 static void mec_stop(struct ifnet *, int);
346 static void mec_rxintr(struct mec_softc *);
347 static void mec_txintr(struct mec_softc *, uint32_t);
348 static void mec_shutdown(void *);
349
350 CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc),
351 mec_match, mec_attach, NULL, NULL);
352
353 static int mec_matched = 0;
354
355 static int
356 mec_match(device_t parent, cfdata_t cf, void *aux)
357 {
358
359 /* allow only one device */
360 if (mec_matched)
361 return 0;
362
363 mec_matched = 1;
364 return 1;
365 }
366
367 static void
368 mec_attach(device_t parent, device_t self, void *aux)
369 {
370 struct mec_softc *sc = device_private(self);
371 struct mace_attach_args *maa = aux;
372 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
373 uint64_t address, command;
374 const char *macaddr;
375 struct mii_softc *child;
376 bus_dma_segment_t seg;
377 int i, err, rseg;
378 bool mac_is_fake;
379
380 sc->sc_dev = self;
381 sc->sc_st = maa->maa_st;
382 if (bus_space_subregion(sc->sc_st, maa->maa_sh,
383 maa->maa_offset, 0, &sc->sc_sh) != 0) {
384 aprint_error(": can't map i/o space\n");
385 return;
386 }
387
388 /* set up DMA structures */
389 sc->sc_dmat = maa->maa_dmat;
390
391 /*
392 * Allocate the control data structures, and create and load the
393 * DMA map for it.
394 */
395 if ((err = bus_dmamem_alloc(sc->sc_dmat,
396 sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
397 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
398 aprint_error(": unable to allocate control data, error = %d\n",
399 err);
400 goto fail_0;
401 }
	/*
	 * XXX needs re-thinking...
	 * The control data structures contain the whole RX data buffers, so
	 * BUS_DMA_COHERENT (which disables caching) may hurt performance
	 * when copying data from the RX buffers to mbufs in normal memory;
	 * on the other hand, without it we have to make sure that all
	 * bus_dmamap_sync(9) ops are called properly.
	 */
410 if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
411 sizeof(struct mec_control_data),
412 (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
413 aprint_error(": unable to map control data, error = %d\n", err);
414 goto fail_1;
415 }
416 memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));
417
418 if ((err = bus_dmamap_create(sc->sc_dmat,
419 sizeof(struct mec_control_data), 1,
420 sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
421 aprint_error(": unable to create control data DMA map,"
422 " error = %d\n", err);
423 goto fail_2;
424 }
425 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
426 sc->sc_control_data, sizeof(struct mec_control_data), NULL,
427 BUS_DMA_NOWAIT)) != 0) {
428 aprint_error(": unable to load control data DMA map,"
429 " error = %d\n", err);
430 goto fail_3;
431 }
432
433 /* create TX buffer DMA maps */
434 for (i = 0; i < MEC_NTXDESC; i++) {
435 if ((err = bus_dmamap_create(sc->sc_dmat,
436 MCLBYTES, 1, MCLBYTES, PAGE_SIZE, 0,
437 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
438 aprint_error(": unable to create tx DMA map %d,"
439 " error = %d\n", i, err);
440 goto fail_4;
441 }
442 }
443
444 callout_init(&sc->sc_tick_ch, 0);
445
446 /* get Ethernet address from ARCBIOS */
447 if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
448 aprint_error(": unable to get MAC address!\n");
449 goto fail_4;
450 }
	/*
	 * On some machines the DS2502 chip storing the serial number/
	 * MAC address is on the PCI riser board - if this board is
	 * missing, ARCBIOS will not know a good Ethernet address (but
	 * otherwise the machine will work fine).
	 */
457 mac_is_fake = false;
458 if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) {
459 uint32_t ui = 0;
460 const char * netaddr =
461 ARCBIOS->GetEnvironmentVariable("netaddr");
462
463 /*
464 * Create a MAC address by abusing the "netaddr" env var
465 */
466 sc->sc_enaddr[0] = 0xf2;
467 sc->sc_enaddr[1] = 0x0b;
468 sc->sc_enaddr[2] = 0xa4;
469 if (netaddr) {
470 mac_is_fake = true;
471 while (*netaddr) {
472 int v = 0;
473 while (*netaddr && *netaddr != '.') {
474 if (*netaddr >= '0' && *netaddr <= '9')
475 v = v*10 + (*netaddr - '0');
476 netaddr++;
477 }
478 ui <<= 8;
479 ui |= v;
480 if (*netaddr == '.')
481 netaddr++;
482 }
483 }
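		/*
		 * If "netaddr" was parsed above, ui holds its dotted-quad
		 * value; on this big-endian platform the copy below takes
		 * its lower three octets as the last three bytes of the
		 * generated address.
		 */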
484 memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3);
485 }
486 if (!mac_is_fake)
487 enaddr_aton(macaddr, sc->sc_enaddr);
488
489 /* set the Ethernet address */
490 address = 0;
491 for (i = 0; i < ETHER_ADDR_LEN; i++) {
492 address = address << 8;
493 address |= sc->sc_enaddr[i];
494 }
495 bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address);
496
497 /* reset device */
498 mec_reset(sc);
499
500 command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);
501
502 aprint_normal(": MAC-110 Ethernet, rev %u\n",
503 (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT));
504
505 if (mac_is_fake)
506 aprint_normal_dev(self,
507 "could not get ethernet address from firmware"
508 " - generated one from the \"netaddr\" environment"
509 " variable\n");
510 aprint_normal_dev(self, "Ethernet address %s\n",
511 ether_sprintf(sc->sc_enaddr));
512
513 /* Done, now attach everything */
514
515 sc->sc_mii.mii_ifp = ifp;
516 sc->sc_mii.mii_readreg = mec_mii_readreg;
517 sc->sc_mii.mii_writereg = mec_mii_writereg;
518 sc->sc_mii.mii_statchg = mec_statchg;
519
520 /* Set up PHY properties */
521 sc->sc_ethercom.ec_mii = &sc->sc_mii;
522 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
523 ether_mediastatus);
524 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
525 MII_OFFSET_ANY, 0);
526
527 child = LIST_FIRST(&sc->sc_mii.mii_phys);
528 if (child == NULL) {
529 /* No PHY attached */
530 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
531 0, NULL);
532 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
533 } else {
534 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
535 sc->sc_phyaddr = child->mii_phy;
536 }
537
538 strcpy(ifp->if_xname, device_xname(self));
539 ifp->if_softc = sc;
540 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
541 ifp->if_ioctl = mec_ioctl;
542 ifp->if_start = mec_start;
543 ifp->if_watchdog = mec_watchdog;
544 ifp->if_init = mec_init;
545 ifp->if_stop = mec_stop;
546 ifp->if_mtu = ETHERMTU;
547 IFQ_SET_READY(&ifp->if_snd);
548
549 if_attach(ifp);
550 ether_ifattach(ifp, sc->sc_enaddr);
551
552 /* establish interrupt */
553 cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);
554
555 #if NRND > 0
556 rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
557 RND_TYPE_NET, 0);
558 #endif
559
560 /* set shutdown hook to reset interface on powerdown */
561 sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc);
562
563 return;
564
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
569 fail_4:
570 for (i = 0; i < MEC_NTXDESC; i++) {
571 if (sc->sc_txsoft[i].txs_dmamap != NULL)
572 bus_dmamap_destroy(sc->sc_dmat,
573 sc->sc_txsoft[i].txs_dmamap);
574 }
575 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
576 fail_3:
577 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
578 fail_2:
579 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
580 sizeof(struct mec_control_data));
581 fail_1:
582 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
583 fail_0:
584 return;
585 }
586
587 static int
588 mec_mii_readreg(device_t self, int phy, int reg)
589 {
590 struct mec_softc *sc = device_private(self);
591 bus_space_tag_t st = sc->sc_st;
592 bus_space_handle_t sh = sc->sc_sh;
593 uint64_t val;
594 int i;
595
596 if (mec_mii_wait(sc) != 0)
597 return 0;
598
599 bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
600 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
601 delay(25);
602 bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
603 delay(25);
604 mec_mii_wait(sc);
605
606 for (i = 0; i < 20; i++) {
607 delay(30);
608
609 val = bus_space_read_8(st, sh, MEC_PHY_DATA);
610
611 if ((val & MEC_PHY_DATA_BUSY) == 0)
612 return val & MEC_PHY_DATA_VALUE;
613 }
614 return 0;
615 }
616
617 static void
618 mec_mii_writereg(device_t self, int phy, int reg, int val)
619 {
620 struct mec_softc *sc = device_private(self);
621 bus_space_tag_t st = sc->sc_st;
622 bus_space_handle_t sh = sc->sc_sh;
623
624 if (mec_mii_wait(sc) != 0) {
625 printf("timed out writing %x: %x\n", reg, val);
626 return;
627 }
628
629 bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
630 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
631
632 delay(60);
633
634 bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);
635
636 delay(60);
637
638 mec_mii_wait(sc);
639 }
640
641 static int
642 mec_mii_wait(struct mec_softc *sc)
643 {
644 uint32_t busy;
645 int i, s;
646
647 for (i = 0; i < 100; i++) {
648 delay(30);
649
650 s = splhigh();
651 busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
652 splx(s);
653
654 if ((busy & MEC_PHY_DATA_BUSY) == 0)
655 return 0;
656 #if 0
657 if (busy == 0xffff) /* XXX ? */
658 return 0;
659 #endif
660 }
661
662 printf("%s: MII timed out\n", device_xname(sc->sc_dev));
663 return 1;
664 }
665
666 static void
667 mec_statchg(device_t self)
668 {
669 struct mec_softc *sc = device_private(self);
670 bus_space_tag_t st = sc->sc_st;
671 bus_space_handle_t sh = sc->sc_sh;
672 uint32_t control;
673
674 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
675 control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
676 MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);
677
678 /* must also set IPG here for duplex stuff ... */
679 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
680 control |= MEC_MAC_FULL_DUPLEX;
681 } else {
682 /* set IPG */
683 control |= MEC_MAC_IPG_DEFAULT;
684 }
685
686 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
687 }
688
/*
 * XXX
 * Maybe this function should be moved to a common place
 * (sgimips/machdep.c or elsewhere) shared by all on-board network devices.
 */
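/*
 * Parse an Ethernet address string of the form "xx:xx:xx:xx:xx:xx"
 * (as returned in the ARCBIOS "eaddr" environment variable) into its
 * 6 byte binary form.
 */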
694 static void
695 enaddr_aton(const char *str, uint8_t *eaddr)
696 {
697 int i;
698 char c;
699
700 for (i = 0; i < ETHER_ADDR_LEN; i++) {
701 if (*str == ':')
702 str++;
703
704 c = *str++;
705 if (isdigit(c)) {
706 eaddr[i] = (c - '0');
707 } else if (isxdigit(c)) {
708 eaddr[i] = (toupper(c) + 10 - 'A');
709 }
710 c = *str++;
711 if (isdigit(c)) {
712 eaddr[i] = (eaddr[i] << 4) | (c - '0');
713 } else if (isxdigit(c)) {
714 eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
715 }
716 }
717 }
718
719 static int
720 mec_init(struct ifnet *ifp)
721 {
722 struct mec_softc *sc = ifp->if_softc;
723 bus_space_tag_t st = sc->sc_st;
724 bus_space_handle_t sh = sc->sc_sh;
725 struct mec_rxdesc *rxd;
726 int i, rc;
727
728 /* cancel any pending I/O */
729 mec_stop(ifp, 0);
730
731 /* reset device */
732 mec_reset(sc);
733
734 /* setup filter for multicast or promisc mode */
735 mec_setfilter(sc);
736
737 /* set the TX ring pointer to the base address */
738 bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));
739
740 sc->sc_txpending = 0;
741 sc->sc_txdirty = 0;
742 sc->sc_txlast = MEC_NTXDESC - 1;
743
744 /* put RX buffers into FIFO */
745 for (i = 0; i < MEC_NRXDESC; i++) {
746 rxd = &sc->sc_rxdesc[i];
747 rxd->rxd_stat = 0;
748 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
749 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
750 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
751 }
752 sc->sc_rxptr = 0;
753
754 #if 0 /* XXX no info */
755 bus_space_write_8(st, sh, MEC_TIMER, 0);
756 #endif
757
	/*
	 * MEC_DMA_TX_INT_ENABLE will be set later; otherwise it causes
	 * spurious interrupts while the TX buffers are empty.
	 */
762 bus_space_write_8(st, sh, MEC_DMA_CONTROL,
763 (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
764 (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
765 MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
766 MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);
767
768 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
769
770 if ((rc = ether_mediachange(ifp)) != 0)
771 return rc;
772
773 ifp->if_flags |= IFF_RUNNING;
774 ifp->if_flags &= ~IFF_OACTIVE;
775 mec_start(ifp);
776
777 return 0;
778 }
779
780 static void
781 mec_reset(struct mec_softc *sc)
782 {
783 bus_space_tag_t st = sc->sc_st;
784 bus_space_handle_t sh = sc->sc_sh;
785 uint64_t control;
786
787 /* stop DMA first */
788 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
789
790 /* reset chip */
791 bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
792 delay(1000);
793 bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
794 delay(1000);
795
796 /* Default to 100/half and let auto-negotiation work its magic */
797 control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
798 MEC_MAC_IPG_DEFAULT;
799
800 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
801 /* stop DMA again for sanity */
802 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
803
804 DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
805 bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
806 }
807
808 static void
809 mec_start(struct ifnet *ifp)
810 {
811 struct mec_softc *sc = ifp->if_softc;
812 struct mbuf *m0, *m;
813 struct mec_txdesc *txd;
814 struct mec_txsoft *txs;
815 bus_dmamap_t dmamap;
816 bus_space_tag_t st = sc->sc_st;
817 bus_space_handle_t sh = sc->sc_sh;
818 uint64_t txdaddr;
819 int error, firsttx, nexttx, opending;
820 int len, bufoff, buflen, unaligned, txdlen;
821
822 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
823 return;
824
825 /*
826 * Remember the previous txpending and the first transmit descriptor.
827 */
828 opending = sc->sc_txpending;
829 firsttx = MEC_NEXTTX(sc->sc_txlast);
830
831 DPRINTF(MEC_DEBUG_START,
832 ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx));
833
834 while (sc->sc_txpending < MEC_NTXDESC - 1) {
835 /* Grab a packet off the queue. */
836 IFQ_POLL(&ifp->if_snd, m0);
837 if (m0 == NULL)
838 break;
839 m = NULL;
840
841 /*
842 * Get the next available transmit descriptor.
843 */
844 nexttx = MEC_NEXTTX(sc->sc_txlast);
845 txd = &sc->sc_txdesc[nexttx];
846 txs = &sc->sc_txsoft[nexttx];
847
848 buflen = 0;
849 bufoff = 0;
850 txdaddr = 0; /* XXX gcc */
851 txdlen = 0; /* XXX gcc */
852
853 len = m0->m_pkthdr.len;
854
855 DPRINTF(MEC_DEBUG_START,
856 ("mec_start: len = %d, nexttx = %d\n", len, nexttx));
857
858 if (len < ETHER_PAD_LEN) {
			/*
			 * I don't know if the MEC chip does auto padding,
			 * so if the packet is small enough, just copy it
			 * into the buffer in the txdesc and pad it there;
			 * that is the simplest way anyway.
			 */
865 DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n"));
866
867 IFQ_DEQUEUE(&ifp->if_snd, m0);
868 bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
869 m_copydata(m0, 0, m0->m_pkthdr.len,
870 txd->txd_buf + bufoff);
871 memset(txd->txd_buf + bufoff + len, 0,
872 ETHER_PAD_LEN - len);
873 len = buflen = ETHER_PAD_LEN;
874
875 txs->txs_flags = MEC_TXS_TXDBUF | buflen;
876 } else {
			/*
			 * If the packet won't fit into the buffer in the
			 * txdesc, we have to use a concatenate pointer to
			 * handle it.  While the MEC can concatenate up to
			 * three segments, it requires that the second and
			 * third segments be 8 byte aligned.  Since that is
			 * unlikely for mbuf clusters, we use only the first
			 * concatenate pointer; if the packet doesn't fit in
			 * one DMA segment, allocate a new mbuf and copy the
			 * packet into it.
			 *
			 * Besides, if the start address of the first segment
			 * is not 8 byte aligned, the unaligned part has to
			 * be copied to the txdesc buffer. (XXX see comments below)
			 */
892 DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n"));
893
894 dmamap = txs->txs_dmamap;
895 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
896 BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
897 DPRINTF(MEC_DEBUG_START,
898 ("mec_start: re-allocating mbuf\n"));
899 MGETHDR(m, M_DONTWAIT, MT_DATA);
900 if (m == NULL) {
901 printf("%s: unable to allocate "
902 "TX mbuf\n",
903 device_xname(sc->sc_dev));
904 break;
905 }
906 if (len > (MHLEN - MEC_ETHER_ALIGN)) {
907 MCLGET(m, M_DONTWAIT);
908 if ((m->m_flags & M_EXT) == 0) {
909 printf("%s: unable to allocate "
910 "TX cluster\n",
911 device_xname(sc->sc_dev));
912 m_freem(m);
913 break;
914 }
915 }
				/*
				 * Each packet starts with the Ethernet
				 * header, so in many cases the header isn't
				 * 4-byte aligned while the data after it is.
				 * Adding a 2-byte offset before copying into
				 * the new mbuf avoids an unaligned copy and
				 * may improve performance a bit.
				 * As noted above, the unaligned part has to
				 * be copied to the txdesc buffer, which may
				 * cause an extra copy, but for now the MEC
				 * always requires some data in the txdesc
				 * buffer, so we have to copy some data anyway.
				 */
929 m->m_data += MEC_ETHER_ALIGN;
930 m_copydata(m0, 0, len, mtod(m, void *));
931 m->m_pkthdr.len = m->m_len = len;
932 error = bus_dmamap_load_mbuf(sc->sc_dmat,
933 dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
934 if (error) {
935 printf("%s: unable to load TX buffer, "
936 "error = %d\n",
937 device_xname(sc->sc_dev), error);
938 break;
939 }
940 }
941 IFQ_DEQUEUE(&ifp->if_snd, m0);
942 if (m != NULL) {
943 m_freem(m0);
944 m0 = m;
945 }
946
947 /* handle unaligned part */
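			/*
			 * txdaddr is the first 8-byte aligned DMA address
			 * within the mbuf data; any bytes before it (at most
			 * MEC_TXD_ALIGN - 1) are copied into txd_buf below so
			 * that the concatenate pointer stays aligned.
			 */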
948 txdaddr = MEC_TXD_ROUNDUP(dmamap->dm_segs[0].ds_addr);
949 txs->txs_flags = MEC_TXS_TXDPTR1;
950 unaligned =
951 dmamap->dm_segs[0].ds_addr & (MEC_TXD_ALIGN - 1);
952 DPRINTF(MEC_DEBUG_START,
953 ("mec_start: ds_addr = 0x%08x, unaligned = %d\n",
954 (u_int)dmamap->dm_segs[0].ds_addr, unaligned));
955 if (unaligned != 0) {
956 buflen = MEC_TXD_ALIGN - unaligned;
957 bufoff = MEC_TXD_BUFSTART(buflen);
958 DPRINTF(MEC_DEBUG_START,
959 ("mec_start: unaligned, "
960 "buflen = %d, bufoff = %d\n",
961 buflen, bufoff));
962 memcpy(txd->txd_buf + bufoff,
963 mtod(m0, void *), buflen);
964 txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
965 }
966 #if 1
967 else {
				/*
				 * XXX needs hardware info XXX
				 * It seems the MEC always requires some data
				 * in txd_buf[] even if the buffer is 8-byte
				 * aligned; otherwise a DMA abort error occurs
				 * later...
				 */
975 buflen = MEC_TXD_ALIGN;
976 bufoff = MEC_TXD_BUFSTART(buflen);
977 memcpy(txd->txd_buf + bufoff,
978 mtod(m0, void *), buflen);
979 DPRINTF(MEC_DEBUG_START,
980 ("mec_start: aligned, "
981 "buflen = %d, bufoff = %d\n",
982 buflen, bufoff));
983 txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
984 txdaddr += MEC_TXD_ALIGN;
985 }
986 #endif
987 txdlen = len - buflen;
988 DPRINTF(MEC_DEBUG_START,
989 ("mec_start: txdaddr = 0x%08llx, txdlen = %d\n",
990 txdaddr, txdlen));
991
992 /*
993 * sync the DMA map for TX mbuf
994 *
995 * XXX unaligned part doesn't have to be sync'ed,
996 * but it's harmless...
997 */
998 bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
999 dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1000 }
1001
1002 #if NBPFILTER > 0
1003 /*
1004 * Pass packet to bpf if there is a listener.
1005 */
1006 if (ifp->if_bpf)
1007 bpf_mtap(ifp->if_bpf, m0);
1008 #endif
1009
1010 /*
1011 * setup the transmit descriptor.
1012 */
1013 txd->txd_cmd = (len - 1);
1014
		/*
		 * Set MEC_TXCMD_TXINT every MEC_NTXDESC_INTR packets
		 * when more than half of the txdescs have been queued,
		 * because TX_EMPTY interrupts will rarely happen
		 * while the TX queue is that backed up.
		 */
1021 if (sc->sc_txpending > (MEC_NTXDESC / 2) &&
1022 (nexttx & (MEC_NTXDESC_INTR - 1)) == 0)
1023 txd->txd_cmd |= MEC_TXCMD_TXINT;
1024
1025 if (txs->txs_flags & MEC_TXS_TXDBUF)
1026 txd->txd_cmd |= TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen);
1027 if (txs->txs_flags & MEC_TXS_TXDPTR1) {
1028 txd->txd_cmd |= MEC_TXCMD_PTR1;
1029 txd->txd_ptr[0] = TXPTR_LEN(txdlen - 1) | txdaddr;
1030 /*
1031 * Store a pointer to the packet so we can
1032 * free it later.
1033 */
1034 txs->txs_mbuf = m0;
1035 } else {
1036 txd->txd_ptr[0] = 0;
			/*
			 * In this case all data has been copied into the
			 * buffer in the txdesc, so we can free the TX mbuf here.
			 */
1041 m_freem(m0);
1042 }
1043
1044 DPRINTF(MEC_DEBUG_START,
1045 ("mec_start: txd_cmd = 0x%016llx, txd_ptr = 0x%016llx\n",
1046 txd->txd_cmd, txd->txd_ptr[0]));
1047 DPRINTF(MEC_DEBUG_START,
1048 ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n",
1049 len, len, buflen, buflen));
1050
1051 /* sync TX descriptor */
1052 MEC_TXDESCSYNC(sc, nexttx,
1053 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1054
1055 /* start TX */
1056 bus_space_write_8(st, sh, MEC_TX_RING_PTR, MEC_NEXTTX(nexttx));
1057
1058 /* advance the TX pointer. */
1059 sc->sc_txpending++;
1060 sc->sc_txlast = nexttx;
1061 }
1062
1063 if (sc->sc_txpending == MEC_NTXDESC - 1) {
1064 /* No more slots; notify upper layer. */
1065 ifp->if_flags |= IFF_OACTIVE;
1066 }
1067
1068 if (sc->sc_txpending != opending) {
1069 /*
1070 * If the transmitter was idle,
1071 * reset the txdirty pointer and re-enable TX interrupt.
1072 */
1073 if (opending == 0) {
1074 sc->sc_txdirty = firsttx;
1075 bus_space_write_8(st, sh, MEC_TX_ALIAS,
1076 MEC_TX_ALIAS_INT_ENABLE);
1077 }
1078
1079 /* Set a watchdog timer in case the chip flakes out. */
1080 ifp->if_timer = 5;
1081 }
1082 }
1083
1084 static void
1085 mec_stop(struct ifnet *ifp, int disable)
1086 {
1087 struct mec_softc *sc = ifp->if_softc;
1088 struct mec_txsoft *txs;
1089 int i;
1090
1091 DPRINTF(MEC_DEBUG_STOP, ("mec_stop\n"));
1092
1093 ifp->if_timer = 0;
1094 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1095
1096 callout_stop(&sc->sc_tick_ch);
1097 mii_down(&sc->sc_mii);
1098
1099 /* release any TX buffers */
1100 for (i = 0; i < MEC_NTXDESC; i++) {
1101 txs = &sc->sc_txsoft[i];
1102 if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
1103 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1104 m_freem(txs->txs_mbuf);
1105 txs->txs_mbuf = NULL;
1106 }
1107 }
1108 }
1109
1110 static int
1111 mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1112 {
1113 int s, error;
1114
1115 s = splnet();
1116
1117 error = ether_ioctl(ifp, cmd, data);
1118 if (error == ENETRESET) {
1119 /*
1120 * Multicast list has changed; set the hardware filter
1121 * accordingly.
1122 */
1123 if (ifp->if_flags & IFF_RUNNING)
1124 error = mec_init(ifp);
1125 else
1126 error = 0;
1127 }
1128
1129 /* Try to get more packets going. */
1130 mec_start(ifp);
1131
1132 splx(s);
1133 return error;
1134 }
1135
1136 static void
1137 mec_watchdog(struct ifnet *ifp)
1138 {
1139 struct mec_softc *sc = ifp->if_softc;
1140
1141 printf("%s: device timeout\n", device_xname(sc->sc_dev));
1142 ifp->if_oerrors++;
1143
1144 mec_init(ifp);
1145 }
1146
1147 static void
1148 mec_tick(void *arg)
1149 {
1150 struct mec_softc *sc = arg;
1151 int s;
1152
1153 s = splnet();
1154 mii_tick(&sc->sc_mii);
1155 splx(s);
1156
1157 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
1158 }
1159
1160 static void
1161 mec_setfilter(struct mec_softc *sc)
1162 {
1163 struct ethercom *ec = &sc->sc_ethercom;
1164 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1165 struct ether_multi *enm;
1166 struct ether_multistep step;
1167 bus_space_tag_t st = sc->sc_st;
1168 bus_space_handle_t sh = sc->sc_sh;
1169 uint64_t mchash;
1170 uint32_t control, hash;
1171 int mcnt;
1172
1173 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1174 control &= ~MEC_MAC_FILTER_MASK;
1175
1176 if (ifp->if_flags & IFF_PROMISC) {
1177 control |= MEC_MAC_FILTER_PROMISC;
1178 bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1179 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1180 return;
1181 }
1182
1183 mcnt = 0;
1184 mchash = 0;
1185 ETHER_FIRST_MULTI(step, ec, enm);
1186 while (enm != NULL) {
1187 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1188 /* set allmulti for a range of multicast addresses */
1189 control |= MEC_MAC_FILTER_ALLMULTI;
1190 bus_space_write_8(st, sh, MEC_MULTICAST,
1191 0xffffffffffffffffULL);
1192 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1193 return;
1194 }
1195
1196 #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
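/*
 * i.e. the upper 6 bits of the big-endian CRC of the address select one
 * of the 64 bits in the MEC_MULTICAST hash filter register.
 */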
1197
1198 hash = mec_calchash(enm->enm_addrlo);
		/* hash can be up to 63, so a 64-bit shift is needed */
		mchash |= 1ULL << hash;
1200 mcnt++;
1201 ETHER_NEXT_MULTI(step, enm);
1202 }
1203
1204 ifp->if_flags &= ~IFF_ALLMULTI;
1205
1206 if (mcnt > 0)
1207 control |= MEC_MAC_FILTER_MATCHMULTI;
1208
1209 bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1210 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1211 }
1212
1213 static int
1214 mec_intr(void *arg)
1215 {
1216 struct mec_softc *sc = arg;
1217 bus_space_tag_t st = sc->sc_st;
1218 bus_space_handle_t sh = sc->sc_sh;
1219 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1220 uint32_t statreg, statack, txptr;
1221 int handled, sent;
1222
1223 DPRINTF(MEC_DEBUG_INTR, ("mec_intr: called\n"));
1224
1225 handled = sent = 0;
1226
1227 for (;;) {
1228 statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);
1229
1230 DPRINTF(MEC_DEBUG_INTR,
1231 ("mec_intr: INT_STAT = 0x%08x\n", statreg));
1232
1233 statack = statreg & MEC_INT_STATUS_MASK;
1234 if (statack == 0)
1235 break;
1236 bus_space_write_8(st, sh, MEC_INT_STATUS, statack);
1237
1238 handled = 1;
1239
1240 if (statack &
1241 (MEC_INT_RX_THRESHOLD |
1242 MEC_INT_RX_FIFO_UNDERFLOW)) {
1243 mec_rxintr(sc);
1244 }
1245
1246 if (statack &
1247 (MEC_INT_TX_EMPTY |
1248 MEC_INT_TX_PACKET_SENT |
1249 MEC_INT_TX_ABORT)) {
1250 txptr = (statreg & MEC_INT_TX_RING_BUFFER_ALIAS)
1251 >> MEC_INT_TX_RING_BUFFER_SHIFT;
1252 mec_txintr(sc, txptr);
1253 sent = 1;
1254 if ((statack & MEC_INT_TX_EMPTY) != 0) {
				/*
				 * disable the TX interrupt to stop
				 * further TX empty interrupts
				 */
1259 bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
1260 DPRINTF(MEC_DEBUG_INTR,
1261 ("mec_intr: disable TX_INT\n"));
1262 }
1263 }
1264
1265 if (statack &
1266 (MEC_INT_TX_LINK_FAIL |
1267 MEC_INT_TX_MEM_ERROR |
1268 MEC_INT_TX_ABORT |
1269 MEC_INT_RX_FIFO_UNDERFLOW |
1270 MEC_INT_RX_DMA_UNDERFLOW)) {
1271 printf("%s: mec_intr: interrupt status = 0x%08x\n",
1272 device_xname(sc->sc_dev), statreg);
1273 mec_init(ifp);
1274 break;
1275 }
1276 }
1277
1278 if (sent && !IFQ_IS_EMPTY(&ifp->if_snd)) {
1279 /* try to get more packets going */
1280 mec_start(ifp);
1281 }
1282
1283 #if NRND > 0
1284 if (handled)
1285 rnd_add_uint32(&sc->sc_rnd_source, statreg);
1286 #endif
1287
1288 return handled;
1289 }
1290
1291 static void
1292 mec_rxintr(struct mec_softc *sc)
1293 {
1294 bus_space_tag_t st = sc->sc_st;
1295 bus_space_handle_t sh = sc->sc_sh;
1296 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1297 struct mbuf *m;
1298 struct mec_rxdesc *rxd;
1299 uint64_t rxstat;
1300 u_int len;
1301 int i;
1302
1303 DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: called\n"));
1304
1305 for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
1306 rxd = &sc->sc_rxdesc[i];
1307
1308 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
1309 rxstat = rxd->rxd_stat;
1310
1311 DPRINTF(MEC_DEBUG_RXINTR,
1312 ("mec_rxintr: rxstat = 0x%016llx, rxptr = %d\n",
1313 rxstat, i));
1314 DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxfifo = 0x%08x\n",
1315 (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));
1316
1317 if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
1318 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1319 break;
1320 }
1321
1322 len = rxstat & MEC_RXSTAT_LEN;
1323
1324 if (len < ETHER_MIN_LEN ||
1325 len > (MCLBYTES - MEC_ETHER_ALIGN)) {
1326 /* invalid length packet; drop it. */
1327 DPRINTF(MEC_DEBUG_RXINTR,
1328 ("mec_rxintr: wrong packet\n"));
1329 dropit:
1330 ifp->if_ierrors++;
1331 rxd->rxd_stat = 0;
1332 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1333 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
1334 MEC_CDRXADDR(sc, i));
1335 continue;
1336 }
1337
1338 if (rxstat &
1339 (MEC_RXSTAT_BADPACKET |
1340 MEC_RXSTAT_LONGEVENT |
1341 MEC_RXSTAT_INVALID |
1342 MEC_RXSTAT_CRCERROR |
1343 MEC_RXSTAT_VIOLATION)) {
1344 printf("%s: mec_rxintr: status = 0x%016llx\n",
1345 device_xname(sc->sc_dev), rxstat);
1346 goto dropit;
1347 }
1348
1349 /*
1350 * The MEC includes the CRC with every packet. Trim
1351 * it off here.
1352 */
1353 len -= ETHER_CRC_LEN;
1354
1355 /*
1356 * now allocate an mbuf (and possibly a cluster) to hold
1357 * the received packet.
1358 */
1359 MGETHDR(m, M_DONTWAIT, MT_DATA);
1360 if (m == NULL) {
1361 printf("%s: unable to allocate RX mbuf\n",
1362 device_xname(sc->sc_dev));
1363 goto dropit;
1364 }
1365 if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1366 MCLGET(m, M_DONTWAIT);
1367 if ((m->m_flags & M_EXT) == 0) {
1368 printf("%s: unable to allocate RX cluster\n",
1369 device_xname(sc->sc_dev));
1370 m_freem(m);
1371 m = NULL;
1372 goto dropit;
1373 }
1374 }
1375
		/*
		 * Note the MEC chip seems to insert 2 bytes of padding at the
		 * top of the RX buffer, but we copy the whole buffer
		 * (including the padding) to avoid an unaligned copy.
		 */
1380 MEC_RXBUFSYNC(sc, i, len, BUS_DMASYNC_POSTREAD);
1381 memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
1382 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
1383 m->m_data += MEC_ETHER_ALIGN;
1384
1385 /* put RX buffer into FIFO again */
1386 rxd->rxd_stat = 0;
1387 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1388 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
1389
1390 m->m_pkthdr.rcvif = ifp;
1391 m->m_pkthdr.len = m->m_len = len;
1392
1393 ifp->if_ipackets++;
1394
1395 #if NBPFILTER > 0
1396 /*
1397 * Pass this up to any BPF listeners, but only
1398 * pass it up the stack if it's for us.
1399 */
1400 if (ifp->if_bpf)
1401 bpf_mtap(ifp->if_bpf, m);
1402 #endif
1403
1404 /* Pass it on. */
1405 (*ifp->if_input)(ifp, m);
1406 }
1407
1408 /* update RX pointer */
1409 sc->sc_rxptr = i;
1410 }
1411
1412 static void
1413 mec_txintr(struct mec_softc *sc, uint32_t txptr)
1414 {
1415 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1416 struct mec_txdesc *txd;
1417 struct mec_txsoft *txs;
1418 bus_dmamap_t dmamap;
1419 uint64_t txstat;
1420 int i;
1421 u_int col;
1422
1423 DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: called\n"));
1424
1425 for (i = sc->sc_txdirty; i != txptr && sc->sc_txpending != 0;
1426 i = MEC_NEXTTX(i), sc->sc_txpending--) {
1427 txd = &sc->sc_txdesc[i];
1428
1429 MEC_TXDESCSYNC(sc, i,
1430 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1431
1432 txstat = txd->txd_stat;
1433 DPRINTF(MEC_DEBUG_TXINTR,
1434 ("mec_txintr: dirty = %d, txstat = 0x%016llx\n",
1435 i, txstat));
1436 if ((txstat & MEC_TXSTAT_SENT) == 0) {
1437 MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
1438 break;
1439 }
1440
1441 txs = &sc->sc_txsoft[i];
1442 if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
1443 dmamap = txs->txs_dmamap;
1444 bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
1445 dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1446 bus_dmamap_unload(sc->sc_dmat, dmamap);
1447 m_freem(txs->txs_mbuf);
1448 txs->txs_mbuf = NULL;
1449 }
1450
1451 col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
1452 ifp->if_collisions += col;
1453
1454 if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
1455 printf("%s: TX error: txstat = 0x%016llx\n",
1456 device_xname(sc->sc_dev), txstat);
1457 ifp->if_oerrors++;
1458 } else
1459 ifp->if_opackets++;
1460 }
1461
1462 /* update the dirty TX buffer pointer */
1463 sc->sc_txdirty = i;
1464 DPRINTF(MEC_DEBUG_INTR,
1465 ("mec_txintr: sc_txdirty = %2d, sc_txpending = %2d\n",
1466 sc->sc_txdirty, sc->sc_txpending));
1467
1468 /* cancel the watchdog timer if there are no pending TX packets */
1469 if (sc->sc_txpending == 0)
1470 ifp->if_timer = 0;
1471 if (sc->sc_txpending < MEC_NTXDESC - MEC_NTXDESC_RSVD)
1472 ifp->if_flags &= ~IFF_OACTIVE;
1473 }
1474
1475 static void
1476 mec_shutdown(void *arg)
1477 {
1478 struct mec_softc *sc = arg;
1479
1480 mec_stop(&sc->sc_ethercom.ec_if, 1);
1481 /* make sure to stop DMA etc. */
1482 mec_reset(sc);
1483 }
1484