1 /* $NetBSD: if_mec.c,v 1.20 2008/05/14 13:29:28 tsutsui Exp $ */
2
3 /*-
4 * Copyright (c) 2004 Izumi Tsutsui. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*
28 * Copyright (c) 2003 Christopher SEKIYA
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 * This product includes software developed for the
42 * NetBSD Project. See http://www.NetBSD.org/ for
43 * information about NetBSD.
44 * 4. The name of the author may not be used to endorse or promote products
45 * derived from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */
58
59 /*
60 * MACE MAC-110 Ethernet driver
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.20 2008/05/14 13:29:28 tsutsui Exp $");
65
66 #include "opt_ddb.h"
67 #include "bpfilter.h"
68 #include "rnd.h"
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/device.h>
73 #include <sys/callout.h>
74 #include <sys/mbuf.h>
75 #include <sys/malloc.h>
76 #include <sys/kernel.h>
77 #include <sys/socket.h>
78 #include <sys/ioctl.h>
79 #include <sys/errno.h>
80
81 #if NRND > 0
82 #include <sys/rnd.h>
83 #endif
84
85 #include <net/if.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 #include <net/if_ether.h>
89
90 #if NBPFILTER > 0
91 #include <net/bpf.h>
92 #endif
93
94 #include <machine/bus.h>
95 #include <machine/intr.h>
96 #include <machine/machtype.h>
97
98 #include <dev/mii/mii.h>
99 #include <dev/mii/miivar.h>
100
101 #include <sgimips/mace/macevar.h>
102 #include <sgimips/mace/if_mecreg.h>
103
104 #include <dev/arcbios/arcbios.h>
105 #include <dev/arcbios/arcbiosvar.h>
106
107 /* #define MEC_DEBUG */
108
109 #ifdef MEC_DEBUG
110 #define MEC_DEBUG_RESET 0x01
111 #define MEC_DEBUG_START 0x02
112 #define MEC_DEBUG_STOP 0x04
113 #define MEC_DEBUG_INTR 0x08
114 #define MEC_DEBUG_RXINTR 0x10
115 #define MEC_DEBUG_TXINTR 0x20
116 uint32_t mec_debug = 0;
117 #define DPRINTF(x, y) do { if (mec_debug & (x)) printf y; } while (0)
118 #else
119 #define DPRINTF(x, y) /* nothing */
120 #endif
121
122 /*
123 * Transmit descriptor list size
124 */
125 #define MEC_NTXDESC 64
126 #define MEC_NTXDESC_MASK (MEC_NTXDESC - 1)
127 #define MEC_NEXTTX(x) (((x) + 1) & MEC_NTXDESC_MASK)
128
129 /*
130 * software state for TX
131 */
132 struct mec_txsoft {
133 struct mbuf *txs_mbuf; /* head of our mbuf chain */
134 bus_dmamap_t txs_dmamap; /* our DMA map */
135 uint32_t txs_flags;
136 #define MEC_TXS_BUFLEN_MASK 0x0000007f /* data len in txd_buf */
137 #define MEC_TXS_TXDBUF 0x00000080 /* txd_buf is used */
138 #define MEC_TXS_TXDPTR1 0x00000100 /* txd_ptr[0] is used */
139 };
140
141 /*
142 * Transmit buffer descriptor
143 */
144 #define MEC_TXDESCSIZE 128
145 #define MEC_NTXPTR 3
146 #define MEC_TXD_BUFOFFSET \
147 (sizeof(uint64_t) + MEC_NTXPTR * sizeof(uint64_t))
148 #define MEC_TXD_BUFSIZE (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
149 #define MEC_TXD_BUFSTART(len) (MEC_TXD_BUFSIZE - (len))
150 #define MEC_TXD_ALIGN 8
151 #define MEC_TXD_ROUNDUP(addr) \
152 (((addr) + (MEC_TXD_ALIGN - 1)) & ~((uint64_t)MEC_TXD_ALIGN - 1))
153
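/*
 * Each 128-byte TX descriptor consists of a 64-bit command/status word,
 * three 64-bit concatenate pointers and a 96-byte inline data buffer.
 * Data placed in the inline buffer is copied to its tail, i.e. it starts
 * at MEC_TXD_BUFSTART(len) so that it ends exactly at the 128-byte boundary.
 */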
154 struct mec_txdesc {
155 volatile uint64_t txd_cmd;
156 #define MEC_TXCMD_DATALEN 0x000000000000ffff /* data length */
157 #define MEC_TXCMD_BUFSTART 0x00000000007f0000 /* start byte offset */
158 #define TXCMD_BUFSTART(x) ((x) << 16)
159 #define MEC_TXCMD_TERMDMA 0x0000000000800000 /* stop DMA on abort */
160 #define MEC_TXCMD_TXINT 0x0000000001000000 /* INT after TX done */
161 #define MEC_TXCMD_PTR1 0x0000000002000000 /* valid 1st txd_ptr */
162 #define MEC_TXCMD_PTR2 0x0000000004000000 /* valid 2nd txd_ptr */
163 #define MEC_TXCMD_PTR3 0x0000000008000000 /* valid 3rd txd_ptr */
164 #define MEC_TXCMD_UNUSED 0xfffffffff0000000ULL /* should be zero */
165
166 #define txd_stat txd_cmd
167 #define MEC_TXSTAT_LEN 0x000000000000ffff /* TX length */
168 #define MEC_TXSTAT_COLCNT 0x00000000000f0000 /* collision count */
169 #define MEC_TXSTAT_COLCNT_SHIFT 16
170 #define MEC_TXSTAT_LATE_COL 0x0000000000100000 /* late collision */
171 #define MEC_TXSTAT_CRCERROR 0x0000000000200000 /* */
172 #define MEC_TXSTAT_DEFERRED 0x0000000000400000 /* */
173 #define MEC_TXSTAT_SUCCESS 0x0000000000800000 /* TX complete */
174 #define MEC_TXSTAT_TOOBIG 0x0000000001000000 /* */
175 #define MEC_TXSTAT_UNDERRUN 0x0000000002000000 /* */
176 #define MEC_TXSTAT_COLLISIONS 0x0000000004000000 /* */
177 #define MEC_TXSTAT_EXDEFERRAL 0x0000000008000000 /* */
178 #define MEC_TXSTAT_COLLIDED 0x0000000010000000 /* */
179 #define MEC_TXSTAT_UNUSED 0x7fffffffe0000000ULL /* should be zero */
180 #define MEC_TXSTAT_SENT 0x8000000000000000ULL /* packet sent */
181
182 uint64_t txd_ptr[MEC_NTXPTR];
183 #define MEC_TXPTR_UNUSED2 0x0000000000000007 /* should be zero */
184 #define MEC_TXPTR_DMAADDR 0x00000000fffffff8 /* TX DMA address */
185 #define MEC_TXPTR_LEN 0x0000ffff00000000ULL /* buffer length */
186 #define TXPTR_LEN(x) ((uint64_t)(x) << 32)
187 #define MEC_TXPTR_UNUSED1 0xffff000000000000ULL /* should be zero */
188
189 uint8_t txd_buf[MEC_TXD_BUFSIZE];
190 };
191
192 /*
193 * Receive descriptor list size
194 */
195 #define MEC_NRXDESC 16
196 #define MEC_NRXDESC_MASK (MEC_NRXDESC - 1)
197 #define MEC_NEXTRX(x) (((x) + 1) & MEC_NRXDESC_MASK)
198
199 /*
200 * Receive buffer description
201 */
202 #define MEC_RXDESCSIZE 4096 /* umm, should be 4kbyte aligned */
203 #define MEC_RXD_NRXPAD 3
204 #define MEC_RXD_DMAOFFSET (1 + MEC_RXD_NRXPAD)
205 #define MEC_RXD_BUFOFFSET (MEC_RXD_DMAOFFSET * sizeof(uint64_t))
206 #define MEC_RXD_BUFSIZE (MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
207
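/*
 * Each 4096-byte RX descriptor consists of a 64-bit status word,
 * three 64-bit pad words and a 4064-byte receive buffer.
 */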
208 struct mec_rxdesc {
209 volatile uint64_t rxd_stat;
210 #define MEC_RXSTAT_LEN 0x000000000000ffff /* data length */
211 #define MEC_RXSTAT_VIOLATION 0x0000000000010000 /* code violation (?) */
212 #define MEC_RXSTAT_UNUSED2 0x0000000000020000 /* unknown (?) */
213 #define MEC_RXSTAT_CRCERROR 0x0000000000040000 /* CRC error */
214 #define MEC_RXSTAT_MULTICAST 0x0000000000080000 /* multicast packet */
215 #define MEC_RXSTAT_BROADCAST 0x0000000000100000 /* broadcast packet */
216 #define MEC_RXSTAT_INVALID 0x0000000000200000 /* invalid preamble */
217 #define MEC_RXSTAT_LONGEVENT 0x0000000000400000 /* long packet */
218 #define MEC_RXSTAT_BADPACKET 0x0000000000800000 /* bad packet */
219 #define MEC_RXSTAT_CAREVENT 0x0000000001000000 /* carrier event */
220 #define MEC_RXSTAT_MATCHMCAST 0x0000000002000000 /* match multicast */
221 #define MEC_RXSTAT_MATCHMAC 0x0000000004000000 /* match MAC */
222 #define MEC_RXSTAT_SEQNUM 0x00000000f8000000 /* sequence number */
223 #define MEC_RXSTAT_CKSUM 0x0000ffff00000000ULL /* IP checksum */
224 #define MEC_RXSTAT_UNUSED1 0x7fff000000000000ULL /* should be zero */
225 #define MEC_RXSTAT_RECEIVED 0x8000000000000000ULL /* set to 1 on RX */
226 uint64_t rxd_pad1[MEC_RXD_NRXPAD];
227 uint8_t rxd_buf[MEC_RXD_BUFSIZE];
228 };
229
230 /*
231 * control structures for DMA ops
232 */
233 struct mec_control_data {
234 /*
235 * TX descriptors and buffers
236 */
237 struct mec_txdesc mcd_txdesc[MEC_NTXDESC];
238
239 /*
240 * RX descriptors and buffers
241 */
242 struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
243 };
244
245 /*
246 * It _seems_ there are some restrictions on descriptor addresses:
247 *
248 * - Base address of txdescs should be 8kbyte aligned
249 * - Each txdesc should be 128byte aligned
250 * - Each rxdesc should be 4kbyte aligned
251 *
252 * So we specify 8kbyte alignment to allocate the txdescs.
253 * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
254 * so the rxdescs are also allocated 4kbyte aligned.
255 */
256 #define MEC_CONTROL_DATA_ALIGN (8 * 1024)
257
258 #define MEC_CDOFF(x) offsetof(struct mec_control_data, x)
259 #define MEC_CDTXOFF(x) MEC_CDOFF(mcd_txdesc[(x)])
260 #define MEC_CDRXOFF(x) MEC_CDOFF(mcd_rxdesc[(x)])
261
262 /*
263 * software state per device
264 */
265 struct mec_softc {
266 device_t sc_dev; /* generic device structures */
267
268 bus_space_tag_t sc_st; /* bus_space tag */
269 bus_space_handle_t sc_sh; /* bus_space handle */
270 bus_dma_tag_t sc_dmat; /* bus_dma tag */
271 void *sc_sdhook; /* shutdown hook */
272
273 struct ethercom sc_ethercom; /* Ethernet common part */
274
275 struct mii_data sc_mii; /* MII/media information */
276 int sc_phyaddr; /* MII address */
277 struct callout sc_tick_ch; /* tick callout */
278
279 uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */
280
281 bus_dmamap_t sc_cddmamap; /* bus_dma map for control data */
282 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
283
284 /* pointer to allocated control data */
285 struct mec_control_data *sc_control_data;
286 #define sc_txdesc sc_control_data->mcd_txdesc
287 #define sc_rxdesc sc_control_data->mcd_rxdesc
288
289 /* software state for TX descs */
290 struct mec_txsoft sc_txsoft[MEC_NTXDESC];
291
292 int sc_txpending; /* number of TX requests pending */
293 int sc_txdirty; /* first dirty TX descriptor */
294 int sc_txlast; /* last used TX descriptor */
295
296 int sc_rxptr; /* next ready RX buffer */
297
298 #if NRND > 0
299 rndsource_element_t sc_rnd_source; /* random source */
300 #endif
301 };
302
303 #define MEC_CDTXADDR(sc, x) ((sc)->sc_cddma + MEC_CDTXOFF(x))
304 #define MEC_CDRXADDR(sc, x) ((sc)->sc_cddma + MEC_CDRXOFF(x))
305
306 #define MEC_TXDESCSYNC(sc, x, ops) \
307 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
308 MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
309 #define MEC_TXCMDSYNC(sc, x, ops) \
310 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
311 MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
312
313 #define MEC_RXSTATSYNC(sc, x, ops) \
314 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
315 MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
316 #define MEC_RXBUFSYNC(sc, x, len, ops) \
317 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
318 MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET, \
319 MEC_ETHER_ALIGN + (len), (ops))
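/*
 * Note MEC_RXBUFSYNC also covers the 2 bytes of padding the chip
 * apparently inserts in front of the received frame.
 */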
320
321 /* XXX these values should be moved to <net/if_ether.h> ? */
322 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
323 #define MEC_ETHER_ALIGN 2
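/*
 * ETHER_PAD_LEN is the minimum frame length (excluding CRC) that short
 * TX packets are zero-padded to; MEC_ETHER_ALIGN is the 2-byte offset
 * used to keep the payload behind the 14-byte Ethernet header 4-byte
 * aligned within mbufs.
 */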
324
325 static int mec_match(device_t, cfdata_t, void *);
326 static void mec_attach(device_t, device_t, void *);
327
328 static int mec_mii_readreg(device_t, int, int);
329 static void mec_mii_writereg(device_t, int, int, int);
330 static int mec_mii_wait(struct mec_softc *);
331 static void mec_statchg(device_t);
332
333 static void enaddr_aton(const char *, uint8_t *);
334
335 static int mec_init(struct ifnet *);
336 static void mec_start(struct ifnet *);
337 static void mec_watchdog(struct ifnet *);
338 static void mec_tick(void *);
339 static int mec_ioctl(struct ifnet *, u_long, void *);
340 static void mec_reset(struct mec_softc *);
341 static void mec_setfilter(struct mec_softc *);
342 static int mec_intr(void *arg);
343 static void mec_stop(struct ifnet *, int);
344 static void mec_rxintr(struct mec_softc *);
345 static void mec_txintr(struct mec_softc *);
346 static void mec_shutdown(void *);
347
348 CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc),
349 mec_match, mec_attach, NULL, NULL);
350
351 static int mec_matched = 0;
352
353 static int
354 mec_match(device_t parent, cfdata_t cf, void *aux)
355 {
356
357 /* allow only one device */
358 if (mec_matched)
359 return 0;
360
361 mec_matched = 1;
362 return 1;
363 }
364
365 static void
366 mec_attach(device_t parent, device_t self, void *aux)
367 {
368 struct mec_softc *sc = device_private(self);
369 struct mace_attach_args *maa = aux;
370 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
371 uint64_t address, command;
372 const char *macaddr;
373 struct mii_softc *child;
374 bus_dma_segment_t seg;
375 int i, err, rseg;
376 bool mac_is_fake;
377
378 sc->sc_dev = self;
379 sc->sc_st = maa->maa_st;
380 if (bus_space_subregion(sc->sc_st, maa->maa_sh,
381 maa->maa_offset, 0, &sc->sc_sh) != 0) {
382 aprint_error(": can't map i/o space\n");
383 return;
384 }
385
386 /* set up DMA structures */
387 sc->sc_dmat = maa->maa_dmat;
388
389 /*
390 * Allocate the control data structures, and create and load the
391 * DMA map for it.
392 */
393 if ((err = bus_dmamem_alloc(sc->sc_dmat,
394 sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
395 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
396 aprint_error(": unable to allocate control data, error = %d\n",
397 err);
398 goto fail_0;
399 }
400 /*
401 * XXX needs a re-think...
402 * The control data structures contain the whole RX data buffer, so
403 * BUS_DMA_COHERENT (which disables caching) may hurt performance
404 * when copying data from the RX buffer to an mbuf in normal memory;
405 * without it, though, we have to make sure all bus_dmamap_sync(9)
406 * ops are called properly.
407 */
408 if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
409 sizeof(struct mec_control_data),
410 (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
411 aprint_error(": unable to map control data, error = %d\n", err);
412 goto fail_1;
413 }
414 memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));
415
416 if ((err = bus_dmamap_create(sc->sc_dmat,
417 sizeof(struct mec_control_data), 1,
418 sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
419 aprint_error(": unable to create control data DMA map,"
420 " error = %d\n", err);
421 goto fail_2;
422 }
423 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
424 sc->sc_control_data, sizeof(struct mec_control_data), NULL,
425 BUS_DMA_NOWAIT)) != 0) {
426 aprint_error(": unable to load control data DMA map,"
427 " error = %d\n", err);
428 goto fail_3;
429 }
430
431 /* create TX buffer DMA maps */
432 for (i = 0; i < MEC_NTXDESC; i++) {
433 if ((err = bus_dmamap_create(sc->sc_dmat,
434 MCLBYTES, 1, MCLBYTES, 0, 0,
435 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
436 aprint_error(": unable to create tx DMA map %d,"
437 " error = %d\n", i, err);
438 goto fail_4;
439 }
440 }
441
442 callout_init(&sc->sc_tick_ch, 0);
443
444 /* get Ethernet address from ARCBIOS */
445 if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
446 aprint_error(": unable to get MAC address!\n");
447 goto fail_4;
448 }
449 /*
450 * On some machines the DS2502 chip storing the serial number/
451 * mac address is on the pci riser board - if this board is
452 * missing, ARCBIOS will not know a good ethernet address (but
453 * otherwise the machine will work fine).
454 */
455 mac_is_fake = false;
456 if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) {
457 uint32_t ui = 0;
458 const char * netaddr =
459 ARCBIOS->GetEnvironmentVariable("netaddr");
460
461 /*
462 * Create a MAC address by abusing the "netaddr" env var
463 */
464 sc->sc_enaddr[0] = 0xf2;
465 sc->sc_enaddr[1] = 0x0b;
466 sc->sc_enaddr[2] = 0xa4;
467 if (netaddr) {
468 mac_is_fake = true;
469 while (*netaddr) {
470 int v = 0;
471 while (*netaddr && *netaddr != '.') {
472 if (*netaddr >= '0' && *netaddr <= '9')
473 v = v*10 + (*netaddr - '0');
474 netaddr++;
475 }
476 ui <<= 8;
477 ui |= v;
478 if (*netaddr == '.')
479 netaddr++;
480 }
481 }
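/*
 * ui now holds the IPv4 address parsed from "netaddr" (or 0 if it was
 * not set); on the big-endian sgimips hardware its low three octets are
 * at bytes 1..3, so e.g. netaddr=192.168.1.5 would yield the MAC address
 * f2:0b:a4:a8:01:05.
 */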
482 memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3);
483 }
484 if (!mac_is_fake)
485 enaddr_aton(macaddr, sc->sc_enaddr);
486
487 /* set the Ethernet address */
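/*
 * pack the MAC address into the low 48 bits of the 64-bit station
 * address register, with sc_enaddr[0] ending up most significant
 */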
488 address = 0;
489 for (i = 0; i < ETHER_ADDR_LEN; i++) {
490 address = address << 8;
491 address |= sc->sc_enaddr[i];
492 }
493 bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address);
494
495 /* reset device */
496 mec_reset(sc);
497
498 command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);
499
500 aprint_normal(": MAC-110 Ethernet, rev %u\n",
501 (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT));
502
503 if (mac_is_fake)
504 aprint_normal_dev(self,
505 "could not get ethernet address from firmware"
506 " - generated one from the \"netaddr\" environment"
507 " variable\n");
508 aprint_normal_dev(self, "Ethernet address %s\n",
509 ether_sprintf(sc->sc_enaddr));
510
511 /* Done, now attach everything */
512
513 sc->sc_mii.mii_ifp = ifp;
514 sc->sc_mii.mii_readreg = mec_mii_readreg;
515 sc->sc_mii.mii_writereg = mec_mii_writereg;
516 sc->sc_mii.mii_statchg = mec_statchg;
517
518 /* Set up PHY properties */
519 sc->sc_ethercom.ec_mii = &sc->sc_mii;
520 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
521 ether_mediastatus);
522 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
523 MII_OFFSET_ANY, 0);
524
525 child = LIST_FIRST(&sc->sc_mii.mii_phys);
526 if (child == NULL) {
527 /* No PHY attached */
528 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
529 0, NULL);
530 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
531 } else {
532 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
533 sc->sc_phyaddr = child->mii_phy;
534 }
535
536 strcpy(ifp->if_xname, device_xname(self));
537 ifp->if_softc = sc;
538 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
539 ifp->if_ioctl = mec_ioctl;
540 ifp->if_start = mec_start;
541 ifp->if_watchdog = mec_watchdog;
542 ifp->if_init = mec_init;
543 ifp->if_stop = mec_stop;
544 ifp->if_mtu = ETHERMTU;
545 IFQ_SET_READY(&ifp->if_snd);
546
547 if_attach(ifp);
548 ether_ifattach(ifp, sc->sc_enaddr);
549
550 /* establish interrupt */
551 cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);
552
553 #if NRND > 0
554 rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
555 RND_TYPE_NET, 0);
556 #endif
557
558 /* set shutdown hook to reset interface on powerdown */
559 sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc);
560
561 return;
562
563 /*
564 * Free any resources we've allocated during the failed attach
565 * attempt. Do this in reverse order and fall through.
566 */
567 fail_4:
568 for (i = 0; i < MEC_NTXDESC; i++) {
569 if (sc->sc_txsoft[i].txs_dmamap != NULL)
570 bus_dmamap_destroy(sc->sc_dmat,
571 sc->sc_txsoft[i].txs_dmamap);
572 }
573 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
574 fail_3:
575 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
576 fail_2:
577 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
578 sizeof(struct mec_control_data));
579 fail_1:
580 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
581 fail_0:
582 return;
583 }
584
585 static int
586 mec_mii_readreg(device_t self, int phy, int reg)
587 {
588 struct mec_softc *sc = device_private(self);
589 bus_space_tag_t st = sc->sc_st;
590 bus_space_handle_t sh = sc->sc_sh;
591 uint64_t val;
592 int i;
593
594 if (mec_mii_wait(sc) != 0)
595 return 0;
596
597 bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
598 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
599 delay(25);
600 bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
601 delay(25);
602 mec_mii_wait(sc);
603
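/* poll for up to 20 * 30us for the PHY read data to become valid */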
604 for (i = 0; i < 20; i++) {
605 delay(30);
606
607 val = bus_space_read_8(st, sh, MEC_PHY_DATA);
608
609 if ((val & MEC_PHY_DATA_BUSY) == 0)
610 return val & MEC_PHY_DATA_VALUE;
611 }
612 return 0;
613 }
614
615 static void
616 mec_mii_writereg(device_t self, int phy, int reg, int val)
617 {
618 struct mec_softc *sc = device_private(self);
619 bus_space_tag_t st = sc->sc_st;
620 bus_space_handle_t sh = sc->sc_sh;
621
622 if (mec_mii_wait(sc) != 0) {
623 printf("timed out writing %x: %x\n", reg, val);
624 return;
625 }
626
627 bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
628 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
629
630 delay(60);
631
632 bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);
633
634 delay(60);
635
636 mec_mii_wait(sc);
637 }
638
639 static int
640 mec_mii_wait(struct mec_softc *sc)
641 {
642 uint32_t busy;
643 int i, s;
644
645 for (i = 0; i < 100; i++) {
646 delay(30);
647
648 s = splhigh();
649 busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
650 splx(s);
651
652 if ((busy & MEC_PHY_DATA_BUSY) == 0)
653 return 0;
654 #if 0
655 if (busy == 0xffff) /* XXX ? */
656 return 0;
657 #endif
658 }
659
660 printf("%s: MII timed out\n", device_xname(sc->sc_dev));
661 return 1;
662 }
663
664 static void
665 mec_statchg(device_t self)
666 {
667 struct mec_softc *sc = device_private(self);
668 bus_space_tag_t st = sc->sc_st;
669 bus_space_handle_t sh = sc->sc_sh;
670 uint32_t control;
671
672 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
673 control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
674 MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);
675
676 /* must also set IPG here for duplex stuff ... */
677 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
678 control |= MEC_MAC_FULL_DUPLEX;
679 } else {
680 /* set IPG */
681 control |= MEC_MAC_IPG_DEFAULT;
682 }
683
684 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
685 }
686
687 /*
688 * XXX
689 * maybe this function should be moved to a common place
690 * (sgimips/machdep.c or elsewhere) for all on-board network devices.
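* It parses a colon-separated hex string of the form "xx:xx:xx:xx:xx:xx".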
691 */
692 static void
693 enaddr_aton(const char *str, uint8_t *eaddr)
694 {
695 int i;
696 char c;
697
698 for (i = 0; i < ETHER_ADDR_LEN; i++) {
699 if (*str == ':')
700 str++;
701
702 c = *str++;
703 if (isdigit(c)) {
704 eaddr[i] = (c - '0');
705 } else if (isxdigit(c)) {
706 eaddr[i] = (toupper(c) + 10 - 'A');
707 }
708 c = *str++;
709 if (isdigit(c)) {
710 eaddr[i] = (eaddr[i] << 4) | (c - '0');
711 } else if (isxdigit(c)) {
712 eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
713 }
714 }
715 }
716
717 static int
718 mec_init(struct ifnet *ifp)
719 {
720 struct mec_softc *sc = ifp->if_softc;
721 bus_space_tag_t st = sc->sc_st;
722 bus_space_handle_t sh = sc->sc_sh;
723 struct mec_rxdesc *rxd;
724 int i, rc;
725
726 /* cancel any pending I/O */
727 mec_stop(ifp, 0);
728
729 /* reset device */
730 mec_reset(sc);
731
732 /* setup filter for multicast or promisc mode */
733 mec_setfilter(sc);
734
735 /* set the TX ring pointer to the base address */
736 bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));
737
738 sc->sc_txpending = 0;
739 sc->sc_txdirty = 0;
740 sc->sc_txlast = MEC_NTXDESC - 1;
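/* prime sc_txlast so that the first mec_start() picks descriptor 0 (MEC_NEXTTX(MEC_NTXDESC - 1) == 0) */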
741
742 /* put RX buffers into FIFO */
743 for (i = 0; i < MEC_NRXDESC; i++) {
744 rxd = &sc->sc_rxdesc[i];
745 rxd->rxd_stat = 0;
746 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
747 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
748 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
749 }
750 sc->sc_rxptr = 0;
751
752 #if 0 /* XXX no info */
753 bus_space_write_8(st, sh, MEC_TIMER, 0);
754 #endif
755
756 /*
757 * MEC_DMA_TX_INT_ENABLE will be set later; otherwise it causes
758 * spurious interrupts while the TX buffers are empty
759 */
760 bus_space_write_8(st, sh, MEC_DMA_CONTROL,
761 (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
762 (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
763 MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
764 MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);
765
766 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
767
768 if ((rc = ether_mediachange(ifp)) != 0)
769 return rc;
770
771 ifp->if_flags |= IFF_RUNNING;
772 ifp->if_flags &= ~IFF_OACTIVE;
773 mec_start(ifp);
774
775 return 0;
776 }
777
778 static void
779 mec_reset(struct mec_softc *sc)
780 {
781 bus_space_tag_t st = sc->sc_st;
782 bus_space_handle_t sh = sc->sc_sh;
783 uint64_t control;
784
785 /* stop DMA first */
786 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
787
788 /* reset chip */
789 bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
790 delay(1000);
791 bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
792 delay(1000);
793
794 /* Default to 100/half and let auto-negotiation work its magic */
795 control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
796 MEC_MAC_IPG_DEFAULT;
797
798 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
799 /* stop DMA again for sanity */
800 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
801
802 DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
803 bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
804 }
805
806 static void
807 mec_start(struct ifnet *ifp)
808 {
809 struct mec_softc *sc = ifp->if_softc;
810 struct mbuf *m0, *m;
811 struct mec_txdesc *txd;
812 struct mec_txsoft *txs;
813 bus_dmamap_t dmamap;
814 bus_space_tag_t st = sc->sc_st;
815 bus_space_handle_t sh = sc->sc_sh;
816 uint64_t txdaddr;
817 int error, firsttx, nexttx, opending;
818 int len, bufoff, buflen, unaligned, txdlen;
819
820 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
821 return;
822
823 /*
824 * Remember the previous txpending and the first transmit descriptor.
825 */
826 opending = sc->sc_txpending;
827 firsttx = MEC_NEXTTX(sc->sc_txlast);
828
829 DPRINTF(MEC_DEBUG_START,
830 ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx));
831
832 for (;;) {
833 /* Grab a packet off the queue. */
834 IFQ_POLL(&ifp->if_snd, m0);
835 if (m0 == NULL)
836 break;
837 m = NULL;
838
839 if (sc->sc_txpending == MEC_NTXDESC) {
840 break;
841 }
842
843 /*
844 * Get the next available transmit descriptor.
845 */
846 nexttx = MEC_NEXTTX(sc->sc_txlast);
847 txd = &sc->sc_txdesc[nexttx];
848 txs = &sc->sc_txsoft[nexttx];
849
850 buflen = 0;
851 bufoff = 0;
852 txdaddr = 0; /* XXX gcc */
853 txdlen = 0; /* XXX gcc */
854
855 len = m0->m_pkthdr.len;
856
857 DPRINTF(MEC_DEBUG_START,
858 ("mec_start: len = %d, nexttx = %d\n", len, nexttx));
859
860 if (len < ETHER_PAD_LEN) {
861 /*
862 * I don't know if the MEC chip does auto padding,
863 * so if the packet is small enough,
864 * just copy it to the buffer in the txdesc.
865 * Maybe this is the simplest way.
866 */
867 DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n"));
868
869 IFQ_DEQUEUE(&ifp->if_snd, m0);
870 bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
871 m_copydata(m0, 0, m0->m_pkthdr.len,
872 txd->txd_buf + bufoff);
873 memset(txd->txd_buf + bufoff + len, 0,
874 ETHER_PAD_LEN - len);
875 len = buflen = ETHER_PAD_LEN;
876
877 txs->txs_flags = MEC_TXS_TXDBUF | buflen;
878 } else {
879 /*
880 * If the packet won't fit in the buffer in the txdesc,
881 * we have to use a concatenate pointer to handle it.
882 * While the MEC can handle up to three segments to
883 * concatenate, it requires that the second and
884 * third segments be 8 byte aligned.
885 * Since that's unlikely for mbuf clusters, we use
886 * only the first concatenate pointer. If the packet
887 * doesn't fit in one DMA segment, allocate a new mbuf
888 * and copy the packet into it.
889 *
890 * Besides, if the start address of the first segment
891 * is not 8 byte aligned, that part has to be copied
892 * to the txdesc buffer. (XXX see the comments below)
893 */
894 DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n"));
895
896 dmamap = txs->txs_dmamap;
897 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
898 BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
899 DPRINTF(MEC_DEBUG_START,
900 ("mec_start: re-allocating mbuf\n"));
901 MGETHDR(m, M_DONTWAIT, MT_DATA);
902 if (m == NULL) {
903 printf("%s: unable to allocate "
904 "TX mbuf\n",
905 device_xname(sc->sc_dev));
906 break;
907 }
908 if (len > (MHLEN - MEC_ETHER_ALIGN)) {
909 MCLGET(m, M_DONTWAIT);
910 if ((m->m_flags & M_EXT) == 0) {
911 printf("%s: unable to allocate "
912 "TX cluster\n",
913 device_xname(sc->sc_dev));
914 m_freem(m);
915 break;
916 }
917 }
918 /*
919 * Each packet has the Ethernet header, so
920 * in many cases the header isn't 4-byte aligned
921 * while the data after the header is 4-byte aligned.
922 * Thus adding a 2-byte offset before copying to the
923 * new mbuf avoids an unaligned copy, which may
924 * improve performance a bit.
925 * As noted above, the unaligned part has to be
926 * copied to the txdesc buffer, so this may cause
927 * extra copy ops, but for now the MEC always
928 * requires some data in the txdesc buffer,
929 * so we always have to copy some data anyway.
930 */
931 m->m_data += MEC_ETHER_ALIGN;
932 m_copydata(m0, 0, len, mtod(m, void *));
933 m->m_pkthdr.len = m->m_len = len;
934 error = bus_dmamap_load_mbuf(sc->sc_dmat,
935 dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
936 if (error) {
937 printf("%s: unable to load TX buffer, "
938 "error = %d\n",
939 device_xname(sc->sc_dev), error);
940 break;
941 }
942 }
943 IFQ_DEQUEUE(&ifp->if_snd, m0);
944 if (m != NULL) {
945 m_freem(m0);
946 m0 = m;
947 }
948
949 /* handle unaligned part */
950 txdaddr = MEC_TXD_ROUNDUP(dmamap->dm_segs[0].ds_addr);
951 txs->txs_flags = MEC_TXS_TXDPTR1;
952 unaligned =
953 dmamap->dm_segs[0].ds_addr & (MEC_TXD_ALIGN - 1);
954 DPRINTF(MEC_DEBUG_START,
955 ("mec_start: ds_addr = 0x%08x, unaligned = %d\n",
956 (u_int)dmamap->dm_segs[0].ds_addr, unaligned));
957 if (unaligned != 0) {
958 buflen = MEC_TXD_ALIGN - unaligned;
959 bufoff = MEC_TXD_BUFSTART(buflen);
960 DPRINTF(MEC_DEBUG_START,
961 ("mec_start: unaligned, "
962 "buflen = %d, bufoff = %d\n",
963 buflen, bufoff));
964 memcpy(txd->txd_buf + bufoff,
965 mtod(m0, void *), buflen);
966 txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
967 }
968 #if 1
969 else {
970 /*
971 * XXX needs hardware info XXX
972 * It seems the MEC always requires some data
973 * in txd_buf[] even if the buffer is
974 * 8-byte aligned; otherwise a DMA abort error
975 * occurs later...
976 */
977 buflen = MEC_TXD_ALIGN;
978 bufoff = MEC_TXD_BUFSTART(buflen);
979 memcpy(txd->txd_buf + bufoff,
980 mtod(m0, void *), buflen);
981 DPRINTF(MEC_DEBUG_START,
982 ("mec_start: aligned, "
983 "buflen = %d, bufoff = %d\n",
984 buflen, bufoff));
985 txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
986 txdaddr += MEC_TXD_ALIGN;
987 }
988 #endif
989 txdlen = len - buflen;
990 DPRINTF(MEC_DEBUG_START,
991 ("mec_start: txdaddr = 0x%08llx, txdlen = %d\n",
992 txdaddr, txdlen));
993
994 /*
995 * sync the DMA map for TX mbuf
996 *
997 * XXX the unaligned part doesn't have to be sync'ed,
998 * but it's harmless...
999 */
1000 bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
1001 dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1002 }
1003
1004 #if NBPFILTER > 0
1005 /*
1006 * Pass packet to bpf if there is a listener.
1007 */
1008 if (ifp->if_bpf)
1009 bpf_mtap(ifp->if_bpf, m0);
1010 #endif
1011
1012 /*
1013 * setup the transmit descriptor.
1014 */
1015
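/* note that both the DATALEN field in txd_cmd and the LEN field in txd_ptr below are loaded with (length - 1) */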
1016 /* TXINT bit will be set later on the last packet */
1017 txd->txd_cmd = (len - 1);
1018 /* but also set the TXINT bit halfway through the TX ring */
1019 if (sc->sc_txpending == (MEC_NTXDESC / 2))
1020 txd->txd_cmd |= MEC_TXCMD_TXINT;
1021
1022 if (txs->txs_flags & MEC_TXS_TXDBUF)
1023 txd->txd_cmd |= TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen);
1024 if (txs->txs_flags & MEC_TXS_TXDPTR1) {
1025 txd->txd_cmd |= MEC_TXCMD_PTR1;
1026 txd->txd_ptr[0] = TXPTR_LEN(txdlen - 1) | txdaddr;
1027 /*
1028 * Store a pointer to the packet so we can
1029 * free it later.
1030 */
1031 txs->txs_mbuf = m0;
1032 } else {
1033 txd->txd_ptr[0] = 0;
1034 /*
1035 * In this case all data is copied to the buffer in the txdesc,
1036 * so we can free the TX mbuf here.
1037 */
1038 m_freem(m0);
1039 }
1040
1041 DPRINTF(MEC_DEBUG_START,
1042 ("mec_start: txd_cmd = 0x%016llx, txd_ptr = 0x%016llx\n",
1043 txd->txd_cmd, txd->txd_ptr[0]));
1044 DPRINTF(MEC_DEBUG_START,
1045 ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n",
1046 len, len, buflen, buflen));
1047
1048 /* sync TX descriptor */
1049 MEC_TXDESCSYNC(sc, nexttx,
1050 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1051
1052 /* advance the TX pointer. */
1053 sc->sc_txpending++;
1054 sc->sc_txlast = nexttx;
1055 }
1056
1057 if (sc->sc_txpending == MEC_NTXDESC) {
1058 /* No more slots; notify upper layer. */
1059 ifp->if_flags |= IFF_OACTIVE;
1060 }
1061
1062 if (sc->sc_txpending != opending) {
1063 /*
1064 * Cause a TX interrupt to happen on the last packet
1065 * we enqueued.
1066 */
1067 sc->sc_txdesc[sc->sc_txlast].txd_cmd |= MEC_TXCMD_TXINT;
1068 MEC_TXCMDSYNC(sc, sc->sc_txlast,
1069 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1070
1071 /* start TX */
1072 bus_space_write_8(st, sh, MEC_TX_RING_PTR,
1073 MEC_NEXTTX(sc->sc_txlast));
1074
1075 /*
1076 * If the transmitter was idle,
1077 * reset the txdirty pointer and re-enable TX interrupt.
1078 */
1079 if (opending == 0) {
1080 sc->sc_txdirty = firsttx;
1081 bus_space_write_8(st, sh, MEC_TX_ALIAS,
1082 MEC_TX_ALIAS_INT_ENABLE);
1083 }
1084
1085 /* Set a watchdog timer in case the chip flakes out. */
1086 ifp->if_timer = 5;
1087 }
1088 }
1089
1090 static void
1091 mec_stop(struct ifnet *ifp, int disable)
1092 {
1093 struct mec_softc *sc = ifp->if_softc;
1094 struct mec_txsoft *txs;
1095 int i;
1096
1097 DPRINTF(MEC_DEBUG_STOP, ("mec_stop\n"));
1098
1099 ifp->if_timer = 0;
1100 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1101
1102 callout_stop(&sc->sc_tick_ch);
1103 mii_down(&sc->sc_mii);
1104
1105 /* release any TX buffers */
1106 for (i = 0; i < MEC_NTXDESC; i++) {
1107 txs = &sc->sc_txsoft[i];
1108 if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
1109 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1110 m_freem(txs->txs_mbuf);
1111 txs->txs_mbuf = NULL;
1112 }
1113 }
1114 }
1115
1116 static int
1117 mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1118 {
1119 int s, error;
1120
1121 s = splnet();
1122
1123 error = ether_ioctl(ifp, cmd, data);
1124 if (error == ENETRESET) {
1125 /*
1126 * Multicast list has changed; set the hardware filter
1127 * accordingly.
1128 */
1129 if (ifp->if_flags & IFF_RUNNING)
1130 error = mec_init(ifp);
1131 else
1132 error = 0;
1133 }
1134
1135 /* Try to get more packets going. */
1136 mec_start(ifp);
1137
1138 splx(s);
1139 return error;
1140 }
1141
1142 static void
1143 mec_watchdog(struct ifnet *ifp)
1144 {
1145 struct mec_softc *sc = ifp->if_softc;
1146
1147 printf("%s: device timeout\n", device_xname(sc->sc_dev));
1148 ifp->if_oerrors++;
1149
1150 mec_init(ifp);
1151 }
1152
1153 static void
1154 mec_tick(void *arg)
1155 {
1156 struct mec_softc *sc = arg;
1157 int s;
1158
1159 s = splnet();
1160 mii_tick(&sc->sc_mii);
1161 splx(s);
1162
1163 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
1164 }
1165
1166 static void
1167 mec_setfilter(struct mec_softc *sc)
1168 {
1169 struct ethercom *ec = &sc->sc_ethercom;
1170 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1171 struct ether_multi *enm;
1172 struct ether_multistep step;
1173 bus_space_tag_t st = sc->sc_st;
1174 bus_space_handle_t sh = sc->sc_sh;
1175 uint64_t mchash;
1176 uint32_t control, hash;
1177 int mcnt;
1178
1179 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1180 control &= ~MEC_MAC_FILTER_MASK;
1181
1182 if (ifp->if_flags & IFF_PROMISC) {
1183 control |= MEC_MAC_FILTER_PROMISC;
1184 bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1185 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1186 return;
1187 }
1188
1189 mcnt = 0;
1190 mchash = 0;
1191 ETHER_FIRST_MULTI(step, ec, enm);
1192 while (enm != NULL) {
1193 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1194 /* set allmulti for a range of multicast addresses */
1195 control |= MEC_MAC_FILTER_ALLMULTI;
1196 bus_space_write_8(st, sh, MEC_MULTICAST,
1197 0xffffffffffffffffULL);
1198 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1199 return;
1200 }
1201
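/* the upper 6 bits of the big-endian CRC select one of the 64 bits in the MEC_MULTICAST hash register */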
1202 #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
1203
1204 hash = mec_calchash(enm->enm_addrlo);
1205 mchash |= 1ULL << hash;
1206 mcnt++;
1207 ETHER_NEXT_MULTI(step, enm);
1208 }
1209
1210 ifp->if_flags &= ~IFF_ALLMULTI;
1211
1212 if (mcnt > 0)
1213 control |= MEC_MAC_FILTER_MATCHMULTI;
1214
1215 bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1216 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1217 }
1218
1219 static int
1220 mec_intr(void *arg)
1221 {
1222 struct mec_softc *sc = arg;
1223 bus_space_tag_t st = sc->sc_st;
1224 bus_space_handle_t sh = sc->sc_sh;
1225 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1226 uint32_t statreg, statack, dmac;
1227 int handled, sent;
1228
1229 DPRINTF(MEC_DEBUG_INTR, ("mec_intr: called\n"));
1230
1231 handled = sent = 0;
1232
1233 for (;;) {
1234 statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);
1235
1236 DPRINTF(MEC_DEBUG_INTR,
1237 ("mec_intr: INT_STAT = 0x%08x\n", statreg));
1238
1239 statack = statreg & MEC_INT_STATUS_MASK;
1240 if (statack == 0)
1241 break;
1242 bus_space_write_8(st, sh, MEC_INT_STATUS, statack);
1243
1244 handled = 1;
1245
1246 if (statack &
1247 (MEC_INT_RX_THRESHOLD |
1248 MEC_INT_RX_FIFO_UNDERFLOW)) {
1249 mec_rxintr(sc);
1250 }
1251
1252 dmac = bus_space_read_8(st, sh, MEC_DMA_CONTROL);
1253 DPRINTF(MEC_DEBUG_INTR,
1254 ("mec_intr: DMA_CONT = 0x%08x\n", dmac));
1255
1256 if (statack &
1257 (MEC_INT_TX_EMPTY |
1258 MEC_INT_TX_PACKET_SENT |
1259 MEC_INT_TX_ABORT)) {
1260 mec_txintr(sc);
1261 sent = 1;
1262 if ((statack & MEC_INT_TX_EMPTY) != 0 &&
1263 (dmac & MEC_DMA_TX_INT_ENABLE) != 0) {
1264 /*
1265 * disable TX interrupt to stop
1266 * TX empty interrupt
1267 */
1268 bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
1269 DPRINTF(MEC_DEBUG_INTR,
1270 ("mec_intr: disable TX_INT\n"));
1271 }
1272 }
1273
1274 if (statack &
1275 (MEC_INT_TX_LINK_FAIL |
1276 MEC_INT_TX_MEM_ERROR |
1277 MEC_INT_TX_ABORT |
1278 MEC_INT_RX_FIFO_UNDERFLOW |
1279 MEC_INT_RX_DMA_UNDERFLOW)) {
1280 printf("%s: mec_intr: interrupt status = 0x%08x\n",
1281 device_xname(sc->sc_dev), statreg);
1282 }
1283 }
1284
1285 if (sent) {
1286 /* try to get more packets going */
1287 mec_start(ifp);
1288 }
1289
1290 #if NRND > 0
1291 if (handled)
1292 rnd_add_uint32(&sc->sc_rnd_source, statreg);
1293 #endif
1294
1295 return handled;
1296 }
1297
1298 static void
1299 mec_rxintr(struct mec_softc *sc)
1300 {
1301 bus_space_tag_t st = sc->sc_st;
1302 bus_space_handle_t sh = sc->sc_sh;
1303 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1304 struct mbuf *m;
1305 struct mec_rxdesc *rxd;
1306 uint64_t rxstat;
1307 u_int len;
1308 int i;
1309
1310 DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: called\n"));
1311
1312 for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
1313 rxd = &sc->sc_rxdesc[i];
1314
1315 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
1316 rxstat = rxd->rxd_stat;
1317
1318 DPRINTF(MEC_DEBUG_RXINTR,
1319 ("mec_rxintr: rxstat = 0x%016llx, rxptr = %d\n",
1320 rxstat, i));
1321 DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxfifo = 0x%08x\n",
1322 (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));
1323
1324 if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
1325 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1326 break;
1327 }
1328
1329 len = rxstat & MEC_RXSTAT_LEN;
1330
1331 if (len < ETHER_MIN_LEN ||
1332 len > (MCLBYTES - MEC_ETHER_ALIGN)) {
1333 /* invalid length packet; drop it. */
1334 DPRINTF(MEC_DEBUG_RXINTR,
1335 ("mec_rxintr: wrong packet\n"));
1336 dropit:
1337 ifp->if_ierrors++;
1338 rxd->rxd_stat = 0;
1339 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1340 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
1341 MEC_CDRXADDR(sc, i));
1342 continue;
1343 }
1344
1345 if (rxstat &
1346 (MEC_RXSTAT_BADPACKET |
1347 MEC_RXSTAT_LONGEVENT |
1348 MEC_RXSTAT_INVALID |
1349 MEC_RXSTAT_CRCERROR |
1350 MEC_RXSTAT_VIOLATION)) {
1351 printf("%s: mec_rxintr: status = 0x%016llx\n",
1352 device_xname(sc->sc_dev), rxstat);
1353 goto dropit;
1354 }
1355
1356 /*
1357 * The MEC includes the CRC with every packet. Trim
1358 * it off here.
1359 */
1360 len -= ETHER_CRC_LEN;
1361
1362 /*
1363 * now allocate an mbuf (and possibly a cluster) to hold
1364 * the received packet.
1365 */
1366 MGETHDR(m, M_DONTWAIT, MT_DATA);
1367 if (m == NULL) {
1368 printf("%s: unable to allocate RX mbuf\n",
1369 device_xname(sc->sc_dev));
1370 goto dropit;
1371 }
1372 if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1373 MCLGET(m, M_DONTWAIT);
1374 if ((m->m_flags & M_EXT) == 0) {
1375 printf("%s: unable to allocate RX cluster\n",
1376 device_xname(sc->sc_dev));
1377 m_freem(m);
1378 m = NULL;
1379 goto dropit;
1380 }
1381 }
1382
1383 /*
1384 * Note the MEC chip seems to insert 2 bytes of padding at the top of the
1385 * RX buffer, but we copy the whole buffer to avoid an unaligned copy.
1386 */
1387 MEC_RXBUFSYNC(sc, i, len, BUS_DMASYNC_POSTREAD);
1388 memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
1389 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
1390 m->m_data += MEC_ETHER_ALIGN;
1391
1392 /* put RX buffer into FIFO again */
1393 rxd->rxd_stat = 0;
1394 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1395 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
1396
1397 m->m_pkthdr.rcvif = ifp;
1398 m->m_pkthdr.len = m->m_len = len;
1399
1400 ifp->if_ipackets++;
1401
1402 #if NBPFILTER > 0
1403 /*
1404 * Pass this up to any BPF listeners, but only
1405 * pass it up the stack if it's for us.
1406 */
1407 if (ifp->if_bpf)
1408 bpf_mtap(ifp->if_bpf, m);
1409 #endif
1410
1411 /* Pass it on. */
1412 (*ifp->if_input)(ifp, m);
1413 }
1414
1415 /* update RX pointer */
1416 sc->sc_rxptr = i;
1417 }
1418
1419 static void
1420 mec_txintr(struct mec_softc *sc)
1421 {
1422 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1423 struct mec_txdesc *txd;
1424 struct mec_txsoft *txs;
1425 bus_dmamap_t dmamap;
1426 uint64_t txstat;
1427 int i;
1428 u_int col;
1429
1430 ifp->if_flags &= ~IFF_OACTIVE;
1431
1432 DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: called\n"));
1433
1434 for (i = sc->sc_txdirty; sc->sc_txpending != 0;
1435 i = MEC_NEXTTX(i), sc->sc_txpending--) {
1436 txd = &sc->sc_txdesc[i];
1437
1438 MEC_TXDESCSYNC(sc, i,
1439 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1440
1441 txstat = txd->txd_stat;
1442 DPRINTF(MEC_DEBUG_TXINTR,
1443 ("mec_txintr: dirty = %d, txstat = 0x%016llx\n",
1444 i, txstat));
1445 if ((txstat & MEC_TXSTAT_SENT) == 0) {
1446 MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
1447 break;
1448 }
1449
1450 if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
1451 printf("%s: TX error: txstat = 0x%016llx\n",
1452 device_xname(sc->sc_dev), txstat);
1453 ifp->if_oerrors++;
1454 continue;
1455 }
1456
1457 txs = &sc->sc_txsoft[i];
1458 if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
1459 dmamap = txs->txs_dmamap;
1460 bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
1461 dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1462 bus_dmamap_unload(sc->sc_dmat, dmamap);
1463 m_freem(txs->txs_mbuf);
1464 txs->txs_mbuf = NULL;
1465 }
1466
1467 col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
1468 ifp->if_collisions += col;
1469 ifp->if_opackets++;
1470 }
1471
1472 /* update the dirty TX buffer pointer */
1473 sc->sc_txdirty = i;
1474 DPRINTF(MEC_DEBUG_INTR,
1475 ("mec_txintr: sc_txdirty = %2d, sc_txpending = %2d\n",
1476 sc->sc_txdirty, sc->sc_txpending));
1477
1478 /* cancel the watchdog timer if there are no pending TX packets */
1479 if (sc->sc_txpending == 0)
1480 ifp->if_timer = 0;
1481 }
1482
1483 static void
1484 mec_shutdown(void *arg)
1485 {
1486 struct mec_softc *sc = arg;
1487
1488 mec_stop(&sc->sc_ethercom.ec_if, 1);
1489 /* make sure to stop DMA etc. */
1490 mec_reset(sc);
1491 }
1492