/* $NetBSD: if_mec.c,v 1.23 2008/08/10 16:18:43 tsutsui Exp $ */

/*-
 * Copyright (c) 2004 Izumi Tsutsui. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2003 Christopher SEKIYA
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *          This product includes software developed for the
 *          NetBSD Project.  See http://www.NetBSD.org/ for
 *          information about NetBSD.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * MACE MAC-110 Ethernet driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.23 2008/08/10 16:18:43 tsutsui Exp $");

#include "opt_ddb.h"
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/machtype.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <sgimips/mace/macevar.h>
#include <sgimips/mace/if_mecreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

/* #define MEC_DEBUG */

#ifdef MEC_DEBUG
#define MEC_DEBUG_RESET		0x01
#define MEC_DEBUG_START		0x02
#define MEC_DEBUG_STOP		0x04
#define MEC_DEBUG_INTR		0x08
#define MEC_DEBUG_RXINTR	0x10
#define MEC_DEBUG_TXINTR	0x20
uint32_t mec_debug = 0;
#define DPRINTF(x, y)	if (mec_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif

/*
 * Transmit descriptor list size
 */
#define MEC_NTXDESC		64
#define MEC_NTXDESC_MASK	(MEC_NTXDESC - 1)
#define MEC_NEXTTX(x)		(((x) + 1) & MEC_NTXDESC_MASK)
#define MEC_NTXDESC_RSVD	4

/*
 * software state for TX
 */
struct mec_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	uint32_t txs_flags;
#define MEC_TXS_BUFLEN_MASK	0x0000007f	/* data len in txd_buf */
#define MEC_TXS_TXDBUF		0x00000080	/* txd_buf is used */
#define MEC_TXS_TXDPTR1		0x00000100	/* txd_ptr[0] is used */
};

/*
 * Transmit buffer descriptor
 */
#define MEC_TXDESCSIZE		128
#define MEC_NTXPTR		3
#define MEC_TXD_BUFOFFSET	\
	(sizeof(uint64_t) + MEC_NTXPTR * sizeof(uint64_t))
#define MEC_TXD_BUFSIZE		(MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
#define MEC_TXD_BUFSTART(len)	(MEC_TXD_BUFSIZE - (len))
#define MEC_TXD_ALIGN		8
#define MEC_TXD_ROUNDUP(addr)	\
	(((addr) + (MEC_TXD_ALIGN - 1)) & ~((uint64_t)MEC_TXD_ALIGN - 1))
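/*
 * i.e. each 128 byte TX descriptor holds an 8 byte command/status word,
 * three 8 byte concatenate pointers and a 96 byte inline data buffer.
 */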

struct mec_txdesc {
	volatile uint64_t txd_cmd;
#define MEC_TXCMD_DATALEN	0x000000000000ffff	/* data length */
#define MEC_TXCMD_BUFSTART	0x00000000007f0000	/* start byte offset */
#define TXCMD_BUFSTART(x)	((x) << 16)
#define MEC_TXCMD_TERMDMA	0x0000000000800000	/* stop DMA on abort */
#define MEC_TXCMD_TXINT		0x0000000001000000	/* INT after TX done */
#define MEC_TXCMD_PTR1		0x0000000002000000	/* valid 1st txd_ptr */
#define MEC_TXCMD_PTR2		0x0000000004000000	/* valid 2nd txd_ptr */
#define MEC_TXCMD_PTR3		0x0000000008000000	/* valid 3rd txd_ptr */
#define MEC_TXCMD_UNUSED	0xfffffffff0000000ULL	/* should be zero */

#define txd_stat	txd_cmd
#define MEC_TXSTAT_LEN		0x000000000000ffff	/* TX length */
#define MEC_TXSTAT_COLCNT	0x00000000000f0000	/* collision count */
#define MEC_TXSTAT_COLCNT_SHIFT	16
#define MEC_TXSTAT_LATE_COL	0x0000000000100000	/* late collision */
#define MEC_TXSTAT_CRCERROR	0x0000000000200000	/* */
#define MEC_TXSTAT_DEFERRED	0x0000000000400000	/* */
#define MEC_TXSTAT_SUCCESS	0x0000000000800000	/* TX complete */
#define MEC_TXSTAT_TOOBIG	0x0000000001000000	/* */
#define MEC_TXSTAT_UNDERRUN	0x0000000002000000	/* */
#define MEC_TXSTAT_COLLISIONS	0x0000000004000000	/* */
#define MEC_TXSTAT_EXDEFERRAL	0x0000000008000000	/* */
#define MEC_TXSTAT_COLLIDED	0x0000000010000000	/* */
#define MEC_TXSTAT_UNUSED	0x7fffffffe0000000ULL	/* should be zero */
#define MEC_TXSTAT_SENT		0x8000000000000000ULL	/* packet sent */

	uint64_t txd_ptr[MEC_NTXPTR];
#define MEC_TXPTR_UNUSED2	0x0000000000000007	/* should be zero */
#define MEC_TXPTR_DMAADDR	0x00000000fffffff8	/* TX DMA address */
#define MEC_TXPTR_LEN		0x0000ffff00000000ULL	/* buffer length */
#define TXPTR_LEN(x)		((uint64_t)(x) << 32)
#define MEC_TXPTR_UNUSED1	0xffff000000000000ULL	/* should be zero */

	uint8_t txd_buf[MEC_TXD_BUFSIZE];
};

/*
 * Receive buffer size
 */
#define MEC_NRXDESC		16
#define MEC_NRXDESC_MASK	(MEC_NRXDESC - 1)
#define MEC_NEXTRX(x)		(((x) + 1) & MEC_NRXDESC_MASK)

/*
 * Receive buffer description
 */
#define MEC_RXDESCSIZE		4096	/* umm, should be 4kbyte aligned */
#define MEC_RXD_NRXPAD		3
#define MEC_RXD_DMAOFFSET	(1 + MEC_RXD_NRXPAD)
#define MEC_RXD_BUFOFFSET	(MEC_RXD_DMAOFFSET * sizeof(uint64_t))
#define MEC_RXD_BUFSIZE		(MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)

struct mec_rxdesc {
	volatile uint64_t rxd_stat;
#define MEC_RXSTAT_LEN		0x000000000000ffff	/* data length */
#define MEC_RXSTAT_VIOLATION	0x0000000000010000	/* code violation (?) */
#define MEC_RXSTAT_UNUSED2	0x0000000000020000	/* unknown (?) */
#define MEC_RXSTAT_CRCERROR	0x0000000000040000	/* CRC error */
#define MEC_RXSTAT_MULTICAST	0x0000000000080000	/* multicast packet */
#define MEC_RXSTAT_BROADCAST	0x0000000000100000	/* broadcast packet */
#define MEC_RXSTAT_INVALID	0x0000000000200000	/* invalid preamble */
#define MEC_RXSTAT_LONGEVENT	0x0000000000400000	/* long packet */
#define MEC_RXSTAT_BADPACKET	0x0000000000800000	/* bad packet */
#define MEC_RXSTAT_CAREVENT	0x0000000001000000	/* carrier event */
#define MEC_RXSTAT_MATCHMCAST	0x0000000002000000	/* match multicast */
#define MEC_RXSTAT_MATCHMAC	0x0000000004000000	/* match MAC */
#define MEC_RXSTAT_SEQNUM	0x00000000f8000000	/* sequence number */
#define MEC_RXSTAT_CKSUM	0x0000ffff00000000ULL	/* IP checksum */
#define MEC_RXSTAT_UNUSED1	0x7fff000000000000ULL	/* should be zero */
#define MEC_RXSTAT_RECEIVED	0x8000000000000000ULL	/* set to 1 on RX */
	uint64_t rxd_pad1[MEC_RXD_NRXPAD];
	uint8_t rxd_buf[MEC_RXD_BUFSIZE];
};

/*
 * control structures for DMA ops
 */
struct mec_control_data {
	/*
	 * TX descriptors and buffers
	 */
	struct mec_txdesc mcd_txdesc[MEC_NTXDESC];

	/*
	 * RX descriptors and buffers
	 */
	struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
};

/*
 * It _seems_ there are some restrictions on descriptor address:
 *
 * - Base address of txdescs should be 8kbyte aligned
 * - Each txdesc should be 128byte aligned
 * - Each rxdesc should be 4kbyte aligned
 *
 * So we should specify 8kbyte alignment when allocating txdescs.
 * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
 * so rxdescs are also allocated at 4kbyte aligned addresses.
 */
#define MEC_CONTROL_DATA_ALIGN	(8 * 1024)

#define MEC_CDOFF(x)	offsetof(struct mec_control_data, x)
#define MEC_CDTXOFF(x)	MEC_CDOFF(mcd_txdesc[(x)])
#define MEC_CDRXOFF(x)	MEC_CDOFF(mcd_rxdesc[(x)])

/*
 * software state per device
 */
struct mec_softc {
	device_t sc_dev;		/* generic device structures */

	bus_space_tag_t sc_st;		/* bus_space tag */
	bus_space_handle_t sc_sh;	/* bus_space handle */
	bus_dma_tag_t sc_dmat;		/* bus_dma tag */
	void *sc_sdhook;		/* shutdown hook */

	struct ethercom sc_ethercom;	/* Ethernet common part */

	struct mii_data sc_mii;		/* MII/media information */
	int sc_phyaddr;			/* MII address */
	struct callout sc_tick_ch;	/* tick callout */

	uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */

	bus_dmamap_t sc_cddmamap;	/* bus_dma map for control data */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* pointer to allocated control data */
	struct mec_control_data *sc_control_data;
#define sc_txdesc	sc_control_data->mcd_txdesc
#define sc_rxdesc	sc_control_data->mcd_rxdesc

	/* software state for TX descs */
	struct mec_txsoft sc_txsoft[MEC_NTXDESC];

	int sc_txpending;		/* number of TX requests pending */
	int sc_txdirty;			/* first dirty TX descriptor */
	int sc_txlast;			/* last used TX descriptor */

	int sc_rxptr;			/* next ready RX buffer */

#if NRND > 0
	rndsource_element_t sc_rnd_source; /* random source */
#endif
};

#define MEC_CDTXADDR(sc, x)	((sc)->sc_cddma + MEC_CDTXOFF(x))
#define MEC_CDRXADDR(sc, x)	((sc)->sc_cddma + MEC_CDRXOFF(x))

#define MEC_TXDESCSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
#define MEC_TXCMDSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    MEC_CDTXOFF(x), sizeof(uint64_t), (ops))

#define MEC_RXSTATSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
#define MEC_RXBUFSYNC(sc, x, len, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET,				\
	    MEC_ETHER_ALIGN + (len), (ops))

/* XXX these values should be moved to <net/if_ether.h> ? */
#define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
#define MEC_ETHER_ALIGN	2

static int	mec_match(device_t, cfdata_t, void *);
static void	mec_attach(device_t, device_t, void *);

static int	mec_mii_readreg(device_t, int, int);
static void	mec_mii_writereg(device_t, int, int, int);
static int	mec_mii_wait(struct mec_softc *);
static void	mec_statchg(device_t);

static void	enaddr_aton(const char *, uint8_t *);

static int	mec_init(struct ifnet *ifp);
static void	mec_start(struct ifnet *);
static void	mec_watchdog(struct ifnet *);
static void	mec_tick(void *);
static int	mec_ioctl(struct ifnet *, u_long, void *);
static void	mec_reset(struct mec_softc *);
static void	mec_setfilter(struct mec_softc *);
static int	mec_intr(void *arg);
static void	mec_stop(struct ifnet *, int);
static void	mec_rxintr(struct mec_softc *);
static void	mec_txintr(struct mec_softc *, uint32_t);
static void	mec_shutdown(void *);

CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc),
    mec_match, mec_attach, NULL, NULL);

static int mec_matched = 0;

static int
mec_match(device_t parent, cfdata_t cf, void *aux)
{

	/* allow only one device */
	if (mec_matched)
		return 0;

	mec_matched = 1;
	return 1;
}

static void
mec_attach(device_t parent, device_t self, void *aux)
{
	struct mec_softc *sc = device_private(self);
	struct mace_attach_args *maa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint64_t address, command;
	const char *macaddr;
	struct mii_softc *child;
	bus_dma_segment_t seg;
	int i, err, rseg;
	bool mac_is_fake;

	sc->sc_dev = self;
	sc->sc_st = maa->maa_st;
	if (bus_space_subregion(sc->sc_st, maa->maa_sh,
	    maa->maa_offset, 0, &sc->sc_sh) != 0) {
		aprint_error(": can't map i/o space\n");
		return;
	}

	/* set up DMA structures */
	sc->sc_dmat = maa->maa_dmat;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((err = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": unable to allocate control data, error = %d\n",
		    err);
		goto fail_0;
	}
	/*
	 * XXX needs re-think...
	 * The control data structures contain the whole RX data buffer, so
	 * BUS_DMA_COHERENT (which disables the cache) may cause performance
	 * issues when copying data from the RX buffer to an mbuf in normal
	 * memory, though we have to make sure all bus_dmamap_sync(9) ops are
	 * called properly in that case.
	 */
	if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mec_control_data),
	    (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
		aprint_error(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}
	memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));

	if ((err = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct mec_control_data), 1,
	    sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error(": unable to create control data DMA map,"
		    " error = %d\n", err);
		goto fail_2;
	}
	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct mec_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": unable to load control data DMA map,"
		    " error = %d\n", err);
		goto fail_3;
	}

	/* create TX buffer DMA maps */
	for (i = 0; i < MEC_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, PAGE_SIZE, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error(": unable to create tx DMA map %d,"
			    " error = %d\n", i, err);
			goto fail_4;
		}
	}

	callout_init(&sc->sc_tick_ch, 0);

	/* get Ethernet address from ARCBIOS */
	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		aprint_error(": unable to get MAC address!\n");
		goto fail_4;
	}
	/*
	 * On some machines the DS2502 chip storing the serial number/
	 * mac address is on the pci riser board - if this board is
	 * missing, ARCBIOS will not know a good ethernet address (but
	 * otherwise the machine will work fine).
	 */
	mac_is_fake = false;
	if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) {
		uint32_t ui = 0;
		const char * netaddr =
			ARCBIOS->GetEnvironmentVariable("netaddr");

		/*
		 * Create a MAC address by abusing the "netaddr" env var
		 */
		sc->sc_enaddr[0] = 0xf2;
		sc->sc_enaddr[1] = 0x0b;
		sc->sc_enaddr[2] = 0xa4;
		if (netaddr) {
			mac_is_fake = true;
			while (*netaddr) {
				int v = 0;
				while (*netaddr && *netaddr != '.') {
					if (*netaddr >= '0' && *netaddr <= '9')
						v = v*10 + (*netaddr - '0');
					netaddr++;
				}
				ui <<= 8;
				ui |= v;
				if (*netaddr == '.')
					netaddr++;
			}
		}
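		/*
		 * sgimips is big endian, so this copies the low three
		 * octets of the parsed "netaddr" value (the b.c.d part
		 * of a dotted quad a.b.c.d) into the last three bytes
		 * of the fake MAC address.
		 */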
		memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3);
	}
	if (!mac_is_fake)
		enaddr_aton(macaddr, sc->sc_enaddr);

	/* set the Ethernet address */
	address = 0;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		address = address << 8;
		address |= sc->sc_enaddr[i];
	}
	bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address);

	/* reset device */
	mec_reset(sc);

	command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);

	aprint_normal(": MAC-110 Ethernet, rev %u\n",
	    (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT));

	if (mac_is_fake)
		aprint_normal_dev(self,
		    "could not get ethernet address from firmware"
		    " - generated one from the \"netaddr\" environment"
		    " variable\n");
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* Done, now attach everything */

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mec_mii_readreg;
	sc->sc_mii.mii_writereg = mec_mii_writereg;
	sc->sc_mii.mii_statchg = mec_statchg;

	/* Set up PHY properties */
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
		sc->sc_phyaddr = child->mii_phy;
	}

	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mec_ioctl;
	ifp->if_start = mec_start;
	ifp->if_watchdog = mec_watchdog;
	ifp->if_init = mec_init;
	ifp->if_stop = mec_stop;
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	/* establish interrupt */
	cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);

#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
#endif

	/* set shutdown hook to reset interface on powerdown */
	sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_4:
	for (i = 0; i < MEC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct mec_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

static int
mec_mii_readreg(device_t self, int phy, int reg)
{
	struct mec_softc *sc = device_private(self);
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t val;
	int i;

	if (mec_mii_wait(sc) != 0)
		return 0;

	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
	delay(25);
	bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
	delay(25);
	mec_mii_wait(sc);

	for (i = 0; i < 20; i++) {
		delay(30);

		val = bus_space_read_8(st, sh, MEC_PHY_DATA);

		if ((val & MEC_PHY_DATA_BUSY) == 0)
			return val & MEC_PHY_DATA_VALUE;
	}
	return 0;
}

static void
mec_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct mec_softc *sc = device_private(self);
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;

	if (mec_mii_wait(sc) != 0) {
		printf("timed out writing %x: %x\n", reg, val);
		return;
	}

	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));

	delay(60);

	bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);

	delay(60);

	mec_mii_wait(sc);
}

static int
mec_mii_wait(struct mec_softc *sc)
{
	uint32_t busy;
	int i, s;

	for (i = 0; i < 100; i++) {
		delay(30);

		s = splhigh();
		busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
		splx(s);

		if ((busy & MEC_PHY_DATA_BUSY) == 0)
			return 0;
#if 0
		if (busy == 0xffff) /* XXX ? */
			return 0;
#endif
	}

	printf("%s: MII timed out\n", device_xname(sc->sc_dev));
	return 1;
}

static void
mec_statchg(device_t self)
{
	struct mec_softc *sc = device_private(self);
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint32_t control;

	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
	control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
	    MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);

	/* must also set IPG here for duplex stuff ... */
	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
		control |= MEC_MAC_FULL_DUPLEX;
	} else {
		/* set IPG */
		control |= MEC_MAC_IPG_DEFAULT;
	}

	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
}

/*
 * XXX
 * maybe this function should be moved to common part
 * (sgimips/machdep.c or elsewhere) for all on-board network devices.
 */
static void
enaddr_aton(const char *str, uint8_t *eaddr)
{
	int i;
	char c;

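	/*
	 * Parse a colon separated hex string ("xx:xx:xx:xx:xx:xx")
	 * as returned by ARCBIOS; malformed input is not detected.
	 */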
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}
		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}

static int
mec_init(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct mec_rxdesc *rxd;
	int i, rc;

	/* cancel any pending I/O */
	mec_stop(ifp, 0);

	/* reset device */
	mec_reset(sc);

	/* setup filter for multicast or promisc mode */
	mec_setfilter(sc);

	/* set the TX ring pointer to the base address */
	bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));

	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = MEC_NTXDESC - 1;

	/* put RX buffers into FIFO */
	for (i = 0; i < MEC_NRXDESC; i++) {
		rxd = &sc->sc_rxdesc[i];
		rxd->rxd_stat = 0;
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
	}
	sc->sc_rxptr = 0;

#if 0	/* XXX no info */
	bus_space_write_8(st, sh, MEC_TIMER, 0);
#endif

	/*
	 * MEC_DMA_TX_INT_ENABLE will be set later; otherwise it causes
	 * spurious interrupts when the TX buffers are empty.
	 */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL,
	    (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
	    MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
	    MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);

	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);

	if ((rc = ether_mediachange(ifp)) != 0)
		return rc;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	mec_start(ifp);

	return 0;
}

static void
mec_reset(struct mec_softc *sc)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t control;

	/* stop DMA first */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);

	/* reset chip */
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
	delay(1000);
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
	delay(1000);

	/* Default to 100/half and let auto-negotiation work its magic */
	control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
	    MEC_MAC_IPG_DEFAULT;

	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
	/* stop DMA again for sanity */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);

	DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
	    bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
}

static void
mec_start(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t txdaddr;
	int error, firsttx, nexttx, opending;
	int len, bufoff, buflen, unaligned, txdlen;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the first transmit descriptor.
	 */
	opending = sc->sc_txpending;
	firsttx = MEC_NEXTTX(sc->sc_txlast);

	DPRINTF(MEC_DEBUG_START,
	    ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx));

	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		if (sc->sc_txpending == MEC_NTXDESC - 1) {
			/* preserve the last entry to avoid wraparound */
			break;
		}

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = MEC_NEXTTX(sc->sc_txlast);
		txd = &sc->sc_txdesc[nexttx];
		txs = &sc->sc_txsoft[nexttx];

		buflen = 0;
		bufoff = 0;
		txdaddr = 0;	/* XXX gcc */
		txdlen = 0;	/* XXX gcc */

		len = m0->m_pkthdr.len;

		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: len = %d, nexttx = %d\n", len, nexttx));

		if (len < ETHER_PAD_LEN) {
			/*
			 * I don't know if the MEC chip does auto padding,
			 * so if the packet is small enough,
			 * just copy it to the buffer in txdesc.
			 * Maybe this is the simplest way.
			 */
			DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n"));

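			/*
			 * Copy the frame to the tail of txd_buf so that the
			 * data ends right at the end of the descriptor, and
			 * zero-pad it up to the minimum frame length.
			 */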
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
			m_copydata(m0, 0, m0->m_pkthdr.len,
			    txd->txd_buf + bufoff);
			memset(txd->txd_buf + bufoff + len, 0,
			    ETHER_PAD_LEN - len);
			len = buflen = ETHER_PAD_LEN;

			txs->txs_flags = MEC_TXS_TXDBUF | buflen;
		} else {
			/*
			 * If the packet won't fit in the txdesc buffer,
			 * we have to use a concatenate pointer to handle it.
			 * While the MEC can handle up to three segments to
			 * concatenate, it requires that the second and
			 * third segments be 8 byte aligned.
			 * Since such alignment is unlikely for mbuf clusters,
			 * we use only the first concatenate pointer.  If the
			 * packet doesn't fit in one DMA segment, allocate a
			 * new mbuf and copy the packet to it.
			 *
			 * Besides, if the start address of the first segment
			 * is not 8 byte aligned, the unaligned part has to be
			 * copied to the txdesc buffer.  (XXX see below comments)
			 */
			DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n"));

			dmamap = txs->txs_dmamap;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
			    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: re-allocating mbuf\n"));
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					printf("%s: unable to allocate "
					    "TX mbuf\n",
					    device_xname(sc->sc_dev));
					break;
				}
				if (len > (MHLEN - MEC_ETHER_ALIGN)) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						printf("%s: unable to allocate "
						    "TX cluster\n",
						    device_xname(sc->sc_dev));
						m_freem(m);
						break;
					}
				}
				/*
				 * Each packet has the Ethernet header, so
				 * in many cases the header isn't 4-byte
				 * aligned while the data after the header is.
				 * Thus adding a 2-byte offset before copying
				 * to the new mbuf avoids an unaligned copy
				 * and may improve performance.
				 * As noted above, the unaligned part has to be
				 * copied to the txdesc buffer so this may cause
				 * extra copy ops, but for now the MEC always
				 * requires some data in the txdesc buffer,
				 * so we always have to copy some data anyway.
				 */
				m->m_data += MEC_ETHER_ALIGN;
				m_copydata(m0, 0, len, mtod(m, void *));
				m->m_pkthdr.len = m->m_len = len;
				error = bus_dmamap_load_mbuf(sc->sc_dmat,
				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
				if (error) {
					printf("%s: unable to load TX buffer, "
					    "error = %d\n",
					    device_xname(sc->sc_dev), error);
					break;
				}
			}
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m != NULL) {
				m_freem(m0);
				m0 = m;
			}

			/* handle unaligned part */
			txdaddr = MEC_TXD_ROUNDUP(dmamap->dm_segs[0].ds_addr);
			txs->txs_flags = MEC_TXS_TXDPTR1;
			unaligned =
			    dmamap->dm_segs[0].ds_addr & (MEC_TXD_ALIGN - 1);
			DPRINTF(MEC_DEBUG_START,
			    ("mec_start: ds_addr = 0x%08x, unaligned = %d\n",
			    (u_int)dmamap->dm_segs[0].ds_addr, unaligned));
			if (unaligned != 0) {
				buflen = MEC_TXD_ALIGN - unaligned;
				bufoff = MEC_TXD_BUFSTART(buflen);
				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: unaligned, "
				    "buflen = %d, bufoff = %d\n",
				    buflen, bufoff));
				memcpy(txd->txd_buf + bufoff,
				    mtod(m0, void *), buflen);
				txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
			}
#if 1
			else {
				/*
				 * XXX needs hardware info XXX
				 * It seems the MEC always requires some data
				 * in txd_buf[] even if the buffer is
				 * 8-byte aligned; otherwise a DMA abort error
				 * occurs later...
				 */
				buflen = MEC_TXD_ALIGN;
				bufoff = MEC_TXD_BUFSTART(buflen);
				memcpy(txd->txd_buf + bufoff,
				    mtod(m0, void *), buflen);
				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: aligned, "
				    "buflen = %d, bufoff = %d\n",
				    buflen, bufoff));
				txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
				txdaddr += MEC_TXD_ALIGN;
			}
#endif
			txdlen = len - buflen;
			DPRINTF(MEC_DEBUG_START,
			    ("mec_start: txdaddr = 0x%08llx, txdlen = %d\n",
			    txdaddr, txdlen));

			/*
			 * sync the DMA map for TX mbuf
			 *
			 * XXX unaligned part doesn't have to be sync'ed,
			 * but it's harmless...
			 */
			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * setup the transmit descriptor.
		 */

		/* TXINT bit will be set later on the last packet */
		txd->txd_cmd = (len - 1);
		/* but also set TXINT bit halfway through the TX ring */
		if (sc->sc_txpending == (MEC_NTXDESC / 2))
			txd->txd_cmd |= MEC_TXCMD_TXINT;

		if (txs->txs_flags & MEC_TXS_TXDBUF)
			txd->txd_cmd |= TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen);
		if (txs->txs_flags & MEC_TXS_TXDPTR1) {
			txd->txd_cmd |= MEC_TXCMD_PTR1;
			txd->txd_ptr[0] = TXPTR_LEN(txdlen - 1) | txdaddr;
			/*
			 * Store a pointer to the packet so we can
			 * free it later.
			 */
			txs->txs_mbuf = m0;
		} else {
			txd->txd_ptr[0] = 0;
			/*
			 * In this case all data are copied to the buffer
			 * in txdesc, so we can free the TX mbuf here.
			 */
			m_freem(m0);
		}

		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: txd_cmd = 0x%016llx, txd_ptr = 0x%016llx\n",
		    txd->txd_cmd, txd->txd_ptr[0]));
		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n",
		    len, len, buflen, buflen));

		/* sync TX descriptor */
		MEC_TXDESCSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* advance the TX pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;
	}

	if (sc->sc_txpending == MEC_NTXDESC - 1) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * Cause a TX interrupt to happen on the last packet
		 * we enqueued.
		 */
		sc->sc_txdesc[sc->sc_txlast].txd_cmd |= MEC_TXCMD_TXINT;
		MEC_TXCMDSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* start TX */
		bus_space_write_8(st, sh, MEC_TX_RING_PTR,
		    MEC_NEXTTX(sc->sc_txlast));

		/*
		 * If the transmitter was idle,
		 * reset the txdirty pointer and re-enable TX interrupt.
		 */
		if (opending == 0) {
			sc->sc_txdirty = firsttx;
			bus_space_write_8(st, sh, MEC_TX_ALIAS,
			    MEC_TX_ALIAS_INT_ENABLE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

static void
mec_stop(struct ifnet *ifp, int disable)
{
	struct mec_softc *sc = ifp->if_softc;
	struct mec_txsoft *txs;
	int i;

	DPRINTF(MEC_DEBUG_STOP, ("mec_stop\n"));

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	callout_stop(&sc->sc_tick_ch);
	mii_down(&sc->sc_mii);

	/* release any TX buffers */
	for (i = 0; i < MEC_NTXDESC; i++) {
		txs = &sc->sc_txsoft[i];
		if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}
}

static int
mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = mec_init(ifp);
		else
			error = 0;
	}

	/* Try to get more packets going. */
	mec_start(ifp);

	splx(s);
	return error;
}

static void
mec_watchdog(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;

	mec_init(ifp);
}

static void
mec_tick(void *arg)
{
	struct mec_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
}

static void
mec_setfilter(struct mec_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t mchash;
	uint32_t control, hash;
	int mcnt;

	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
	control &= ~MEC_MAC_FILTER_MASK;

	if (ifp->if_flags & IFF_PROMISC) {
		control |= MEC_MAC_FILTER_PROMISC;
		bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
		bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
		return;
	}

	mcnt = 0;
	mchash = 0;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* set allmulti for a range of multicast addresses */
			control |= MEC_MAC_FILTER_ALLMULTI;
			bus_space_write_8(st, sh, MEC_MULTICAST,
			    0xffffffffffffffffULL);
			bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
			return;
		}

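/*
 * The hash is the upper 6 bits of the big-endian CRC32 of the address;
 * each hash value selects one bit in the 64-bit MEC_MULTICAST register.
 */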
#define mec_calchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)

		hash = mec_calchash(enm->enm_addrlo);
		mchash |= 1ULL << hash;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt > 0)
		control |= MEC_MAC_FILTER_MATCHMULTI;

	bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
}

static int
mec_intr(void *arg)
{
	struct mec_softc *sc = arg;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t statreg, statack, txptr;
	int handled, sent;

	DPRINTF(MEC_DEBUG_INTR, ("mec_intr: called\n"));

	handled = sent = 0;

	for (;;) {
		statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);

		DPRINTF(MEC_DEBUG_INTR,
		    ("mec_intr: INT_STAT = 0x%08x\n", statreg));

		statack = statreg & MEC_INT_STATUS_MASK;
		if (statack == 0)
			break;
		bus_space_write_8(st, sh, MEC_INT_STATUS, statack);

		handled = 1;

		if (statack &
		    (MEC_INT_RX_THRESHOLD |
		     MEC_INT_RX_FIFO_UNDERFLOW)) {
			mec_rxintr(sc);
		}

		if (statack &
		    (MEC_INT_TX_EMPTY |
		     MEC_INT_TX_PACKET_SENT |
		     MEC_INT_TX_ABORT)) {
			txptr = (statreg & MEC_INT_TX_RING_BUFFER_ALIAS)
			    >> MEC_INT_TX_RING_BUFFER_SHIFT;
			mec_txintr(sc, txptr);
			sent = 1;
			if ((statack & MEC_INT_TX_EMPTY) != 0) {
				/*
				 * disable TX interrupt to stop
				 * TX empty interrupt
				 */
				bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
				DPRINTF(MEC_DEBUG_INTR,
				    ("mec_intr: disable TX_INT\n"));
			}
		}

		if (statack &
		    (MEC_INT_TX_LINK_FAIL |
		     MEC_INT_TX_MEM_ERROR |
		     MEC_INT_TX_ABORT |
		     MEC_INT_RX_FIFO_UNDERFLOW |
		     MEC_INT_RX_DMA_UNDERFLOW)) {
			printf("%s: mec_intr: interrupt status = 0x%08x\n",
			    device_xname(sc->sc_dev), statreg);
		}
	}

	if (sent && !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* try to get more packets going */
		mec_start(ifp);
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, statreg);
#endif

	return handled;
}

static void
mec_rxintr(struct mec_softc *sc)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mec_rxdesc *rxd;
	uint64_t rxstat;
	u_int len;
	int i;

	DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: called\n"));

	for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
		rxd = &sc->sc_rxdesc[i];

		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
		rxstat = rxd->rxd_stat;

		DPRINTF(MEC_DEBUG_RXINTR,
		    ("mec_rxintr: rxstat = 0x%016llx, rxptr = %d\n",
		    rxstat, i));
		DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxfifo = 0x%08x\n",
		    (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));

		if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		len = rxstat & MEC_RXSTAT_LEN;

		if (len < ETHER_MIN_LEN ||
		    len > (MCLBYTES - MEC_ETHER_ALIGN)) {
			/* invalid length packet; drop it. */
			DPRINTF(MEC_DEBUG_RXINTR,
			    ("mec_rxintr: wrong packet\n"));
 dropit:
			ifp->if_ierrors++;
			rxd->rxd_stat = 0;
			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
			bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
			    MEC_CDRXADDR(sc, i));
			continue;
		}

		if (rxstat &
		    (MEC_RXSTAT_BADPACKET |
		     MEC_RXSTAT_LONGEVENT |
		     MEC_RXSTAT_INVALID |
		     MEC_RXSTAT_CRCERROR |
		     MEC_RXSTAT_VIOLATION)) {
			printf("%s: mec_rxintr: status = 0x%016llx\n",
			    device_xname(sc->sc_dev), rxstat);
			goto dropit;
		}

		/*
		 * The MEC includes the CRC with every packet.  Trim
		 * it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * now allocate an mbuf (and possibly a cluster) to hold
		 * the received packet.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate RX mbuf\n",
			    device_xname(sc->sc_dev));
			goto dropit;
		}
		if (len > (MHLEN - MEC_ETHER_ALIGN)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate RX cluster\n",
				    device_xname(sc->sc_dev));
				m_freem(m);
				m = NULL;
				goto dropit;
			}
		}

		/*
		 * Note the MEC chip seems to insert 2 bytes of padding at
		 * the top of the RX buffer, but we copy the whole buffer
		 * to avoid an unaligned copy.
		 */
		MEC_RXBUFSYNC(sc, i, len, BUS_DMASYNC_POSTREAD);
		memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
		m->m_data += MEC_ETHER_ALIGN;

		/* put RX buffer into FIFO again */
		rxd->rxd_stat = 0;
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* update RX pointer */
	sc->sc_rxptr = i;
}

static void
mec_txintr(struct mec_softc *sc, uint32_t txptr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	uint64_t txstat;
	int i;
	u_int col;

	DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: called\n"));

	for (i = sc->sc_txdirty; i != txptr && sc->sc_txpending != 0;
	    i = MEC_NEXTTX(i), sc->sc_txpending--) {
		txd = &sc->sc_txdesc[i];

		MEC_TXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = txd->txd_stat;
		DPRINTF(MEC_DEBUG_TXINTR,
		    ("mec_txintr: dirty = %d, txstat = 0x%016llx\n",
		    i, txstat));
		if ((txstat & MEC_TXSTAT_SENT) == 0) {
			MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
			printf("%s: TX error: txstat = 0x%016llx\n",
			    device_xname(sc->sc_dev), txstat);
			ifp->if_oerrors++;
			continue;
		}

		txs = &sc->sc_txsoft[i];
		if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
			dmamap = txs->txs_dmamap;
			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
		ifp->if_collisions += col;
		ifp->if_opackets++;
	}

	/* update the dirty TX buffer pointer */
	sc->sc_txdirty = i;
	DPRINTF(MEC_DEBUG_INTR,
	    ("mec_txintr: sc_txdirty = %2d, sc_txpending = %2d\n",
	    sc->sc_txdirty, sc->sc_txpending));

	/* cancel the watchdog timer if there are no pending TX packets */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
	if (sc->sc_txpending < MEC_NTXDESC - MEC_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}

static void
mec_shutdown(void *arg)
{
	struct mec_softc *sc = arg;

	mec_stop(&sc->sc_ethercom.ec_if, 1);
	/* make sure to stop DMA etc. */
	mec_reset(sc);
}