/*	$NetBSD: if_emac.c,v 1.37 2010/04/05 07:19:31 joerg Exp $	*/

/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * emac(4) supports the following ibm4xx EMACs.
 *   XXXX: ZMII and the 'TCP Acceleration Hardware' (TAH) are not
 *   supported yet...
 *
 *             tested  ('o' = tested, '-' = untested)
 *             ------
 *  405EP	-  10/100 x2
 *  405EX/EXr	o  10/100/1000 x2 (EXr x1), STA v2, 256-bit hash table, RGMII
 *  405GP/GPr	o  10/100
 *  440EP	-  10/100 x2, ZMII
 *  440GP	-  10/100 x2, ZMII
 *  440GX	-  10/100/1000 x4, ZMII/RGMII(ch 2, 3), TAH(ch 2, 3)
 *  440SP	-  10/100/1000
 *  440SPe	-  10/100/1000, STA v2
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_emac.c,v 1.37 2010/04/05 07:19:31 joerg Exp $");

#include "opt_emac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <powerpc/ibm4xx/dcr4xx.h>
#include <powerpc/ibm4xx/mal405gp.h>
#include <powerpc/ibm4xx/dev/emacreg.h>
#include <powerpc/ibm4xx/dev/if_emacreg.h>
#include <powerpc/ibm4xx/dev/if_emacvar.h>
#include <powerpc/ibm4xx/dev/malvar.h>
#include <powerpc/ibm4xx/dev/opbreg.h>
#include <powerpc/ibm4xx/dev/opbvar.h>
#include <powerpc/ibm4xx/dev/plbvar.h>
#if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY)
#include <powerpc/ibm4xx/dev/rmiivar.h>
#endif

#include <dev/mii/miivar.h>

#include "locators.h"


/*
 * Transmit descriptor list size.  There are two Tx channels, each with
 * up to 256 hardware descriptors available.  We currently use one Tx
 * channel.  We tell the upper layers that they can queue a lot of
 * packets, and we go ahead and manage up to 64 of them at a time.  We
 * allow up to 16 DMA segments per packet.
 */
#define	EMAC_NTXSEGS		16
#define	EMAC_TXQUEUELEN		64
#define	EMAC_TXQUEUELEN_MASK	(EMAC_TXQUEUELEN - 1)
#define	EMAC_TXQUEUE_GC		(EMAC_TXQUEUELEN / 4)
#define	EMAC_NTXDESC		256
#define	EMAC_NTXDESC_MASK	(EMAC_NTXDESC - 1)
#define	EMAC_NEXTTX(x)		(((x) + 1) & EMAC_NTXDESC_MASK)
#define	EMAC_NEXTTXS(x)		(((x) + 1) & EMAC_TXQUEUELEN_MASK)
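
/*
 * The ring-index arithmetic above relies on the ring and queue sizes
 * being powers of two, so "(x + 1) & (size - 1)" wraps for free; e.g.
 * EMAC_NEXTTX(255) == 0 with EMAC_NTXDESC == 256.
 */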

/*
 * Receive descriptor list size.  There is one Rx channel with up to 256
 * hardware descriptors available.  We allocate 64 receive descriptors,
 * each with a 2k buffer (MCLBYTES).
 */
#define	EMAC_NRXDESC		64
#define	EMAC_NRXDESC_MASK	(EMAC_NRXDESC - 1)
#define	EMAC_NEXTRX(x)		(((x) + 1) & EMAC_NRXDESC_MASK)
#define	EMAC_PREVRX(x)		(((x) - 1) & EMAC_NRXDESC_MASK)

/*
 * Transmit/receive descriptors that are DMA'd to the EMAC.
 */
struct emac_control_data {
	struct mal_descriptor ecd_txdesc[EMAC_NTXDESC];
	struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC];
};

#define	EMAC_CDOFF(x)		offsetof(struct emac_control_data, x)
#define	EMAC_CDTXOFF(x)		EMAC_CDOFF(ecd_txdesc[(x)])
#define	EMAC_CDRXOFF(x)		EMAC_CDOFF(ecd_rxdesc[(x)])

/*
 * Software state for transmit jobs.
 */
struct emac_txsoft {
	struct mbuf *txs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive descriptors.
 */
struct emac_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct emac_softc {
	device_t sc_dev;		/* generic device information */
	int sc_instance;		/* instance no. */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power management hook */

	struct mii_data sc_mii;		/* MII/media information */
	struct callout sc_callout;	/* tick callout */

	uint32_t sc_mr1;		/* copy of Mode Register 1 */
	uint32_t sc_stacr_read;		/* Read opcode of STAOPC of STACR */
	uint32_t sc_stacr_write;	/* Write opcode of STAOPC of STACR */
	uint32_t sc_stacr_bits;		/* misc bits of STACR */
	bool sc_stacr_completed;	/* Operation completed of STACR */
	int sc_htsize;			/* Hash Table size */

	bus_dmamap_t sc_cddmamap;	/* control data dma map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Software state for transmit/receive descriptors. */
	struct emac_txsoft sc_txsoft[EMAC_TXQUEUELEN];
	struct emac_rxsoft sc_rxsoft[EMAC_NRXDESC];

	/* Control data structures. */
	struct emac_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->ecd_txdesc
#define	sc_rxdescs	sc_control_data->ecd_rxdesc

#ifdef EMAC_EVENT_COUNTERS
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxde;	/* Rx descriptor interrupts */
	struct evcnt sc_ev_txde;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_intr;	/* General EMAC interrupts */

	struct evcnt sc_ev_txreap;	/* Calls to Tx descriptor reaper */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* EMAC_EVENT_COUNTERS */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	int sc_rxptr;			/* next ready RX descriptor/descsoft */

	void (*sc_rmii_enable)(device_t, int);	/* reduced MII enable */
	void (*sc_rmii_disable)(device_t, int);	/* reduced MII disable */
	void (*sc_rmii_speed)(device_t, int, int); /* reduced MII speed */
};

#ifdef EMAC_EVENT_COUNTERS
#define	EMAC_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	EMAC_EVCNT_INCR(ev)	/* nothing */
#endif

#define	EMAC_CDTXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDTXOFF((x)))
#define	EMAC_CDRXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDRXOFF((x)))

#define	EMAC_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > EMAC_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) *	\
		    (EMAC_NTXDESC - __x), (ops));			\
		__n -= (EMAC_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * __n, (ops)); \
} while (/*CONSTCOND*/0)
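
/*
 * For example, EMAC_CDTXSYNC(sc, 254, 4, ops) on the 256-entry ring
 * issues two bus_dmamap_sync() calls: one for descriptors 254-255 and
 * one for descriptors 0-1, since the sync region wraps past the end of
 * the ring.
 */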

#define	EMAC_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDRXOFF((x)), sizeof(struct mal_descriptor), (ops));	\
} while (/*CONSTCOND*/0)

#define	EMAC_INIT_RXDESC(sc, x)						\
do {									\
	struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr + 2;	\
	__rxd->md_data_len = __m->m_ext.ext_size - 2;			\
	__rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT |	\
	    /* Set wrap on last descriptor. */				\
	    (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0);		\
	EMAC_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

#define	EMAC_WRITE(sc, reg, val) \
	bus_space_write_stream_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	EMAC_READ(sc, reg) \
	bus_space_read_stream_4((sc)->sc_st, (sc)->sc_sh, (reg))
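
/*
 * Register accesses use the _stream_ bus_space variants, which per
 * bus_space(9) transfer data without byte-order translation, so the
 * EMAC registers keep their native layout on the big-endian 4xx cores.
 */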

#define	EMAC_SET_FILTER(aht, crc)					\
do {									\
	(aht)[3 - (((crc) >> 26) >> 4)] |= 1 << (((crc) >> 26) & 0xf);	\
} while (/*CONSTCOND*/0)
#define	EMAC_SET_FILTER256(aht, crc)					\
do {									\
	(aht)[7 - (((crc) >> 24) >> 5)] |= 1 << (((crc) >> 24) & 0x1f);	\
} while (/*CONSTCOND*/0)
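
/*
 * Worked example: with the 64-bit hash table, the top 6 bits of the CRC
 * select one of 64 filter bits.  For crc >> 26 == 45 (0b101101),
 * EMAC_SET_FILTER sets bit 45 & 0xf == 13 in word 3 - (45 >> 4) == 1 of
 * the 4 x 16-bit GAHT array.  EMAC_SET_FILTER256 does the same with the
 * top 8 CRC bits spread over 8 x 32-bit words.
 */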

static int	emac_match(device_t, cfdata_t, void *);
static void	emac_attach(device_t, device_t, void *);

static int	emac_intr(void *);
static void	emac_shutdown(void *);

static void	emac_start(struct ifnet *);
static int	emac_ioctl(struct ifnet *, u_long, void *);
static int	emac_init(struct ifnet *);
static void	emac_stop(struct ifnet *, int);
static void	emac_watchdog(struct ifnet *);

static int	emac_add_rxbuf(struct emac_softc *, int);
static void	emac_rxdrain(struct emac_softc *);
static int	emac_set_filter(struct emac_softc *);
static int	emac_txreap(struct emac_softc *);

static void	emac_soft_reset(struct emac_softc *);
static void	emac_smart_reset(struct emac_softc *);

static int	emac_mii_readreg(device_t, int, int);
static void	emac_mii_writereg(device_t, int, int, int);
static void	emac_mii_statchg(device_t);
static uint32_t	emac_mii_wait(struct emac_softc *);
static void	emac_mii_tick(void *);

int emac_copy_small = 0;

CFATTACH_DECL_NEW(emac, sizeof(struct emac_softc),
    emac_match, emac_attach, NULL, NULL);


static int
emac_match(device_t parent, cfdata_t cf, void *aux)
{
	struct opb_attach_args *oaa = aux;

	/* match only on-chip ethernet devices */
	if (strcmp(oaa->opb_name, cf->cf_name) == 0)
		return 1;

	return 0;
}

static void
emac_attach(device_t parent, device_t self, void *aux)
{
	struct opb_attach_args *oaa = aux;
	struct emac_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	bus_dma_segment_t seg;
	int error, i, nseg, opb_freq, opbc, mii_phy = MII_PHY_ANY;
	const uint8_t *enaddr;
	prop_dictionary_t dict = device_properties(self);
	prop_data_t ea;

	bus_space_map(oaa->opb_bt, oaa->opb_addr, EMAC_NREG, 0, &sc->sc_sh);

	sc->sc_dev = self;
	sc->sc_instance = oaa->opb_instance;
	sc->sc_st = oaa->opb_bt;
	sc->sc_dmat = oaa->opb_dmat;

	callout_init(&sc->sc_callout, 0);

	aprint_naive("\n");
	aprint_normal(": Ethernet Media Access Controller\n");

	/* Fetch the Ethernet address. */
	ea = prop_dictionary_get(dict, "mac-address");
	if (ea == NULL) {
		aprint_error_dev(self, "unable to get mac-address property\n");
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);
	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));

#if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY)
	/* Fetch the MII offset. */
	prop_dictionary_get_uint32(dict, "mii-phy", &mii_phy);

#ifdef EMAC_ZMII_PHY
	if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_ZMII)
		zmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable,
		    &sc->sc_rmii_disable, &sc->sc_rmii_speed);
#endif
#ifdef EMAC_RGMII_PHY
	if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_RGMII)
		rgmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable,
		    &sc->sc_rmii_disable, &sc->sc_rmii_speed);
#endif
#endif

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) {
		aprint_error_dev(self,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct emac_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct emac_control_data), 1,
	    sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(self,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct emac_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(self,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EMAC_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Soft reset the EMAC, to put the chip into a known state. */
	emac_soft_reset(sc);

	opb_freq = opb_get_frequency();
	switch (opb_freq) {
	case  50000000: opbc =  STACR_OPBC_50MHZ; break;
	case  66666666: opbc =  STACR_OPBC_66MHZ; break;
	case  83333333: opbc =  STACR_OPBC_83MHZ; break;
	case 100000000: opbc = STACR_OPBC_100MHZ; break;

	default:
		if (opb_freq > 100000000) {
			opbc = STACR_OPBC_A100MHZ;
			break;
		}
		aprint_error_dev(self, "unsupported OPB frequency %dMHz\n",
		    opb_freq / 1000 / 1000);
		goto fail_5;
	}
	if (oaa->opb_flags & OPB_FLAGS_EMAC_GBE) {
		sc->sc_mr1 =
		    MR1_RFS_GBE(MR1__FS_16KB) |
		    MR1_TFS_GBE(MR1__FS_16KB) |
		    MR1_TR0_MULTIPLE |
		    MR1_OBCI(opbc);
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

		if (oaa->opb_flags & OPB_FLAGS_EMAC_STACV2) {
			sc->sc_stacr_read = STACR_STAOPC_READ;
			sc->sc_stacr_write = STACR_STAOPC_WRITE;
			sc->sc_stacr_bits = STACR_OC;
			sc->sc_stacr_completed = false;
		} else {
			sc->sc_stacr_read = STACR_READ;
			sc->sc_stacr_write = STACR_WRITE;
			sc->sc_stacr_completed = true;
		}
	} else {
		/*
		 * Set up Mode Register 1 - set receive and transmit FIFOs to
		 * maximum size, allow transmit of multiple packets (only
		 * channel 0 is used).
		 *
		 * XXX: Allow pause packets??
		 */
		sc->sc_mr1 =
		    MR1_RFS(MR1__FS_4KB) |
		    MR1_TFS(MR1__FS_2KB) |
		    MR1_TR0_MULTIPLE;

		sc->sc_stacr_read = STACR_READ;
		sc->sc_stacr_write = STACR_WRITE;
		sc->sc_stacr_bits = STACR_OPBC(opbc);
		sc->sc_stacr_completed = true;
	}

	intr_establish(oaa->opb_irq, IST_LEVEL, IPL_NET, emac_intr, sc);
	mal_intr_establish(sc->sc_instance, sc);

	if (oaa->opb_flags & OPB_FLAGS_EMAC_HT256)
		sc->sc_htsize = 256;
	else
		sc->sc_htsize = 64;

	/* Clear all interrupts */
	EMAC_WRITE(sc, EMAC_ISR, ISR_ALL);

	/*
	 * Initialise the media structures.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = emac_mii_readreg;
	mii->mii_writereg = emac_mii_writereg;
	mii->mii_statchg = emac_mii_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii_attach(self, mii, 0xffffffff, mii_phy, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, self->dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = emac_start;
	ifp->if_ioctl = emac_ioctl;
	ifp->if_init = emac_init;
	ifp->if_stop = emac_stop;
	ifp->if_watchdog = emac_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef EMAC_EVENT_COUNTERS
	/*
	 * Attach the event counters.
	 */
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    NULL, self->dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, self->dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_txde, EVCNT_TYPE_INTR,
	    NULL, self->dv_xname, "txde");
	evcnt_attach_dynamic(&sc->sc_ev_rxde, EVCNT_TYPE_INTR,
	    NULL, self->dv_xname, "rxde");
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, self->dv_xname, "intr");

	evcnt_attach_dynamic(&sc->sc_ev_txreap, EVCNT_TYPE_MISC,
	    NULL, self->dv_xname, "txreap");
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, self->dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, self->dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, self->dv_xname, "txdrop");
	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, self->dv_xname, "tu");
#endif /* EMAC_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(self,
		    "WARNING: unable to establish shutdown hook\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct emac_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	return;
}

/*
 * EMAC General interrupt handler
 */
static int
emac_intr(void *arg)
{
	struct emac_softc *sc = arg;
	uint32_t status;

	EMAC_EVCNT_INCR(&sc->sc_ev_intr);
	status = EMAC_READ(sc, EMAC_ISR);

	/* Clear the interrupt status bits. */
	EMAC_WRITE(sc, EMAC_ISR, status);

	return 1;
}

static void
emac_shutdown(void *arg)
{
	struct emac_softc *sc = arg;

	emac_stop(&sc->sc_ethercom.ec_if, 0);
}


/*
 * ifnet interface functions
 */

static void
emac_start(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct emac_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx, ofree, seg;

	lasttx = 0;	/* XXX gcc */

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get a work queue entry.  Reclaim used Tx descriptors if
		 * we are running low.
		 */
		if (sc->sc_txsfree < EMAC_TXQUEUE_GC) {
			emac_txreap(sc);
			if (sc->sc_txsfree == 0) {
				EMAC_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				EMAC_EVCNT_INCR(&sc->sc_ev_txdrop);
				aprint_error_ifnet(ifp,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			EMAC_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Initialize the transmit descriptor.
		 */
		firsttx = sc->sc_txnext;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = EMAC_NEXTTX(nexttx)) {
			struct mal_descriptor *txdesc =
			    &sc->sc_txdescs[nexttx];

			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the TX_READY bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			txdesc->md_data = dmamap->dm_segs[seg].ds_addr;
			txdesc->md_data_len = dmamap->dm_segs[seg].ds_len;
			txdesc->md_stat_ctrl =
			    (txdesc->md_stat_ctrl & MAL_TX_WRAP) |
			    (nexttx == firsttx ? 0 : MAL_TX_READY) |
			    EMAC_TXC_GFCS | EMAC_TXC_GPAD;
			lasttx = nexttx;
		}

		/* Set the LAST bit on the last segment. */
		sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST;

		/*
		 * Set up last segment descriptor to send an interrupt after
		 * that descriptor is transmitted, and bypass existing Tx
		 * descriptor reaping method (for now...).
		 */
		sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_INTERRUPT;

		txs->txs_lastdesc = lasttx;

		/* Sync the descriptors we're using. */
		EMAC_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY;
		EMAC_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Tell the EMAC that a new packet is available.
		 */
		EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0 | TMR0_TFAE_2);

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = EMAC_NEXTTXS(sc->sc_txsnext);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txfree == 0)
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_txfree != ofree)
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
}

static int
emac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct emac_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMTU:
	{
		struct ifreq *ifr = (struct ifreq *)data;
		int maxmtu;

		if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU)
			maxmtu = EMAC_MAX_MTU;
		else
			maxmtu = ETHERMTU;

		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > maxmtu)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
			break;
		else if (ifp->if_flags & IFF_UP)
			error = emac_init(ifp);
		else
			error = 0;
		break;
	}

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				error = emac_set_filter(sc);
			else
				error = 0;
		}
	}

	/* try to get more packets going */
	emac_start(ifp);

	splx(s);
	return error;
}

static int
emac_init(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_rxsoft *rxs;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
	int error, i;

	error = 0;

	/* Cancel any pending I/O. */
	emac_stop(ifp, 0);

	/* Reset the chip to a known state. */
	emac_soft_reset(sc);

	/*
	 * Initialise the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	/* set wrap on last descriptor */
	sc->sc_txdescs[EMAC_NTXDESC - 1].md_stat_ctrl |= MAL_TX_WRAP;
	EMAC_CDTXSYNC(sc, 0, EMAC_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = EMAC_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialise the transmit job descriptors.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = EMAC_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialise the receiver descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = emac_add_rxbuf(sc, i)) != 0) {
				aprint_error_ifnet(ifp,
				    "unable to allocate or map rx buffer %d,"
				    " error = %d\n",
				    i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				emac_rxdrain(sc);
				goto out;
			}
		} else
			EMAC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Set the current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Load the MAC address.
	 */
	EMAC_WRITE(sc, EMAC_IAHR, enaddr[0] << 8 | enaddr[1]);
	EMAC_WRITE(sc, EMAC_IALR,
	    enaddr[2] << 24 | enaddr[3] << 16 | enaddr[4] << 8 | enaddr[5]);
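	/*
	 * For example, the address 00:11:22:33:44:55 loads IAHR = 0x0011
	 * (the two high-order octets) and IALR = 0x22334455 (the four
	 * low-order octets).
	 */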

	/* Enable the transmit and receive channel on the MAL. */
	error = mal_start(sc->sc_instance,
	    EMAC_CDTXADDR(sc, 0), EMAC_CDRXADDR(sc, 0));
	if (error)
		goto out;

	sc->sc_mr1 &= ~MR1_JPSM;
	if (ifp->if_mtu > ETHERMTU)
		/* Enable Jumbo Packet Support Mode */
		sc->sc_mr1 |= MR1_JPSM;

	/* Set fifos, media modes. */
	EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);

	/*
	 * Enable Individual and (possibly) Broadcast Address modes,
	 * accept runt packets, and strip padding.
	 */
	EMAC_WRITE(sc, EMAC_RMR, RMR_IAE | RMR_RRP | RMR_SP | RMR_TFAE_2 |
	    (ifp->if_flags & IFF_PROMISC ? RMR_PME : 0) |
	    (ifp->if_flags & IFF_BROADCAST ? RMR_BAE : 0));

	/*
	 * Set multicast filter.
	 */
	emac_set_filter(sc);

	/*
	 * Set low- and urgent-priority request thresholds.
	 */
	EMAC_WRITE(sc, EMAC_TMR1,
	    ((7 << TMR1_TLR_SHIFT) & TMR1_TLR_MASK) |	/* 16 word burst */
	    ((15 << TMR1_TUR_SHIFT) & TMR1_TUR_MASK));
	/*
	 * Set Transmit Request Threshold Register.
	 */
	EMAC_WRITE(sc, EMAC_TRTR, TRTR_256);

	/*
	 * Set high and low receive watermarks.
	 *
	 * XXX: the original wrote both fields with RWMR_RLWM_SHIFT; the
	 * high watermark presumably belongs in the RHWM field, assuming
	 * if_emacreg.h defines RWMR_RHWM_SHIFT for it.
	 */
	EMAC_WRITE(sc, EMAC_RWMR,
	    30 << RWMR_RLWM_SHIFT | 64 << RWMR_RHWM_SHIFT);

	/*
	 * Set frame gap.
	 */
	EMAC_WRITE(sc, EMAC_IPGVR, 8);

	/*
	 * Set interrupt status enable bits for EMAC.
	 */
	EMAC_WRITE(sc, EMAC_ISER,
	    ISR_TXPE |		/* TX Parity Error */
	    ISR_RXPE |		/* RX Parity Error */
	    ISR_TXUE |		/* TX Underrun Event */
	    ISR_RXOE |		/* RX Overrun Event */
	    ISR_OVR  |		/* Overrun Error */
	    ISR_PP   |		/* Pause Packet */
	    ISR_BP   |		/* Bad Packet */
	    ISR_RP   |		/* Runt Packet */
	    ISR_SE   |		/* Short Event */
	    ISR_ALE  |		/* Alignment Error */
	    ISR_BFCS |		/* Bad FCS */
	    ISR_PTLE |		/* Packet Too Long Error */
	    ISR_ORE  |		/* Out of Range Error */
	    ISR_IRE  |		/* In Range Error */
	    ISR_SE0  |		/* Signal Quality Error 0 (SQE) */
	    ISR_TE0  |		/* Transmit Error 0 */
	    ISR_MOS  |		/* MMA Operation Succeeded */
	    ISR_MOF);		/* MMA Operation Failed */

	/*
	 * Enable the transmit and receive channel on the EMAC.
	 */
	EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);

	/*
	 * ... all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_ifnet(ifp, "interface not running\n");
	}
	return error;
}

static void
emac_stop(struct ifnet *ifp, int disable)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_callout);

	/* Down the MII */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	EMAC_WRITE(sc, EMAC_ISER, 0);

	/* Disable the receive and transmit channels. */
	mal_stop(sc->sc_instance);

	/* Disable the transmit enable and receive MACs. */
	EMAC_WRITE(sc, EMAC_MR0,
	    EMAC_READ(sc, EMAC_MR0) & ~(MR0_TXE | MR0_RXE));

	/* Release any queued transmit buffers. */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		emac_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static void
emac_watchdog(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	emac_txreap(sc);

	if (sc->sc_txfree != EMAC_NTXDESC) {
		aprint_error_ifnet(ifp,
		    "device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void)emac_init(ifp);
	} else if (ifp->if_flags & IFF_DEBUG)
		aprint_error_ifnet(ifp, "recovered from device timeout\n");

	/* try to get more packets going */
	emac_start(ifp);
}

static int
emac_add_rxbuf(struct emac_softc *sc, int idx)
{
	struct emac_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("emac_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EMAC_INIT_RXDESC(sc, idx);

	return 0;
}

static void
emac_rxdrain(struct emac_softc *sc)
{
	struct emac_rxsoft *rxs;
	int i;

	for (i = 0; i < EMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static int
emac_set_filter(struct emac_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t rmr, crc, mask, tmp, reg, gaht[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	int regs, cnt = 0, i;

	if (sc->sc_htsize == 256) {
		reg = EMAC_GAHT256(0);
		regs = 8;
	} else {
		reg = EMAC_GAHT64(0);
		regs = 4;
	}
	mask = (1ULL << (sc->sc_htsize / regs)) - 1;
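	/*
	 * With a 256-bit table the hash is spread over 8 registers of
	 * 32 bits each; with a 64-bit table, over 4 registers of 16 bits
	 * each.  "mask" covers all valid bits of one register (the 1ULL
	 * keeps the shift well-defined when htsize / regs == 32).
	 */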

	rmr = EMAC_READ(sc, EMAC_RMR);
	rmr &= ~(RMR_PMME | RMR_MAE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo,
		    enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			gaht[0] = gaht[1] = gaht[2] = gaht[3] =
			    gaht[4] = gaht[5] = gaht[6] = gaht[7] = mask;
			break;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		if (sc->sc_htsize == 256)
			EMAC_SET_FILTER256(gaht, crc);
		else
			EMAC_SET_FILTER(gaht, crc);

		ETHER_NEXT_MULTI(step, enm);
		cnt++;
	}

	for (i = 1, tmp = gaht[0]; i < regs; i++)
		tmp &= gaht[i];
	if (tmp == mask) {
		/* All categories are true. */
		ifp->if_flags |= IFF_ALLMULTI;
		rmr |= RMR_PMME;
	} else if (cnt != 0) {
		/* Some categories are true. */
		for (i = 0; i < regs; i++)
			EMAC_WRITE(sc, reg + (i << 2), gaht[i]);
		rmr |= RMR_MAE;
	}
	EMAC_WRITE(sc, EMAC_RMR, rmr);

	return 0;
}

/*
 * Reap completed Tx descriptors.
 */
static int
emac_txreap(struct emac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_txsoft *txs;
	int handled, i;
	uint32_t txstat;

	EMAC_EVCNT_INCR(&sc->sc_ev_txreap);
	handled = 0;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN;
	    i = EMAC_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		EMAC_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].md_stat_ctrl;
		if (txstat & MAL_TX_READY)
			break;

		handled = 1;

		/*
		 * Check for errors and collisions.
		 */
		if (txstat & (EMAC_TXS_UR | EMAC_TXS_ED))
			ifp->if_oerrors++;

#ifdef EMAC_EVENT_COUNTERS
		if (txstat & EMAC_TXS_UR)
			EMAC_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* EMAC_EVENT_COUNTERS */

		if (txstat &
		    (EMAC_TXS_EC | EMAC_TXS_MC | EMAC_TXS_SC | EMAC_TXS_LC)) {
			if (txstat & EMAC_TXS_EC)
				ifp->if_collisions += 16;
			else if (txstat & EMAC_TXS_MC)
				ifp->if_collisions += 2;	/* XXX? */
			else if (txstat & EMAC_TXS_SC)
				ifp->if_collisions++;
			if (txstat & EMAC_TXS_LC)
				ifp->if_collisions++;
		} else
			ifp->if_opackets++;

		if (ifp->if_flags & IFF_DEBUG) {
			if (txstat & EMAC_TXS_ED)
				aprint_error_ifnet(ifp, "excessive deferral\n");
			if (txstat & EMAC_TXS_EC)
				aprint_error_ifnet(ifp,
				    "excessive collisions\n");
		}

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == EMAC_TXQUEUELEN)
		ifp->if_timer = 0;

	return handled;
}


/*
 * Reset functions
 */

static void
emac_soft_reset(struct emac_softc *sc)
{
	uint32_t sdr;
	int t = 0;

	/*
	 * The PHY must provide a TX clock in order to perform a soft reset
	 * of the EMAC.  If none is present, select the internal clock
	 * (SDR0_MFR[E0CS,E1CS]).  After the soft reset, select the external
	 * clock.
	 */

	sdr = mfsdr(DCR_SDR0_MFR);
	sdr |= SDR0_MFR_ECS(sc->sc_instance);
	mtsdr(DCR_SDR0_MFR, sdr);

	EMAC_WRITE(sc, EMAC_MR0, MR0_SRST);

	sdr = mfsdr(DCR_SDR0_MFR);
	sdr &= ~SDR0_MFR_ECS(sc->sc_instance);
	mtsdr(DCR_SDR0_MFR, sdr);

	delay(5);

	/* wait for the soft reset to finish */
	while (EMAC_READ(sc, EMAC_MR0) & MR0_SRST) {
		if (++t == 1000000 /* 1 second (XXX) */) {
			aprint_error_dev(sc->sc_dev, "Soft Reset failed\n");
			return;
		}
		delay(1);
	}
}

static void
emac_smart_reset(struct emac_softc *sc)
{
	uint32_t mr0;
	int t = 0;

	mr0 = EMAC_READ(sc, EMAC_MR0);
	if (mr0 & (MR0_TXE | MR0_RXE)) {
		mr0 &= ~(MR0_TXE | MR0_RXE);
		EMAC_WRITE(sc, EMAC_MR0, mr0);

		/* wait for the idle state */
		while ((EMAC_READ(sc, EMAC_MR0) & (MR0_TXI | MR0_RXI)) !=
		    (MR0_TXI | MR0_RXI)) {
			if (++t == 1000000 /* 1 second (XXX) */) {
				aprint_error_dev(sc->sc_dev,
				    "Smart Reset failed\n");
				return;
			}
			delay(1);
		}
	}
}


/*
 * MII related functions
 */

static int
emac_mii_readreg(device_t self, int phy, int reg)
{
	struct emac_softc *sc = device_private(self);
	uint32_t sta_reg;

	if (sc->sc_rmii_enable)
		sc->sc_rmii_enable(device_parent(self), sc->sc_instance);

	/* wait for PHY data transfer to complete */
	if (emac_mii_wait(sc))
		goto fail;

	sta_reg =
	    sc->sc_stacr_read |
	    (reg << STACR_PRA_SHIFT) |
	    (phy << STACR_PCDA_SHIFT) |
	    sc->sc_stacr_bits;
	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if (emac_mii_wait(sc))
		goto fail;
	sta_reg = EMAC_READ(sc, EMAC_STACR);

	if (sc->sc_rmii_disable)
		sc->sc_rmii_disable(device_parent(self), sc->sc_instance);

	if (sta_reg & STACR_PHYE)
		return 0;
	return sta_reg >> STACR_PHYD_SHIFT;

 fail:
	if (sc->sc_rmii_disable)
		sc->sc_rmii_disable(device_parent(self), sc->sc_instance);
	return 0;
}

static void
emac_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct emac_softc *sc = device_private(self);
	uint32_t sta_reg;

	if (sc->sc_rmii_enable)
		sc->sc_rmii_enable(device_parent(self), sc->sc_instance);

	/* wait for PHY data transfer to complete */
	if (emac_mii_wait(sc))
		goto out;

	sta_reg =
	    (val << STACR_PHYD_SHIFT) |
	    sc->sc_stacr_write |
	    (reg << STACR_PRA_SHIFT) |
	    (phy << STACR_PCDA_SHIFT) |
	    sc->sc_stacr_bits;
	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if (emac_mii_wait(sc))
		goto out;
	if (EMAC_READ(sc, EMAC_STACR) & STACR_PHYE)
		aprint_error_dev(sc->sc_dev, "MII PHY Error\n");

 out:
	if (sc->sc_rmii_disable)
		sc->sc_rmii_disable(device_parent(self), sc->sc_instance);
}

static void
emac_mii_statchg(device_t self)
{
	struct emac_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;

	/*
	 * MR1 can only be written immediately after a reset...
	 */
	emac_smart_reset(sc);

	sc->sc_mr1 &= ~(MR1_FDE | MR1_ILE | MR1_EIFC | MR1_MF_MASK | MR1_IST);
	if (mii->mii_media_active & IFM_FDX)
		sc->sc_mr1 |= (MR1_FDE | MR1_EIFC | MR1_IST);
	if (mii->mii_media_active & IFM_FLOW)
		sc->sc_mr1 |= MR1_EIFC;
	if (mii->mii_media_active & IFM_LOOP)
		sc->sc_mr1 |= MR1_ILE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		sc->sc_mr1 |= (MR1_MF_1000MBS | MR1_IST);
		break;

	case IFM_100_TX:
		sc->sc_mr1 |= (MR1_MF_100MBS | MR1_IST);
		break;

	case IFM_10_T:
		sc->sc_mr1 |= MR1_MF_10MBS;
		break;

	case IFM_NONE:
		break;

	default:
		aprint_error_dev(self, "unknown sub-type %d\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		break;
	}
	if (sc->sc_rmii_speed)
		sc->sc_rmii_speed(device_parent(self), sc->sc_instance,
		    IFM_SUBTYPE(mii->mii_media_active));

	EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);

	/* Enable TX and RX if already RUNNING */
	if (ifp->if_flags & IFF_RUNNING)
		EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);
}

static uint32_t
emac_mii_wait(struct emac_softc *sc)
{
	int i;
	uint32_t oc;

	/* wait for PHY data transfer to complete */
	i = 0;
	oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC;
	while ((oc == STACR_OC) != sc->sc_stacr_completed) {
		delay(7);
		if (i++ > 5) {
			aprint_error_dev(sc->sc_dev, "MII timed out\n");
			return -1;
		}
		oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC;
	}
	return 0;
}

static void
emac_mii_tick(void *arg)
{
	struct emac_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
}

int
emac_txeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	int handled = 0;

	EMAC_EVCNT_INCR(&sc->sc_ev_txintr);
	handled |= emac_txreap(sc);

	/* try to get more packets going */
	emac_start(&sc->sc_ethercom.ec_if);

	return handled;
}

int
emac_rxeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxintr);

	for (i = sc->sc_rxptr; ; i = EMAC_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].md_stat_ctrl;

		if (rxstat & MAL_RX_EMPTY)
			/*
			 * We have processed all of the receive buffers.
			 */
			break;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 */
		if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE |
		    EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE |
		    EMAC_RXS_IRE)) {
#define	PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				aprint_error_ifnet(ifp,			\
				    "receive error: %s\n", str)
			ifp->if_ierrors++;
			PRINTERR(EMAC_RXS_OE, "overrun error");
			PRINTERR(EMAC_RXS_BP, "bad packet");
			PRINTERR(EMAC_RXS_RP, "runt packet");
			PRINTERR(EMAC_RXS_SE, "short event");
			PRINTERR(EMAC_RXS_AE, "alignment error");
			PRINTERR(EMAC_RXS_BFCS, "bad FCS");
			PRINTERR(EMAC_RXS_PTL, "packet too long");
			PRINTERR(EMAC_RXS_ORE, "out of range error");
			PRINTERR(EMAC_RXS_IRE, "in range error");
#undef PRINTERR
			EMAC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, the 405GP emac
		 * includes the CRC with every packet.
		 */
		len = sc->sc_rxdescs[i].md_data_len - ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (emac_copy_small != 0 && len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, void *),
			    mtod(rxs->rxs_mbuf, void *), len);
			EMAC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (emac_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				EMAC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	return 1;
}

int
emac_txde_intr(void *arg)
{
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_txde);
	aprint_error_dev(sc->sc_dev, "emac_txde_intr\n");
	return 1;
}

int
emac_rxde_intr(void *arg)
{
	struct emac_softc *sc = arg;
	int i;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxde);
	aprint_error_dev(sc->sc_dev, "emac_rxde_intr\n");
	/*
	 * XXX!
	 * This is a bit drastic; we just drop all descriptors that aren't
	 * "clean".  We should probably send any that are up the stack.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (sc->sc_rxdescs[i].md_data_len != MCLBYTES)
			EMAC_INIT_RXDESC(sc, i);
	}

	return 1;
}