1 /* $NetBSD: if_emac.c,v 1.53 2020/07/06 09:34:17 rin Exp $ */
2
3 /*
4 * Copyright 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * emac(4) supports the following ibm4xx EMACs.
40 * XXXX: ZMII and 'TCP Acceleration Hardware' are not supported yet...
41 *
42 * tested
43 * ------
44 * 405EP - 10/100 x2
45 * 405EX/EXr o 10/100/1000 x2 (EXr x1), STA v2, 256-bit hash table, RGMII
46 * 405GP/GPr o 10/100
47 * 440EP - 10/100 x2, ZMII
48 * 440GP - 10/100 x2, ZMII
49 * 440GX - 10/100/1000 x4, ZMII/RGMII(ch 2, 3), TAH(ch 2, 3)
50 * 440SP - 10/100/1000
51 * 440SPe - 10/100/1000, STA v2
52 */
53
54 #include <sys/cdefs.h>
55 __KERNEL_RCSID(0, "$NetBSD: if_emac.c,v 1.53 2020/07/06 09:34:17 rin Exp $");
56
57 #ifdef _KERNEL_OPT
58 #include "opt_emac.h"
59 #endif
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/mbuf.h>
64 #include <sys/kernel.h>
65 #include <sys/socket.h>
66 #include <sys/ioctl.h>
67 #include <sys/cpu.h>
68 #include <sys/device.h>
69
70 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
71
72 #include <net/if.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75 #include <net/if_ether.h>
76
77 #include <net/bpf.h>
78
79 #include <powerpc/ibm4xx/cpu.h>
80 #include <powerpc/ibm4xx/dcr4xx.h>
81 #include <powerpc/ibm4xx/mal405gp.h>
82 #include <powerpc/ibm4xx/dev/emacreg.h>
83 #include <powerpc/ibm4xx/dev/if_emacreg.h>
84 #include <powerpc/ibm4xx/dev/if_emacvar.h>
85 #include <powerpc/ibm4xx/dev/malvar.h>
86 #include <powerpc/ibm4xx/dev/opbreg.h>
87 #include <powerpc/ibm4xx/dev/opbvar.h>
88 #include <powerpc/ibm4xx/dev/plbvar.h>
89 #if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY)
90 #include <powerpc/ibm4xx/dev/rmiivar.h>
91 #endif
92
93 #include <dev/mii/miivar.h>
94
95 #include "locators.h"
96
97
98 /*
99 * Transmit descriptor list size. There are two Tx channels, each with
100 * up to 256 hardware descriptors available. We currently use one Tx
101 * channel. We tell the upper layers that they can queue a lot of
102 * packets, and we go ahead and manage up to 64 of them at a time. We
103 * allow up to 16 DMA segments per packet.
104 */
105 #define EMAC_NTXSEGS 16
106 #define EMAC_TXQUEUELEN 64
107 #define EMAC_TXQUEUELEN_MASK (EMAC_TXQUEUELEN - 1)
108 #define EMAC_TXQUEUE_GC (EMAC_TXQUEUELEN / 4)
109 #define EMAC_NTXDESC 256
110 #define EMAC_NTXDESC_MASK (EMAC_NTXDESC - 1)
111 #define EMAC_NEXTTX(x) (((x) + 1) & EMAC_NTXDESC_MASK)
112 #define EMAC_NEXTTXS(x) (((x) + 1) & EMAC_TXQUEUELEN_MASK)
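/*
 * For example, EMAC_TXQUEUELEN (64) jobs of up to EMAC_NTXSEGS (16) segments
 * each could in the worst case need 1024 descriptors, more than the
 * EMAC_NTXDESC (256) actually available; this is why the free descriptor
 * count (sc_txfree) is tracked separately from the free job count
 * (sc_txsfree) and emac_start() checks both before committing a packet.
 * The NEXT macros simply wrap modulo the power-of-two ring sizes, e.g.
 * EMAC_NEXTTX(255) == 0 and EMAC_NEXTTXS(63) == 0.
 */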
113
114 /*
115 * Receive descriptor list size. There is one Rx channel with up to 256
116 * hardware descriptors available. We allocate 64 receive descriptors,
117 * each with a 2k buffer (MCLBYTES).
118 */
119 #define EMAC_NRXDESC 64
120 #define EMAC_NRXDESC_MASK (EMAC_NRXDESC - 1)
121 #define EMAC_NEXTRX(x) (((x) + 1) & EMAC_NRXDESC_MASK)
122 #define EMAC_PREVRX(x) (((x) - 1) & EMAC_NRXDESC_MASK)
123
124 /*
125 * Transmit/receive descriptors that are DMA'd to the EMAC.
126 */
127 struct emac_control_data {
128 struct mal_descriptor ecd_txdesc[EMAC_NTXDESC];
129 struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC];
130 };
131
132 #define EMAC_CDOFF(x) offsetof(struct emac_control_data, x)
133 #define EMAC_CDTXOFF(x) EMAC_CDOFF(ecd_txdesc[(x)])
134 #define EMAC_CDRXOFF(x) EMAC_CDOFF(ecd_rxdesc[(x)])
135
136 /*
137 * Software state for transmit jobs.
138 */
139 struct emac_txsoft {
140 struct mbuf *txs_mbuf; /* head of mbuf chain */
141 bus_dmamap_t txs_dmamap; /* our DMA map */
142 int txs_firstdesc; /* first descriptor in packet */
143 int txs_lastdesc; /* last descriptor in packet */
144 int txs_ndesc; /* # of descriptors used */
145 };
146
147 /*
148 * Software state for receive descriptors.
149 */
150 struct emac_rxsoft {
151 struct mbuf *rxs_mbuf; /* head of mbuf chain */
152 bus_dmamap_t rxs_dmamap; /* our DMA map */
153 };
154
155 /*
156 * Software state per device.
157 */
158 struct emac_softc {
159 device_t sc_dev; /* generic device information */
160 int sc_instance; /* instance no. */
161 bus_space_tag_t sc_st; /* bus space tag */
162 bus_space_handle_t sc_sh; /* bus space handle */
163 bus_dma_tag_t sc_dmat; /* bus DMA tag */
164 struct ethercom sc_ethercom; /* ethernet common data */
165 void *sc_sdhook; /* shutdown hook */
166 void *sc_powerhook; /* power management hook */
167
168 struct mii_data sc_mii; /* MII/media information */
169 struct callout sc_callout; /* tick callout */
170
171 uint32_t sc_mr1; /* copy of Mode Register 1 */
172 uint32_t sc_stacr_read; /* STACR STAOPC read opcode */
173 uint32_t sc_stacr_write; /* STACR STAOPC write opcode */
174 uint32_t sc_stacr_bits; /* misc STACR bits */
175 bool sc_stacr_completed; /* whether STACR_OC set means operation complete */
176 int sc_htsize; /* Hash Table size */
177
178 bus_dmamap_t sc_cddmamap; /* control data dma map */
179 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
180
181 /* Software state for transmit/receive descriptors. */
182 struct emac_txsoft sc_txsoft[EMAC_TXQUEUELEN];
183 struct emac_rxsoft sc_rxsoft[EMAC_NRXDESC];
184
185 /* Control data structures. */
186 struct emac_control_data *sc_control_data;
187 #define sc_txdescs sc_control_data->ecd_txdesc
188 #define sc_rxdescs sc_control_data->ecd_rxdesc
189
190 #ifdef EMAC_EVENT_COUNTERS
191 struct evcnt sc_ev_rxintr; /* Rx interrupts */
192 struct evcnt sc_ev_txintr; /* Tx interrupts */
193 struct evcnt sc_ev_rxde; /* Rx descriptor interrupts */
194 struct evcnt sc_ev_txde; /* Tx descriptor interrupts */
195 struct evcnt sc_ev_intr; /* General EMAC interrupts */
196
197 struct evcnt sc_ev_txreap; /* Calls to Tx descriptor reaper */
198 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
199 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
200 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
201 struct evcnt sc_ev_tu; /* Tx underrun */
202 #endif /* EMAC_EVENT_COUNTERS */
203
204 int sc_txfree; /* number of free Tx descriptors */
205 int sc_txnext; /* next ready Tx descriptor */
206
207 int sc_txsfree; /* number of free Tx jobs */
208 int sc_txsnext; /* next ready Tx job */
209 int sc_txsdirty; /* dirty Tx jobs */
210
211 int sc_rxptr; /* next ready RX descriptor/descsoft */
212
213 void (*sc_rmii_enable)(device_t, int); /* reduced MII enable */
214 void (*sc_rmii_disable)(device_t, int); /* reduced MII disable */
215 void (*sc_rmii_speed)(device_t, int, int); /* reduced MII speed */
216 };
217
218 #ifdef EMAC_EVENT_COUNTERS
219 #define EMAC_EVCNT_INCR(ev) (ev)->ev_count++
220 #else
221 #define EMAC_EVCNT_INCR(ev) /* nothing */
222 #endif
223
224 #define EMAC_CDTXADDR(sc, x) ((sc)->sc_cddma + EMAC_CDTXOFF((x)))
225 #define EMAC_CDRXADDR(sc, x) ((sc)->sc_cddma + EMAC_CDRXOFF((x)))
226
227 #define EMAC_CDTXSYNC(sc, x, n, ops) \
228 do { \
229 int __x, __n; \
230 \
231 __x = (x); \
232 __n = (n); \
233 \
234 /* If it will wrap around, sync to the end of the ring. */ \
235 if ((__x + __n) > EMAC_NTXDESC) { \
236 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
237 EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * \
238 (EMAC_NTXDESC - __x), (ops)); \
239 __n -= (EMAC_NTXDESC - __x); \
240 __x = 0; \
241 } \
242 \
243 /* Now sync whatever is left. */ \
244 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
245 EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * __n, (ops)); \
246 } while (/*CONSTCOND*/0)
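/*
 * For example, with EMAC_NTXDESC == 256, syncing 4 descriptors starting at
 * index 254 is split by the macro above into two bus_dmamap_sync() calls,
 * one covering descriptors 254-255 and one covering descriptors 0-1, so a
 * region that wraps the end of the ring is never synced as a single
 * out-of-range span.
 */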
247
248 #define EMAC_CDRXSYNC(sc, x, ops) \
249 do { \
250 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
251 EMAC_CDRXOFF((x)), sizeof(struct mal_descriptor), (ops)); \
252 } while (/*CONSTCOND*/0)
253
254 #define EMAC_INIT_RXDESC(sc, x) \
255 do { \
256 struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
257 struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)]; \
258 struct mbuf *__m = __rxs->rxs_mbuf; \
259 \
260 /* \
261 * Note: We scoot the packet forward 2 bytes in the buffer \
262 * so that the payload after the Ethernet header is aligned \
263 * to a 4-byte boundary. \
264 */ \
265 __m->m_data = __m->m_ext.ext_buf + 2; \
266 \
267 __rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr + 2; \
268 __rxd->md_data_len = __m->m_ext.ext_size - 2; \
269 __rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT | \
270 /* Set wrap on last descriptor. */ \
271 (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0); \
272 EMAC_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
273 } while (/*CONSTCOND*/0)
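/*
 * The 2-byte offset above exists because an Ethernet header is 14 bytes:
 * starting the frame at offset 2 within the (4-byte aligned) cluster puts
 * the IP header at offset 16, so 32-bit loads of its fields are naturally
 * aligned.  The same offset is applied to both the mbuf data pointer and
 * the DMA address so that the CPU and the EMAC see the same layout.
 */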
274
275 #define EMAC_WRITE(sc, reg, val) \
276 bus_space_write_stream_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
277 #define EMAC_READ(sc, reg) \
278 bus_space_read_stream_4((sc)->sc_st, (sc)->sc_sh, (reg))
279
280 #define EMAC_SET_FILTER(aht, crc) \
281 do { \
282 (aht)[3 - (((crc) >> 26) >> 4)] |= 1 << (((crc) >> 26) & 0xf); \
283 } while (/*CONSTCOND*/0)
284 #define EMAC_SET_FILTER256(aht, crc) \
285 do { \
286 (aht)[7 - (((crc) >> 24) >> 5)] |= 1 << (((crc) >> 24) & 0x1f); \
287 } while (/*CONSTCOND*/0)
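/*
 * Worked example: for the 64-bit table the top 6 bits of the big-endian CRC
 * are used; the upper 2 of those select one of the four GAHT words (in
 * reverse order) and the lower 4 select the bit.  A CRC with bits 31..26
 * equal to 0x2a therefore sets bit 10 of gaht[3 - 2] == gaht[1].  The
 * 256-bit variant uses the top 8 bits the same way, split 3/5 across
 * gaht[0..7].
 */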
288
289 static int emac_match(device_t, cfdata_t, void *);
290 static void emac_attach(device_t, device_t, void *);
291
292 static int emac_intr(void *);
293 static void emac_shutdown(void *);
294
295 static void emac_start(struct ifnet *);
296 static int emac_ioctl(struct ifnet *, u_long, void *);
297 static int emac_init(struct ifnet *);
298 static void emac_stop(struct ifnet *, int);
299 static void emac_watchdog(struct ifnet *);
300
301 static int emac_add_rxbuf(struct emac_softc *, int);
302 static void emac_rxdrain(struct emac_softc *);
303 static int emac_set_filter(struct emac_softc *);
304 static int emac_txreap(struct emac_softc *);
305
306 static void emac_soft_reset(struct emac_softc *);
307 static void emac_smart_reset(struct emac_softc *);
308
309 static int emac_mii_readreg(device_t, int, int, uint16_t *);
310 static int emac_mii_writereg(device_t, int, int, uint16_t);
311 static void emac_mii_statchg(struct ifnet *);
312 static uint32_t emac_mii_wait(struct emac_softc *);
313 static void emac_mii_tick(void *);
314
315 int emac_copy_small = 0;
316
317 CFATTACH_DECL_NEW(emac, sizeof(struct emac_softc),
318 emac_match, emac_attach, NULL, NULL);
319
320
321 static int
322 emac_match(device_t parent, cfdata_t cf, void *aux)
323 {
324 struct opb_attach_args *oaa = aux;
325
326 /* match only on-chip ethernet devices */
327 if (strcmp(oaa->opb_name, cf->cf_name) == 0)
328 return 1;
329
330 return 0;
331 }
332
333 static void
334 emac_attach(device_t parent, device_t self, void *aux)
335 {
336 struct opb_attach_args *oaa = aux;
337 struct emac_softc *sc = device_private(self);
338 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
339 struct mii_data *mii = &sc->sc_mii;
340 const char * xname = device_xname(self);
341 bus_dma_segment_t seg;
342 int error, i, nseg, opb_freq, opbc, mii_phy = MII_PHY_ANY;
343 const uint8_t *enaddr;
344 prop_dictionary_t dict = device_properties(self);
345 prop_data_t ea;
346
347 bus_space_map(oaa->opb_bt, oaa->opb_addr, EMAC_NREG, 0, &sc->sc_sh);
348
349 sc->sc_dev = self;
350 sc->sc_instance = oaa->opb_instance;
351 sc->sc_st = oaa->opb_bt;
352 sc->sc_dmat = oaa->opb_dmat;
353
354 callout_init(&sc->sc_callout, 0);
355
356 aprint_naive("\n");
357 aprint_normal(": Ethernet Media Access Controller\n");
358
359 /* Fetch the Ethernet address. */
360 ea = prop_dictionary_get(dict, "mac-address");
361 if (ea == NULL) {
362 aprint_error_dev(self, "unable to get mac-address property\n");
363 return;
364 }
365 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
366 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
367 enaddr = prop_data_data_nocopy(ea);
368 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));
369
370 #if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY)
371 /* Fetch the MII PHY address. */
372 prop_dictionary_get_uint32(dict, "mii-phy", &mii_phy);
373
374 #ifdef EMAC_ZMII_PHY
375 if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_ZMII)
376 zmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable,
377 &sc->sc_rmii_disable, &sc->sc_rmii_speed);
378 #endif
379 #ifdef EMAC_RGMII_PHY
380 if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_RGMII)
381 rgmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable,
382 &sc->sc_rmii_disable, &sc->sc_rmii_speed);
383 #endif
384 #endif
385
386 /*
387 * Allocate the control data structures, and create and load the
388 * DMA map for it.
389 */
390 if ((error = bus_dmamem_alloc(sc->sc_dmat,
391 sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) {
392 aprint_error_dev(self,
393 "unable to allocate control data, error = %d\n", error);
394 goto fail_0;
395 }
396
397 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
398 sizeof(struct emac_control_data), (void **)&sc->sc_control_data,
399 BUS_DMA_COHERENT)) != 0) {
400 aprint_error_dev(self,
401 "unable to map control data, error = %d\n", error);
402 goto fail_1;
403 }
404
405 if ((error = bus_dmamap_create(sc->sc_dmat,
406 sizeof(struct emac_control_data), 1,
407 sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
408 aprint_error_dev(self,
409 "unable to create control data DMA map, error = %d\n",
410 error);
411 goto fail_2;
412 }
413
414 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
415 sc->sc_control_data, sizeof(struct emac_control_data), NULL,
416 0)) != 0) {
417 aprint_error_dev(self,
418 "unable to load control data DMA map, error = %d\n", error);
419 goto fail_3;
420 }
421
422 /*
423 * Create the transmit buffer DMA maps.
424 */
425 for (i = 0; i < EMAC_TXQUEUELEN; i++) {
426 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
427 EMAC_NTXSEGS, MCLBYTES, 0, 0,
428 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
429 aprint_error_dev(self,
430 "unable to create tx DMA map %d, error = %d\n",
431 i, error);
432 goto fail_4;
433 }
434 }
435
436 /*
437 * Create the receive buffer DMA maps.
438 */
439 for (i = 0; i < EMAC_NRXDESC; i++) {
440 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
441 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
442 aprint_error_dev(self,
443 "unable to create rx DMA map %d, error = %d\n",
444 i, error);
445 goto fail_5;
446 }
447 sc->sc_rxsoft[i].rxs_mbuf = NULL;
448 }
449
450 /* Soft reset the EMAC to bring the chip to a known state. */
451 emac_soft_reset(sc);
452
453 opb_freq = opb_get_frequency();
454 switch (opb_freq) {
455 case 50000000: opbc = STACR_OPBC_50MHZ; break;
456 case 66666666: opbc = STACR_OPBC_66MHZ; break;
457 case 83333333: opbc = STACR_OPBC_83MHZ; break;
458 case 100000000: opbc = STACR_OPBC_100MHZ; break;
459
460 default:
461 if (opb_freq > 100000000) {
462 opbc = STACR_OPBC_A100MHZ;
463 break;
464 }
465 aprint_error_dev(self, "unsupported OPB frequency %dMHz\n",
466 opb_freq / 1000 / 1000);
467 goto fail_5;
468 }
469 if (oaa->opb_flags & OPB_FLAGS_EMAC_GBE) {
470 sc->sc_mr1 =
471 MR1_RFS_GBE(MR1__FS_16KB) |
472 MR1_TFS_GBE(MR1__FS_16KB) |
473 MR1_TR0_MULTIPLE |
474 MR1_OBCI(opbc);
475 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
476
477 if (oaa->opb_flags & OPB_FLAGS_EMAC_STACV2) {
478 sc->sc_stacr_read = STACR_STAOPC_READ;
479 sc->sc_stacr_write = STACR_STAOPC_WRITE;
480 sc->sc_stacr_bits = STACR_OC;
481 sc->sc_stacr_completed = false;
482 } else {
483 sc->sc_stacr_read = STACR_READ;
484 sc->sc_stacr_write = STACR_WRITE;
485 sc->sc_stacr_completed = true;
486 }
487 } else {
488 /*
489 * Set up Mode Register 1 - set receive and transmit FIFOs to
490 * maximum size, allow transmit of multiple packets (only
491 * channel 0 is used).
492 *
493 * XXX: Allow pause packets??
494 */
495 sc->sc_mr1 =
496 MR1_RFS(MR1__FS_4KB) |
497 MR1_TFS(MR1__FS_2KB) |
498 MR1_TR0_MULTIPLE;
499
500 sc->sc_stacr_read = STACR_READ;
501 sc->sc_stacr_write = STACR_WRITE;
502 sc->sc_stacr_bits = STACR_OPBC(opbc);
503 sc->sc_stacr_completed = true;
504 }
505
506 intr_establish(oaa->opb_irq, IST_LEVEL, IPL_NET, emac_intr, sc);
507 mal_intr_establish(sc->sc_instance, sc);
508
509 if (oaa->opb_flags & OPB_FLAGS_EMAC_HT256)
510 sc->sc_htsize = 256;
511 else
512 sc->sc_htsize = 64;
513
514 /* Clear all interrupts */
515 EMAC_WRITE(sc, EMAC_ISR, ISR_ALL);
516
517 /*
518 * Initialise the media structures.
519 */
520 mii->mii_ifp = ifp;
521 mii->mii_readreg = emac_mii_readreg;
522 mii->mii_writereg = emac_mii_writereg;
523 mii->mii_statchg = emac_mii_statchg;
524
525 sc->sc_ethercom.ec_mii = mii;
526 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
527 mii_attach(self, mii, 0xffffffff, mii_phy, MII_OFFSET_ANY,
528 MIIF_DOPAUSE);
529 if (LIST_FIRST(&mii->mii_phys) == NULL) {
530 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
531 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
532 } else
533 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
534
535 ifp = &sc->sc_ethercom.ec_if;
536 strcpy(ifp->if_xname, xname);
537 ifp->if_softc = sc;
538 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
539 ifp->if_start = emac_start;
540 ifp->if_ioctl = emac_ioctl;
541 ifp->if_init = emac_init;
542 ifp->if_stop = emac_stop;
543 ifp->if_watchdog = emac_watchdog;
544 IFQ_SET_READY(&ifp->if_snd);
545
546 /*
547 * We can support 802.1Q VLAN-sized frames.
548 */
549 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
550
551 /*
552 * Attach the interface.
553 */
554 if_attach(ifp);
555 if_deferred_start_init(ifp, NULL);
556 ether_ifattach(ifp, enaddr);
557
558 #ifdef EMAC_EVENT_COUNTERS
559 /*
560 * Attach the event counters.
561 */
562 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
563 NULL, xname, "txintr");
564 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
565 NULL, xname, "rxintr");
566 evcnt_attach_dynamic(&sc->sc_ev_txde, EVCNT_TYPE_INTR,
567 NULL, xname, "txde");
568 evcnt_attach_dynamic(&sc->sc_ev_rxde, EVCNT_TYPE_INTR,
569 NULL, xname, "rxde");
570 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
571 NULL, xname, "intr");
572
573 evcnt_attach_dynamic(&sc->sc_ev_txreap, EVCNT_TYPE_MISC,
574 NULL, xname, "txreap");
575 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
576 NULL, xname, "txsstall");
577 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
578 NULL, xname, "txdstall");
579 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
580 NULL, xname, "txdrop");
581 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
582 NULL, xname, "tu");
583 #endif /* EMAC_EVENT_COUNTERS */
584
585 /*
586 * Make sure the interface is shutdown during reboot.
587 */
588 sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc);
589 if (sc->sc_sdhook == NULL)
590 aprint_error_dev(self,
591 "WARNING: unable to establish shutdown hook\n");
592
593 return;
594
595 /*
596 * Free any resources we've allocated during the failed attach
597 * attempt. Do this in reverse order and fall through.
598 */
599 fail_5:
600 for (i = 0; i < EMAC_NRXDESC; i++) {
601 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
602 bus_dmamap_destroy(sc->sc_dmat,
603 sc->sc_rxsoft[i].rxs_dmamap);
604 }
605 fail_4:
606 for (i = 0; i < EMAC_TXQUEUELEN; i++) {
607 if (sc->sc_txsoft[i].txs_dmamap != NULL)
608 bus_dmamap_destroy(sc->sc_dmat,
609 sc->sc_txsoft[i].txs_dmamap);
610 }
611 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
612 fail_3:
613 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
614 fail_2:
615 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
616 sizeof(struct emac_control_data));
617 fail_1:
618 bus_dmamem_free(sc->sc_dmat, &seg, nseg);
619 fail_0:
620 return;
621 }
622
623 /*
624 * EMAC General interrupt handler
625 */
626 static int
627 emac_intr(void *arg)
628 {
629 struct emac_softc *sc = arg;
630 uint32_t status;
631
632 EMAC_EVCNT_INCR(&sc->sc_ev_intr);
633 status = EMAC_READ(sc, EMAC_ISR);
634
635 /* Clear the interrupt status bits. */
636 EMAC_WRITE(sc, EMAC_ISR, status);
637
638 return 1;
639 }
640
641 static void
642 emac_shutdown(void *arg)
643 {
644 struct emac_softc *sc = arg;
645
646 emac_stop(&sc->sc_ethercom.ec_if, 0);
647 }
648
649
650 /*
651 * ifnet interface functions
652 */
653
654 static void
655 emac_start(struct ifnet *ifp)
656 {
657 struct emac_softc *sc = ifp->if_softc;
658 struct mbuf *m0;
659 struct emac_txsoft *txs;
660 bus_dmamap_t dmamap;
661 int error, firsttx, nexttx, lasttx, ofree, seg;
662
663 lasttx = 0; /* XXX gcc */
664
665 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
666 return;
667
668 /*
669 * Remember the previous number of free descriptors.
670 */
671 ofree = sc->sc_txfree;
672
673 /*
674 * Loop through the send queue, setting up transmit descriptors
675 * until we drain the queue, or use up all available transmit
676 * descriptors.
677 */
678 for (;;) {
679 /* Grab a packet off the queue. */
680 IFQ_POLL(&ifp->if_snd, m0);
681 if (m0 == NULL)
682 break;
683
684 /*
685 * Get a work queue entry. Reclaim used Tx descriptors if
686 * we are running low.
687 */
688 if (sc->sc_txsfree < EMAC_TXQUEUE_GC) {
689 emac_txreap(sc);
690 if (sc->sc_txsfree == 0) {
691 EMAC_EVCNT_INCR(&sc->sc_ev_txsstall);
692 break;
693 }
694 }
695
696 txs = &sc->sc_txsoft[sc->sc_txsnext];
697 dmamap = txs->txs_dmamap;
698
699 /*
700 * Load the DMA map. If this fails, the packet either
701 * didn't fit in the allotted number of segments, or we
702 * were short on resources. If the packet was too
703 * fragmented, we drop it; otherwise we stop and retry later.
704 */
705 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
706 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
707 if (error) {
708 if (error == EFBIG) {
709 EMAC_EVCNT_INCR(&sc->sc_ev_txdrop);
710 aprint_error_ifnet(ifp,
711 "Tx packet consumes too many "
712 "DMA segments, dropping...\n");
713 IFQ_DEQUEUE(&ifp->if_snd, m0);
714 m_freem(m0);
715 continue;
716 }
717 /* Short on resources, just stop for now. */
718 break;
719 }
720
721 /*
722 * Ensure we have enough descriptors free to describe
723 * the packet.
724 */
725 if (dmamap->dm_nsegs > sc->sc_txfree) {
726 /*
727 * Not enough free descriptors to transmit this
728 * packet. We haven't committed anything yet,
729 * so just unload the DMA map, put the packet
730 * back on the queue, and punt. Notify the upper
731 * layer that there are no more slots left.
732 *
733 */
734 ifp->if_flags |= IFF_OACTIVE;
735 bus_dmamap_unload(sc->sc_dmat, dmamap);
736 EMAC_EVCNT_INCR(&sc->sc_ev_txdstall);
737 break;
738 }
739
740 IFQ_DEQUEUE(&ifp->if_snd, m0);
741
742 /*
743 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
744 */
745
746 /* Sync the DMA map. */
747 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
748 BUS_DMASYNC_PREWRITE);
749
750 /*
751 * Store a pointer to the packet so that we can free it
752 * later.
753 */
754 txs->txs_mbuf = m0;
755 txs->txs_firstdesc = sc->sc_txnext;
756 txs->txs_ndesc = dmamap->dm_nsegs;
757
758 /*
759 * Initialize the transmit descriptor.
760 */
761 firsttx = sc->sc_txnext;
762 for (nexttx = sc->sc_txnext, seg = 0;
763 seg < dmamap->dm_nsegs;
764 seg++, nexttx = EMAC_NEXTTX(nexttx)) {
765 struct mal_descriptor *txdesc =
766 &sc->sc_txdescs[nexttx];
767
768 /*
769 * If this is the first descriptor we're
770 * enqueueing, don't set the TX_READY bit just
771 * yet. That could cause a race condition.
772 * We'll do it below.
773 */
774 txdesc->md_data = dmamap->dm_segs[seg].ds_addr;
775 txdesc->md_data_len = dmamap->dm_segs[seg].ds_len;
776 txdesc->md_stat_ctrl =
777 (txdesc->md_stat_ctrl & MAL_TX_WRAP) |
778 (nexttx == firsttx ? 0 : MAL_TX_READY) |
779 EMAC_TXC_GFCS | EMAC_TXC_GPAD;
780 lasttx = nexttx;
781 }
782
783 /* Set the LAST bit on the last segment. */
784 sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST;
785
786 /*
787 * Set up the last segment descriptor to raise an interrupt
788 * once it has been transmitted, bypassing the existing Tx
789 * descriptor reaping method (for now...).
790 */
791 sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_INTERRUPT;
792
793
794 txs->txs_lastdesc = lasttx;
795
796 /* Sync the descriptors we're using. */
797 EMAC_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
798 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
799
800 /*
801 * The entire packet chain is set up. Give the
802 * first descriptor to the chip now.
803 */
804 sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY;
805 EMAC_CDTXSYNC(sc, firsttx, 1,
806 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
807 /*
808 * Tell the EMAC that a new packet is available.
809 */
810 EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0 | TMR0_TFAE_2);
811
812 /* Advance the tx pointer. */
813 sc->sc_txfree -= txs->txs_ndesc;
814 sc->sc_txnext = nexttx;
815
816 sc->sc_txsfree--;
817 sc->sc_txsnext = EMAC_NEXTTXS(sc->sc_txsnext);
818
819 /*
820 * Pass the packet to any BPF listeners.
821 */
822 bpf_mtap(ifp, m0, BPF_D_OUT);
823 }
824
825 if (sc->sc_txfree == 0)
826 /* No more slots left; notify upper layer. */
827 ifp->if_flags |= IFF_OACTIVE;
828
829 if (sc->sc_txfree != ofree)
830 /* Set a watchdog timer in case the chip flakes out. */
831 ifp->if_timer = 5;
832 }
833
834 static int
835 emac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
836 {
837 struct emac_softc *sc = ifp->if_softc;
838 int s, error;
839
840 s = splnet();
841
842 switch (cmd) {
843 case SIOCSIFMTU:
844 {
845 struct ifreq *ifr = (struct ifreq *)data;
846 int maxmtu;
847
848 if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU)
849 maxmtu = EMAC_MAX_MTU;
850 else
851 maxmtu = ETHERMTU;
852
853 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > maxmtu)
854 error = EINVAL;
855 else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
856 break;
857 else if (ifp->if_flags & IFF_UP)
858 error = emac_init(ifp);
859 else
860 error = 0;
861 break;
862 }
863
864 default:
865 error = ether_ioctl(ifp, cmd, data);
866 if (error == ENETRESET) {
867 /*
868 * Multicast list has changed; set the hardware filter
869 * accordingly.
870 */
871 if (ifp->if_flags & IFF_RUNNING)
872 error = emac_set_filter(sc);
873 else
874 error = 0;
875 }
876 }
877
878 /* try to get more packets going */
879 emac_start(ifp);
880
881 splx(s);
882 return error;
883 }
884
885 static int
886 emac_init(struct ifnet *ifp)
887 {
888 struct emac_softc *sc = ifp->if_softc;
889 struct emac_rxsoft *rxs;
890 const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
891 int error, i;
892
893 error = 0;
894
895 /* Cancel any pending I/O. */
896 emac_stop(ifp, 0);
897
898 /* Reset the chip to a known state. */
899 emac_soft_reset(sc);
900
901 /*
902 * Initialise the transmit descriptor ring.
903 */
904 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
905 /* set wrap on last descriptor */
906 sc->sc_txdescs[EMAC_NTXDESC - 1].md_stat_ctrl |= MAL_TX_WRAP;
907 EMAC_CDTXSYNC(sc, 0, EMAC_NTXDESC,
908 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
909 sc->sc_txfree = EMAC_NTXDESC;
910 sc->sc_txnext = 0;
911
912 /*
913 * Initialise the transmit job descriptors.
914 */
915 for (i = 0; i < EMAC_TXQUEUELEN; i++)
916 sc->sc_txsoft[i].txs_mbuf = NULL;
917 sc->sc_txsfree = EMAC_TXQUEUELEN;
918 sc->sc_txsnext = 0;
919 sc->sc_txsdirty = 0;
920
921 /*
922 * Initialise the receiver descriptor and receive job
923 * descriptor rings.
924 */
925 for (i = 0; i < EMAC_NRXDESC; i++) {
926 rxs = &sc->sc_rxsoft[i];
927 if (rxs->rxs_mbuf == NULL) {
928 if ((error = emac_add_rxbuf(sc, i)) != 0) {
929 aprint_error_ifnet(ifp,
930 "unable to allocate or map rx buffer %d,"
931 " error = %d\n",
932 i, error);
933 /*
934 * XXX Should attempt to run with fewer receive
935 * XXX buffers instead of just failing.
936 */
937 emac_rxdrain(sc);
938 goto out;
939 }
940 } else
941 EMAC_INIT_RXDESC(sc, i);
942 }
943 sc->sc_rxptr = 0;
944
945 /*
946 * Set the current media.
947 */
948 if ((error = ether_mediachange(ifp)) != 0)
949 goto out;
950
951 /*
952 * Load the MAC address.
953 */
954 EMAC_WRITE(sc, EMAC_IAHR, enaddr[0] << 8 | enaddr[1]);
955 EMAC_WRITE(sc, EMAC_IALR,
956 enaddr[2] << 24 | enaddr[3] << 16 | enaddr[4] << 8 | enaddr[5]);
957
958 /* Enable the transmit and receive channel on the MAL. */
959 error = mal_start(sc->sc_instance,
960 EMAC_CDTXADDR(sc, 0), EMAC_CDRXADDR(sc, 0));
961 if (error)
962 goto out;
963
964 sc->sc_mr1 &= ~MR1_JPSM;
965 if (ifp->if_mtu > ETHERMTU)
966 /* Enable Jumbo Packet Support Mode */
967 sc->sc_mr1 |= MR1_JPSM;
968
969 /* Set fifos, media modes. */
970 EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);
971
972 /*
973 * Enable Individual and (possibly) Broadcast Address modes,
974 * runt packets, and strip padding.
975 */
976 EMAC_WRITE(sc, EMAC_RMR, RMR_IAE | RMR_RRP | RMR_SP | RMR_TFAE_2 |
977 (ifp->if_flags & IFF_PROMISC ? RMR_PME : 0) |
978 (ifp->if_flags & IFF_BROADCAST ? RMR_BAE : 0));
979
980 /*
981 * Set multicast filter.
982 */
983 emac_set_filter(sc);
984
985 /*
986 * Set low- and urgent-priority request thresholds.
987 */
988 EMAC_WRITE(sc, EMAC_TMR1,
989 ((7 << TMR1_TLR_SHIFT) & TMR1_TLR_MASK) | /* 16 word burst */
990 ((15 << TMR1_TUR_SHIFT) & TMR1_TUR_MASK));
991 /*
992 * Set Transmit Request Threshold Register.
993 */
994 EMAC_WRITE(sc, EMAC_TRTR, TRTR_256);
995
996 /*
997 * Set high and low receive watermarks.
998 */
999 EMAC_WRITE(sc, EMAC_RWMR,
1000 30 << RWMR_RLWM_SHIFT | 64 << RWMR_RLWM_SHIFT);
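/*
 * XXX: both values above are shifted by RWMR_RLWM_SHIFT; the second one is
 * presumably meant for the high-watermark field, per the comment above.
 */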
1001
1002 /*
1003 * Set frame gap.
1004 */
1005 EMAC_WRITE(sc, EMAC_IPGVR, 8);
1006
1007 /*
1008 * Set interrupt status enable bits for EMAC.
1009 */
1010 EMAC_WRITE(sc, EMAC_ISER,
1011 ISR_TXPE | /* TX Parity Error */
1012 ISR_RXPE | /* RX Parity Error */
1013 ISR_TXUE | /* TX Underrun Event */
1014 ISR_RXOE | /* RX Overrun Event */
1015 ISR_OVR | /* Overrun Error */
1016 ISR_PP | /* Pause Packet */
1017 ISR_BP | /* Bad Packet */
1018 ISR_RP | /* Runt Packet */
1019 ISR_SE | /* Short Event */
1020 ISR_ALE | /* Alignment Error */
1021 ISR_BFCS | /* Bad FCS */
1022 ISR_PTLE | /* Packet Too Long Error */
1023 ISR_ORE | /* Out of Range Error */
1024 ISR_IRE | /* In Range Error */
1025 ISR_SE0 | /* Signal Quality Error 0 (SQE) */
1026 ISR_TE0 | /* Transmit Error 0 */
1027 ISR_MOS | /* MMA Operation Succeeded */
1028 ISR_MOF); /* MMA Operation Failed */
1029
1030 /*
1031 * Enable the transmit and receive channel on the EMAC.
1032 */
1033 EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);
1034
1035 /*
1036 * Start the one second MII clock.
1037 */
1038 callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
1039
1040 /*
1041 * ... all done!
1042 */
1043 ifp->if_flags |= IFF_RUNNING;
1044 ifp->if_flags &= ~IFF_OACTIVE;
1045
1046 out:
1047 if (error) {
1048 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1049 ifp->if_timer = 0;
1050 aprint_error_ifnet(ifp, "interface not running\n");
1051 }
1052 return error;
1053 }
1054
1055 static void
1056 emac_stop(struct ifnet *ifp, int disable)
1057 {
1058 struct emac_softc *sc = ifp->if_softc;
1059 struct emac_txsoft *txs;
1060 int i;
1061
1062 /* Stop the one second clock. */
1063 callout_stop(&sc->sc_callout);
1064
1065 /* Down the MII */
1066 mii_down(&sc->sc_mii);
1067
1068 /* Disable interrupts. */
1069 EMAC_WRITE(sc, EMAC_ISER, 0);
1070
1071 /* Disable the receive and transmit channels. */
1072 mal_stop(sc->sc_instance);
1073
1074 /* Clear the transmit and receive MAC enable bits. */
1075 EMAC_WRITE(sc, EMAC_MR0,
1076 EMAC_READ(sc, EMAC_MR0) & ~(MR0_TXE | MR0_RXE));
1077
1078 /* Release any queued transmit buffers. */
1079 for (i = 0; i < EMAC_TXQUEUELEN; i++) {
1080 txs = &sc->sc_txsoft[i];
1081 if (txs->txs_mbuf != NULL) {
1082 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1083 m_freem(txs->txs_mbuf);
1084 txs->txs_mbuf = NULL;
1085 }
1086 }
1087
1088 if (disable)
1089 emac_rxdrain(sc);
1090
1091 /*
1092 * Mark the interface down and cancel the watchdog timer.
1093 */
1094 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1095 ifp->if_timer = 0;
1096 }
1097
1098 static void
1099 emac_watchdog(struct ifnet *ifp)
1100 {
1101 struct emac_softc *sc = ifp->if_softc;
1102
1103 /*
1104 * Since we're not interrupting every packet, sweep
1105 * up before we report an error.
1106 */
1107 emac_txreap(sc);
1108
1109 if (sc->sc_txfree != EMAC_NTXDESC) {
1110 aprint_error_ifnet(ifp,
1111 "device timeout (txfree %d txsfree %d txnext %d)\n",
1112 sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
1113 if_statinc(ifp, if_oerrors);
1114
1115 /* Reset the interface. */
1116 (void)emac_init(ifp);
1117 } else if (ifp->if_flags & IFF_DEBUG)
1118 aprint_error_ifnet(ifp, "recovered from device timeout\n");
1119
1120 /* try to get more packets going */
1121 emac_start(ifp);
1122 }
1123
1124 static int
1125 emac_add_rxbuf(struct emac_softc *sc, int idx)
1126 {
1127 struct emac_rxsoft *rxs = &sc->sc_rxsoft[idx];
1128 struct mbuf *m;
1129 int error;
1130
1131 MGETHDR(m, M_DONTWAIT, MT_DATA);
1132 if (m == NULL)
1133 return ENOBUFS;
1134
1135 MCLGET(m, M_DONTWAIT);
1136 if ((m->m_flags & M_EXT) == 0) {
1137 m_freem(m);
1138 return ENOBUFS;
1139 }
1140
1141 if (rxs->rxs_mbuf != NULL)
1142 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1143
1144 rxs->rxs_mbuf = m;
1145
1146 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1147 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1148 if (error) {
1149 aprint_error_dev(sc->sc_dev,
1150 "can't load rx DMA map %d, error = %d\n", idx, error);
1151 panic("emac_add_rxbuf"); /* XXX */
1152 }
1153
1154 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1155 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1156
1157 EMAC_INIT_RXDESC(sc, idx);
1158
1159 return 0;
1160 }
1161
1162 static void
1163 emac_rxdrain(struct emac_softc *sc)
1164 {
1165 struct emac_rxsoft *rxs;
1166 int i;
1167
1168 for (i = 0; i < EMAC_NRXDESC; i++) {
1169 rxs = &sc->sc_rxsoft[i];
1170 if (rxs->rxs_mbuf != NULL) {
1171 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1172 m_freem(rxs->rxs_mbuf);
1173 rxs->rxs_mbuf = NULL;
1174 }
1175 }
1176 }
1177
1178 static int
1179 emac_set_filter(struct emac_softc *sc)
1180 {
1181 struct ethercom *ec = &sc->sc_ethercom;
1182 struct ether_multistep step;
1183 struct ether_multi *enm;
1184 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1185 uint32_t rmr, crc, mask, tmp, reg, gaht[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
1186 int regs, cnt = 0, i;
1187
1188 if (sc->sc_htsize == 256) {
1189 reg = EMAC_GAHT256(0);
1190 regs = 8;
1191 } else {
1192 reg = EMAC_GAHT64(0);
1193 regs = 4;
1194 }
1195 mask = (1ULL << (sc->sc_htsize / regs)) - 1;
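	/*
	 * This yields a per-register mask of all ones: 16 bits (0xffff) per
	 * GAHT register for the 64-bit table, 32 bits (0xffffffff) for the
	 * 256-bit one.
	 */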
1196
1197 rmr = EMAC_READ(sc, EMAC_RMR);
1198 rmr &= ~(RMR_PMME | RMR_MAE);
1199 ifp->if_flags &= ~IFF_ALLMULTI;
1200
1201 ETHER_LOCK(ec);
1202 ETHER_FIRST_MULTI(step, ec, enm);
1203 while (enm != NULL) {
1204 if (memcmp(enm->enm_addrlo,
1205 enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
1206 /*
1207 * We must listen to a range of multicast addresses.
1208 * For now, just accept all multicasts, rather than
1209 * trying to set only those filter bits needed to match
1210 * the range. (At this time, the only use of address
1211 * ranges is for IP multicast routing, for which the
1212 * range is big enough to require all bits set.)
1213 */
1214 gaht[0] = gaht[1] = gaht[2] = gaht[3] =
1215 gaht[4] = gaht[5] = gaht[6] = gaht[7] = mask;
1216 break;
1217 }
1218
1219 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1220
1221 if (sc->sc_htsize == 256)
1222 EMAC_SET_FILTER256(gaht, crc);
1223 else
1224 EMAC_SET_FILTER(gaht, crc);
1225
1226 ETHER_NEXT_MULTI(step, enm);
1227 cnt++;
1228 }
1229 ETHER_UNLOCK(ec);
1230
1231 for (i = 1, tmp = gaht[0]; i < regs; i++)
1232 tmp &= gaht[i];
1233 if (tmp == mask) {
1234 /* All categories are true. */
1235 ifp->if_flags |= IFF_ALLMULTI;
1236 rmr |= RMR_PMME;
1237 } else if (cnt != 0) {
1238 /* Some categories are true. */
1239 for (i = 0; i < regs; i++)
1240 EMAC_WRITE(sc, reg + (i << 2), gaht[i]);
1241 rmr |= RMR_MAE;
1242 }
1243 EMAC_WRITE(sc, EMAC_RMR, rmr);
1244
1245 return 0;
1246 }
1247
1248 /*
1249 * Reap completed Tx descriptors.
1250 */
1251 static int
1252 emac_txreap(struct emac_softc *sc)
1253 {
1254 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1255 struct emac_txsoft *txs;
1256 int handled, i;
1257 uint32_t txstat;
1258
1259 EMAC_EVCNT_INCR(&sc->sc_ev_txreap);
1260 handled = 0;
1261
1262 ifp->if_flags &= ~IFF_OACTIVE;
1263
1264 /*
1265 * Go through our Tx list and free mbufs for those
1266 * frames that have been transmitted.
1267 */
1268 for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN;
1269 i = EMAC_NEXTTXS(i), sc->sc_txsfree++) {
1270 txs = &sc->sc_txsoft[i];
1271
1272 EMAC_CDTXSYNC(sc, txs->txs_lastdesc,
1273 txs->txs_dmamap->dm_nsegs,
1274 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1275
1276 txstat = sc->sc_txdescs[txs->txs_lastdesc].md_stat_ctrl;
1277 if (txstat & MAL_TX_READY)
1278 break;
1279
1280 handled = 1;
1281
1282 /*
1283 * Check for errors and collisions.
1284 */
1285 if (txstat & (EMAC_TXS_UR | EMAC_TXS_ED))
1286 if_statinc(ifp, if_oerrors);
1287
1288 #ifdef EMAC_EVENT_COUNTERS
1289 if (txstat & EMAC_TXS_UR)
1290 EMAC_EVCNT_INCR(&sc->sc_ev_tu);
1291 #endif /* EMAC_EVENT_COUNTERS */
1292
1293 if (txstat &
1294 (EMAC_TXS_EC | EMAC_TXS_MC | EMAC_TXS_SC | EMAC_TXS_LC)) {
1295 if (txstat & EMAC_TXS_EC)
1296 if_statadd(ifp, if_collisions, 16);
1297 else if (txstat & EMAC_TXS_MC)
1298 if_statadd(ifp, if_collisions, 2); /* XXX? */
1299 else if (txstat & EMAC_TXS_SC)
1300 if_statinc(ifp, if_collisions);
1301 if (txstat & EMAC_TXS_LC)
1302 if_statinc(ifp, if_collisions);
1303 } else
1304 if_statinc(ifp, if_opackets);
1305
1306 if (ifp->if_flags & IFF_DEBUG) {
1307 if (txstat & EMAC_TXS_ED)
1308 aprint_error_ifnet(ifp, "excessive deferral\n");
1309 if (txstat & EMAC_TXS_EC)
1310 aprint_error_ifnet(ifp,
1311 "excessive collisions\n");
1312 }
1313
1314 sc->sc_txfree += txs->txs_ndesc;
1315 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1316 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1317 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1318 m_freem(txs->txs_mbuf);
1319 txs->txs_mbuf = NULL;
1320 }
1321
1322 /* Update the dirty transmit buffer pointer. */
1323 sc->sc_txsdirty = i;
1324
1325 /*
1326 * If there are no more pending transmissions, cancel the watchdog
1327 * timer.
1328 */
1329 if (sc->sc_txsfree == EMAC_TXQUEUELEN)
1330 ifp->if_timer = 0;
1331
1332 return handled;
1333 }
1334
1335
1336 /*
1337 * Reset functions
1338 */
1339
1340 static void
1341 emac_soft_reset(struct emac_softc *sc)
1342 {
1343 uint32_t sdr;
1344 int t = 0;
1345
1346 /*
1347 * The PHY must provide a TX clock in order to perform a soft reset
1348 * of the EMAC. If none is present, select the internal clock
1349 * (SDR0_MFR[E0CS, E1CS]). After the soft reset, select the external
1350 * clock again.
1351 */
1352
1353 sdr = mfsdr(DCR_SDR0_MFR);
1354 sdr |= SDR0_MFR_ECS(sc->sc_instance);
1355 mtsdr(DCR_SDR0_MFR, sdr);
1356
1357 EMAC_WRITE(sc, EMAC_MR0, MR0_SRST);
1358
1359 sdr = mfsdr(DCR_SDR0_MFR);
1360 sdr &= ~SDR0_MFR_ECS(sc->sc_instance);
1361 mtsdr(DCR_SDR0_MFR, sdr);
1362
1363 delay(5);
1364
1365 /* wait for the reset to finish */
1366 while (EMAC_READ(sc, EMAC_MR0) & MR0_SRST) {
1367 if (++t == 1000000 /* 1sec XXXXX */) {
1368 aprint_error_dev(sc->sc_dev, "Soft Reset failed\n");
1369 return;
1370 }
1371 delay(1);
1372 }
1373 }
1374
1375 static void
1376 emac_smart_reset(struct emac_softc *sc)
1377 {
1378 uint32_t mr0;
1379 int t = 0;
1380
1381 mr0 = EMAC_READ(sc, EMAC_MR0);
1382 if (mr0 & (MR0_TXE | MR0_RXE)) {
1383 mr0 &= ~(MR0_TXE | MR0_RXE);
1384 EMAC_WRITE(sc, EMAC_MR0, mr0);
1385
1386 /* wait for the idle state */
1387 while ((EMAC_READ(sc, EMAC_MR0) & (MR0_TXI | MR0_RXI)) !=
1388 (MR0_TXI | MR0_RXI)) {
1389 if (++t == 1000000 /* 1sec XXXXX */) {
1390 aprint_error_dev(sc->sc_dev,
1391 "Smart Reset failed\n");
1392 return;
1393 }
1394 delay(1);
1395 }
1396 }
1397 }
1398
1399
1400 /*
1401 * MII related functions
1402 */
1403
1404 static int
1405 emac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1406 {
1407 struct emac_softc *sc = device_private(self);
1408 uint32_t sta_reg;
1409 int rv;
1410
1411 if (sc->sc_rmii_enable)
1412 sc->sc_rmii_enable(device_parent(self), sc->sc_instance);
1413
1414 /* wait for PHY data transfer to complete */
1415 if ((rv = emac_mii_wait(sc)) != 0)
1416 goto fail;
1417
1418 sta_reg =
1419 sc->sc_stacr_read |
1420 (reg << STACR_PRA_SHIFT) |
1421 (phy << STACR_PCDA_SHIFT) |
1422 sc->sc_stacr_bits;
1423 EMAC_WRITE(sc, EMAC_STACR, sta_reg);
1424
1425 if ((rv = emac_mii_wait(sc)) != 0)
1426 goto fail;
1427 sta_reg = EMAC_READ(sc, EMAC_STACR);
1428
1429 if (sta_reg & STACR_PHYE) {
1430 rv = -1;
1431 goto fail;
1432 }
1433 *val = sta_reg >> STACR_PHYD_SHIFT;
1434
1435 fail:
1436 if (sc->sc_rmii_disable)
1437 sc->sc_rmii_disable(device_parent(self), sc->sc_instance);
1438 return rv;
1439 }
1440
1441 static int
1442 emac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1443 {
1444 struct emac_softc *sc = device_private(self);
1445 uint32_t sta_reg;
1446 int rv;
1447
1448 if (sc->sc_rmii_enable)
1449 sc->sc_rmii_enable(device_parent(self), sc->sc_instance);
1450
1451 /* wait for PHY data transfer to complete */
1452 if ((rv = emac_mii_wait(sc)) != 0)
1453 goto out;
1454
1455 sta_reg =
1456 (val << STACR_PHYD_SHIFT) |
1457 sc->sc_stacr_write |
1458 (reg << STACR_PRA_SHIFT) |
1459 (phy << STACR_PCDA_SHIFT) |
1460 sc->sc_stacr_bits;
1461 EMAC_WRITE(sc, EMAC_STACR, sta_reg);
1462
1463 if ((rv = emac_mii_wait(sc)) != 0)
1464 goto out;
1465 if (EMAC_READ(sc, EMAC_STACR) & STACR_PHYE) {
1466 aprint_error_dev(sc->sc_dev, "MII PHY Error\n");
1467 rv = -1;
1468 }
1469
1470 out:
1471 if (sc->sc_rmii_disable)
1472 sc->sc_rmii_disable(device_parent(self), sc->sc_instance);
1473
1474 return rv;
1475 }
1476
1477 static void
1478 emac_mii_statchg(struct ifnet *ifp)
1479 {
1480 struct emac_softc *sc = ifp->if_softc;
1481 struct mii_data *mii = &sc->sc_mii;
1482
1483 /*
1484 * MR1 can only be written immediately after a reset...
1485 */
1486 emac_smart_reset(sc);
1487
1488 sc->sc_mr1 &= ~(MR1_FDE | MR1_ILE | MR1_EIFC | MR1_MF_MASK | MR1_IST);
1489 if (mii->mii_media_active & IFM_FDX)
1490 sc->sc_mr1 |= (MR1_FDE | MR1_EIFC | MR1_IST);
1491 if (mii->mii_media_active & IFM_FLOW)
1492 sc->sc_mr1 |= MR1_EIFC;
1493 if (mii->mii_media_active & IFM_LOOP)
1494 sc->sc_mr1 |= MR1_ILE;
1495 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1496 case IFM_1000_T:
1497 sc->sc_mr1 |= (MR1_MF_1000MBS | MR1_IST);
1498 break;
1499
1500 case IFM_100_TX:
1501 sc->sc_mr1 |= (MR1_MF_100MBS | MR1_IST);
1502 break;
1503
1504 case IFM_10_T:
1505 sc->sc_mr1 |= MR1_MF_10MBS;
1506 break;
1507
1508 case IFM_NONE:
1509 break;
1510
1511 default:
1512 aprint_error_dev(sc->sc_dev, "unknown sub-type %d\n",
1513 IFM_SUBTYPE(mii->mii_media_active));
1514 break;
1515 }
1516 if (sc->sc_rmii_speed)
1517 sc->sc_rmii_speed(device_parent(sc->sc_dev), sc->sc_instance,
1518 IFM_SUBTYPE(mii->mii_media_active));
1519
1520 EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);
1521
1522 /* Enable TX and RX if already RUNNING */
1523 if (ifp->if_flags & IFF_RUNNING)
1524 EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);
1525 }
1526
1527 static uint32_t
1528 emac_mii_wait(struct emac_softc *sc)
1529 {
1530 int i;
1531 uint32_t oc;
1532
1533 /* wait for PHY data transfer to complete */
1534 i = 0;
1535 oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC;
1536 while ((oc == STACR_OC) != sc->sc_stacr_completed) {
1537 delay(7);
1538 if (i++ > 5) {
1539 aprint_error_dev(sc->sc_dev, "MII timed out\n");
1540 return ETIMEDOUT;
1541 }
1542 oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC;
1543 }
1544 return 0;
1545 }
1546
1547 static void
1548 emac_mii_tick(void *arg)
1549 {
1550 struct emac_softc *sc = arg;
1551 int s;
1552
1553 if (!device_is_active(sc->sc_dev))
1554 return;
1555
1556 s = splnet();
1557 mii_tick(&sc->sc_mii);
1558 splx(s);
1559
1560 callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
1561 }
1562
1563 int
1564 emac_txeob_intr(void *arg)
1565 {
1566 struct emac_softc *sc = arg;
1567 int handled = 0;
1568
1569 EMAC_EVCNT_INCR(&sc->sc_ev_txintr);
1570 handled |= emac_txreap(sc);
1571
1572 /* try to get more packets going */
1573 if_schedule_deferred_start(&sc->sc_ethercom.ec_if);
1574
1575 return handled;
1576 }
1577
1578 int
1579 emac_rxeob_intr(void *arg)
1580 {
1581 struct emac_softc *sc = arg;
1582 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1583 struct emac_rxsoft *rxs;
1584 struct mbuf *m;
1585 uint32_t rxstat;
1586 int i, len;
1587
1588 EMAC_EVCNT_INCR(&sc->sc_ev_rxintr);
1589
1590 for (i = sc->sc_rxptr; ; i = EMAC_NEXTRX(i)) {
1591 rxs = &sc->sc_rxsoft[i];
1592
1593 EMAC_CDRXSYNC(sc, i,
1594 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1595
1596 rxstat = sc->sc_rxdescs[i].md_stat_ctrl;
1597
1598 if (rxstat & MAL_RX_EMPTY) {
1599 /*
1600 * We have processed all of the receive buffers.
1601 */
1602 /* Flush current empty descriptor */
1603 EMAC_CDRXSYNC(sc, i,
1604 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1605 break;
1606 }
1607
1608 /*
1609 * If an error occurred, update stats, clear the status
1610 * word, and leave the packet buffer in place. It will
1611 * simply be reused the next time the ring comes around.
1612 */
1613 if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE |
1614 EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE |
1615 EMAC_RXS_IRE)) {
1616 #define PRINTERR(bit, str) \
1617 if (rxstat & (bit)) \
1618 aprint_error_ifnet(ifp, \
1619 "receive error: %s\n", str)
1620 if_statinc(ifp, if_ierrors);
1621 PRINTERR(EMAC_RXS_OE, "overrun error");
1622 PRINTERR(EMAC_RXS_BP, "bad packet");
1623 PRINTERR(EMAC_RXS_RP, "runt packet");
1624 PRINTERR(EMAC_RXS_SE, "short event");
1625 PRINTERR(EMAC_RXS_AE, "alignment error");
1626 PRINTERR(EMAC_RXS_BFCS, "bad FCS");
1627 PRINTERR(EMAC_RXS_PTL, "packet too long");
1628 PRINTERR(EMAC_RXS_ORE, "out of range error");
1629 PRINTERR(EMAC_RXS_IRE, "in range error");
1630 #undef PRINTERR
1631 EMAC_INIT_RXDESC(sc, i);
1632 continue;
1633 }
1634
1635 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1636 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1637
1638 /*
1639 * No errors; receive the packet. Note, the 405GP emac
1640 * includes the CRC with every packet.
1641 */
1642 len = sc->sc_rxdescs[i].md_data_len - ETHER_CRC_LEN;
1643
1644 /*
1645 * If the packet is small enough to fit in a
1646 * single header mbuf, allocate one and copy
1647 * the data into it. This greatly reduces
1648 * memory consumption when we receive lots
1649 * of small packets.
1650 *
1651 * Otherwise, we add a new buffer to the receive
1652 * chain. If this fails, we drop the packet and
1653 * recycle the old buffer.
1654 */
1655 if (emac_copy_small != 0 && len <= MHLEN) {
1656 MGETHDR(m, M_DONTWAIT, MT_DATA);
1657 if (m == NULL)
1658 goto dropit;
1659 memcpy(mtod(m, void *),
1660 mtod(rxs->rxs_mbuf, void *), len);
1661 EMAC_INIT_RXDESC(sc, i);
1662 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1663 rxs->rxs_dmamap->dm_mapsize,
1664 BUS_DMASYNC_PREREAD);
1665 } else {
1666 m = rxs->rxs_mbuf;
1667 if (emac_add_rxbuf(sc, i) != 0) {
1668 dropit:
1669 if_statinc(ifp, if_ierrors);
1670 EMAC_INIT_RXDESC(sc, i);
1671 bus_dmamap_sync(sc->sc_dmat,
1672 rxs->rxs_dmamap, 0,
1673 rxs->rxs_dmamap->dm_mapsize,
1674 BUS_DMASYNC_PREREAD);
1675 continue;
1676 }
1677 }
1678
1679 m_set_rcvif(m, ifp);
1680 m->m_pkthdr.len = m->m_len = len;
1681
1682 /* Pass it on. */
1683 if_percpuq_enqueue(ifp->if_percpuq, m);
1684 }
1685
1686 /* Update the receive pointer. */
1687 sc->sc_rxptr = i;
1688
1689 return 1;
1690 }
1691
1692 int
1693 emac_txde_intr(void *arg)
1694 {
1695 struct emac_softc *sc = arg;
1696
1697 EMAC_EVCNT_INCR(&sc->sc_ev_txde);
1698 aprint_error_dev(sc->sc_dev, "emac_txde_intr\n");
1699 return 1;
1700 }
1701
1702 int
1703 emac_rxde_intr(void *arg)
1704 {
1705 struct emac_softc *sc = arg;
1706 int i;
1707
1708 EMAC_EVCNT_INCR(&sc->sc_ev_rxde);
1709 aprint_error_dev(sc->sc_dev, "emac_rxde_intr\n");
1710 /*
1711 * XXX!
1712 * This is a bit drastic; we just drop all descriptors that aren't
1713 * "clean". We should probably send any that are up the stack.
1714 */
1715 for (i = 0; i < EMAC_NRXDESC; i++) {
1716 EMAC_CDRXSYNC(sc, i,
1717 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1718
1719 if (sc->sc_rxdescs[i].md_data_len != MCLBYTES)
1720 EMAC_INIT_RXDESC(sc, i);
1721 }
1722
1723 return 1;
1724 }
1725