     1 /*	$NetBSD: if_emac.c,v 1.29 2007/08/26 22:32:06 dyoung Exp $	*/
2
3 /*
4 * Copyright 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: if_emac.c,v 1.29 2007/08/26 22:32:06 dyoung Exp $");
40
41 #include "bpfilter.h"
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/mbuf.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/ioctl.h>
49
50 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
51
52 #include <net/if.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_ether.h>
56
57 #if NBPFILTER > 0
58 #include <net/bpf.h>
59 #endif
60
61 #include <powerpc/ibm4xx/dev/opbvar.h>
62
63 #include <powerpc/ibm4xx/ibm405gp.h>
64 #include <powerpc/ibm4xx/mal405gp.h>
65 #include <powerpc/ibm4xx/dcr405gp.h>
66 #include <powerpc/ibm4xx/dev/emacreg.h>
67 #include <powerpc/ibm4xx/dev/if_emacreg.h>
68
69 #include <dev/mii/miivar.h>
70
71 /*
72 * Transmit descriptor list size. There are two Tx channels, each with
73 * up to 256 hardware descriptors available. We currently use one Tx
74 * channel. We tell the upper layers that they can queue a lot of
75 * packets, and we go ahead and manage up to 64 of them at a time. We
76 * allow up to 16 DMA segments per packet.
77 */
78 #define EMAC_NTXSEGS 16
79 #define EMAC_TXQUEUELEN 64
80 #define EMAC_TXQUEUELEN_MASK (EMAC_TXQUEUELEN - 1)
81 #define EMAC_TXQUEUE_GC (EMAC_TXQUEUELEN / 4)
82 #define EMAC_NTXDESC 256
83 #define EMAC_NTXDESC_MASK (EMAC_NTXDESC - 1)
84 #define EMAC_NEXTTX(x) (((x) + 1) & EMAC_NTXDESC_MASK)
85 #define EMAC_NEXTTXS(x) (((x) + 1) & EMAC_TXQUEUELEN_MASK)
86
87 /*
88 * Receive descriptor list size. There is one Rx channel with up to 256
89 * hardware descriptors available. We allocate 64 receive descriptors,
90 * each with a 2k buffer (MCLBYTES).
91 */
92 #define EMAC_NRXDESC 64
93 #define EMAC_NRXDESC_MASK (EMAC_NRXDESC - 1)
94 #define EMAC_NEXTRX(x) (((x) + 1) & EMAC_NRXDESC_MASK)
95 #define EMAC_PREVRX(x) (((x) - 1) & EMAC_NRXDESC_MASK)
96
97 /*
98 * Transmit/receive descriptors that are DMA'd to the EMAC.
99 */
100 struct emac_control_data {
101 struct mal_descriptor ecd_txdesc[EMAC_NTXDESC];
102 struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC];
103 };
104
105 #define EMAC_CDOFF(x) offsetof(struct emac_control_data, x)
106 #define EMAC_CDTXOFF(x) EMAC_CDOFF(ecd_txdesc[(x)])
107 #define EMAC_CDRXOFF(x) EMAC_CDOFF(ecd_rxdesc[(x)])
108
109 /*
110 * Software state for transmit jobs.
111 */
112 struct emac_txsoft {
113 struct mbuf *txs_mbuf; /* head of mbuf chain */
114 bus_dmamap_t txs_dmamap; /* our DMA map */
115 int txs_firstdesc; /* first descriptor in packet */
116 int txs_lastdesc; /* last descriptor in packet */
117 int txs_ndesc; /* # of descriptors used */
118 };
119
120 /*
121 * Software state for receive descriptors.
122 */
123 struct emac_rxsoft {
124 struct mbuf *rxs_mbuf; /* head of mbuf chain */
125 bus_dmamap_t rxs_dmamap; /* our DMA map */
126 };
127
128 /*
129 * Software state per device.
130 */
131 struct emac_softc {
132 struct device sc_dev; /* generic device information */
133 bus_space_tag_t sc_st; /* bus space tag */
134 bus_space_handle_t sc_sh; /* bus space handle */
135 bus_dma_tag_t sc_dmat; /* bus DMA tag */
136 struct ethercom sc_ethercom; /* ethernet common data */
137 void *sc_sdhook; /* shutdown hook */
138 void *sc_powerhook; /* power management hook */
139
140 struct mii_data sc_mii; /* MII/media information */
141 struct callout sc_callout; /* tick callout */
142
143 u_int32_t sc_mr1; /* copy of Mode Register 1 */
144
145 bus_dmamap_t sc_cddmamap; /* control data dma map */
146 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
147
148 /* Software state for transmit/receive descriptors. */
149 struct emac_txsoft sc_txsoft[EMAC_TXQUEUELEN];
150 struct emac_rxsoft sc_rxsoft[EMAC_NRXDESC];
151
152 /* Control data structures. */
153 struct emac_control_data *sc_control_data;
154 #define sc_txdescs sc_control_data->ecd_txdesc
155 #define sc_rxdescs sc_control_data->ecd_rxdesc
156
157 #ifdef EMAC_EVENT_COUNTERS
158 struct evcnt sc_ev_rxintr; /* Rx interrupts */
159 struct evcnt sc_ev_txintr; /* Tx interrupts */
160 struct evcnt sc_ev_rxde; /* Rx descriptor interrupts */
161 struct evcnt sc_ev_txde; /* Tx descriptor interrupts */
   162 	struct evcnt sc_ev_wol;		/* Wake-On-LAN interrupts */
163 struct evcnt sc_ev_serr; /* MAL system error interrupts */
164 struct evcnt sc_ev_intr; /* General EMAC interrupts */
165
166 struct evcnt sc_ev_txreap; /* Calls to Tx descriptor reaper */
167 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
168 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
169 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
170 struct evcnt sc_ev_tu; /* Tx underrun */
171 #endif /* EMAC_EVENT_COUNTERS */
172
173 int sc_txfree; /* number of free Tx descriptors */
174 int sc_txnext; /* next ready Tx descriptor */
175
176 int sc_txsfree; /* number of free Tx jobs */
177 int sc_txsnext; /* next ready Tx job */
178 int sc_txsdirty; /* dirty Tx jobs */
179
180 int sc_rxptr; /* next ready RX descriptor/descsoft */
181 };
182
183 #ifdef EMAC_EVENT_COUNTERS
184 #define EMAC_EVCNT_INCR(ev) (ev)->ev_count++
185 #else
186 #define EMAC_EVCNT_INCR(ev) /* nothing */
187 #endif
188
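/* DMA bus address of the transmit/receive descriptor at index (x). */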
189 #define EMAC_CDTXADDR(sc, x) ((sc)->sc_cddma + EMAC_CDTXOFF((x)))
190 #define EMAC_CDRXADDR(sc, x) ((sc)->sc_cddma + EMAC_CDRXOFF((x)))
191
192 #define EMAC_CDTXSYNC(sc, x, n, ops) \
193 do { \
194 int __x, __n; \
195 \
196 __x = (x); \
197 __n = (n); \
198 \
199 /* If it will wrap around, sync to the end of the ring. */ \
200 if ((__x + __n) > EMAC_NTXDESC) { \
201 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
202 EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * \
203 (EMAC_NTXDESC - __x), (ops)); \
204 __n -= (EMAC_NTXDESC - __x); \
205 __x = 0; \
206 } \
207 \
208 /* Now sync whatever is left. */ \
209 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
210 EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * __n, (ops)); \
211 } while (/*CONSTCOND*/0)
212
213 #define EMAC_CDRXSYNC(sc, x, ops) \
214 do { \
215 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
216 EMAC_CDRXOFF((x)), sizeof(struct mal_descriptor), (ops)); \
217 } while (/*CONSTCOND*/0)
218
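/*
 * (Re)initialize receive descriptor (x): point it at its mbuf cluster,
 * mark it empty (owned by the MAL), and sync the descriptor.
 */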
219 #define EMAC_INIT_RXDESC(sc, x) \
220 do { \
221 struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
222 struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)]; \
223 struct mbuf *__m = __rxs->rxs_mbuf; \
224 \
225 /* \
226 * Note: We scoot the packet forward 2 bytes in the buffer \
227 * so that the payload after the Ethernet header is aligned \
228 * to a 4-byte boundary. \
229 */ \
230 __m->m_data = __m->m_ext.ext_buf + 2; \
231 \
232 __rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr + 2; \
233 __rxd->md_data_len = __m->m_ext.ext_size - 2; \
234 __rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT | \
235 /* Set wrap on last descriptor. */ \
236 (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0); \
237 EMAC_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
238 } while (/*CONSTCOND*/0)
239
240 #define EMAC_WRITE(sc, reg, val) \
241 bus_space_write_stream_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
242 #define EMAC_READ(sc, reg) \
243 bus_space_read_stream_4((sc)->sc_st, (sc)->sc_sh, (reg))
244
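/*
 * Set the hash-filter bit for a 6-bit CRC "category": bits 5..4 select one
 * of the four 16-bit GAHT words (gaht[0], written to GAHT1, holds the
 * highest categories) and bits 3..0 select the bit within that word.
 */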
245 #define EMAC_SET_FILTER(aht, category) \
246 do { \
247 (aht)[3 - ((category) >> 4)] |= 1 << ((category) & 0xf); \
248 } while (/*CONSTCOND*/0)
249
250 static int emac_match(struct device *, struct cfdata *, void *);
251 static void emac_attach(struct device *, struct device *, void *);
252
253 static int emac_add_rxbuf(struct emac_softc *, int);
254 static int emac_init(struct ifnet *);
255 static int emac_ioctl(struct ifnet *, u_long, void *);
256 static void emac_reset(struct emac_softc *);
257 static void emac_rxdrain(struct emac_softc *);
258 static int emac_txreap(struct emac_softc *);
259 static void emac_shutdown(void *);
260 static void emac_start(struct ifnet *);
261 static void emac_stop(struct ifnet *, int);
262 static void emac_watchdog(struct ifnet *);
263 static int emac_set_filter(struct emac_softc *);
264
265 static int emac_wol_intr(void *);
266 static int emac_serr_intr(void *);
267 static int emac_txeob_intr(void *);
268 static int emac_rxeob_intr(void *);
269 static int emac_txde_intr(void *);
270 static int emac_rxde_intr(void *);
271 static int emac_intr(void *);
272
273 static int emac_mediachange(struct ifnet *);
274 static void emac_mediastatus(struct ifnet *, struct ifmediareq *);
275 static int emac_mii_readreg(struct device *, int, int);
276 static void emac_mii_statchg(struct device *);
277 static void emac_mii_tick(void *);
278 static uint32_t emac_mii_wait(struct emac_softc *);
279 static void emac_mii_writereg(struct device *, int, int, int);
280
281 int emac_copy_small = 0;
282
283 CFATTACH_DECL(emac, sizeof(struct emac_softc),
284 emac_match, emac_attach, NULL, NULL);
285
286 static int
287 emac_match(struct device *parent, struct cfdata *cf, void *aux)
288 {
289 struct opb_attach_args *oaa = aux;
290
291 /* match only on-chip ethernet devices */
292 if (strcmp(oaa->opb_name, cf->cf_name) == 0)
293 return (1);
294
295 return (0);
296 }
297
298 static void
299 emac_attach(struct device *parent, struct device *self, void *aux)
300 {
301 struct opb_attach_args *oaa = aux;
302 struct emac_softc *sc = (struct emac_softc *)self;
303 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
304 struct mii_data *mii = &sc->sc_mii;
305 bus_dma_segment_t seg;
306 int error, i, nseg;
307 const uint8_t *enaddr;
308 prop_data_t ea;
309
310 bus_space_map(oaa->opb_bt, oaa->opb_addr, EMAC_NREG, 0, &sc->sc_sh);
311 sc->sc_st = oaa->opb_bt;
312 sc->sc_dmat = oaa->opb_dmat;
313
314 printf(": 405GP EMAC\n");
315
316 /*
317 * Set up Mode Register 1 - set receive and transmit FIFOs to maximum
318 * size, allow transmit of multiple packets (only channel 0 is used).
319 *
320 * XXX: Allow pause packets??
321 */
322 sc->sc_mr1 = MR1_RFS_4KB | MR1_TFS_2KB | MR1_TR0_MULTIPLE;
323
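	/*
	 * The EMAC uses seven consecutive interrupt lines starting at
	 * opb_irq: Wake-On-LAN, MAL system error, Tx and Rx end-of-buffer,
	 * Tx and Rx descriptor error, and the general EMAC interrupt.
	 */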
324 intr_establish(oaa->opb_irq , IST_LEVEL, IPL_NET, emac_wol_intr, sc);
325 intr_establish(oaa->opb_irq + 1, IST_LEVEL, IPL_NET, emac_serr_intr, sc);
326 intr_establish(oaa->opb_irq + 2, IST_LEVEL, IPL_NET, emac_txeob_intr, sc);
327 intr_establish(oaa->opb_irq + 3, IST_LEVEL, IPL_NET, emac_rxeob_intr, sc);
328 intr_establish(oaa->opb_irq + 4, IST_LEVEL, IPL_NET, emac_txde_intr, sc);
329 intr_establish(oaa->opb_irq + 5, IST_LEVEL, IPL_NET, emac_rxde_intr, sc);
330 intr_establish(oaa->opb_irq + 6, IST_LEVEL, IPL_NET, emac_intr, sc);
331 printf("%s: interrupting at irqs %d .. %d\n", sc->sc_dev.dv_xname,
332 oaa->opb_irq, oaa->opb_irq + 6);
333
334 /*
335 * Allocate the control data structures, and create and load the
336 * DMA map for it.
337 */
338 if ((error = bus_dmamem_alloc(sc->sc_dmat,
339 sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) {
340 printf("%s: unable to allocate control data, error = %d\n",
341 sc->sc_dev.dv_xname, error);
342 goto fail_0;
343 }
344
345 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
346 sizeof(struct emac_control_data), (void **)&sc->sc_control_data,
347 BUS_DMA_COHERENT)) != 0) {
348 printf("%s: unable to map control data, error = %d\n",
349 sc->sc_dev.dv_xname, error);
350 goto fail_1;
351 }
352
353 if ((error = bus_dmamap_create(sc->sc_dmat,
354 sizeof(struct emac_control_data), 1,
355 sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
356 printf("%s: unable to create control data DMA map, "
357 "error = %d\n", sc->sc_dev.dv_xname, error);
358 goto fail_2;
359 }
360
361 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
362 sc->sc_control_data, sizeof(struct emac_control_data), NULL,
363 0)) != 0) {
364 printf("%s: unable to load control data DMA map, error = %d\n",
365 sc->sc_dev.dv_xname, error);
366 goto fail_3;
367 }
368
369 /*
370 * Create the transmit buffer DMA maps.
371 */
372 for (i = 0; i < EMAC_TXQUEUELEN; i++) {
373 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
374 EMAC_NTXSEGS, MCLBYTES, 0, 0,
375 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
376 printf("%s: unable to create tx DMA map %d, "
377 "error = %d\n", sc->sc_dev.dv_xname, i, error);
378 goto fail_4;
379 }
380 }
381
382 /*
383 * Create the receive buffer DMA maps.
384 */
385 for (i = 0; i < EMAC_NRXDESC; i++) {
386 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
387 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
388 printf("%s: unable to create rx DMA map %d, "
389 "error = %d\n", sc->sc_dev.dv_xname, i, error);
390 goto fail_5;
391 }
392 sc->sc_rxsoft[i].rxs_mbuf = NULL;
393 }
394
395 /*
396 * Reset the chip to a known state.
397 */
398 emac_reset(sc);
399
400 /* Fetch the Ethernet address. */
401 ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
402 if (ea == NULL) {
403 printf("%s: unable to get mac-addr property\n",
404 sc->sc_dev.dv_xname);
405 return;
406 }
407 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
408 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
409 enaddr = prop_data_data_nocopy(ea);
410
411 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
412 ether_sprintf(enaddr));
413
414 /*
415 * Initialise the media structures.
416 */
417 mii->mii_ifp = ifp;
418 mii->mii_readreg = emac_mii_readreg;
419 mii->mii_writereg = emac_mii_writereg;
420 mii->mii_statchg = emac_mii_statchg;
421
422 ifmedia_init(&mii->mii_media, 0, emac_mediachange,
423 emac_mediastatus);
424 mii_attach(&sc->sc_dev, mii, 0xffffffff,
425 MII_PHY_ANY, MII_OFFSET_ANY, 0);
426 if (LIST_FIRST(&mii->mii_phys) == NULL) {
427 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
428 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
429 } else
430 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
431
432 ifp = &sc->sc_ethercom.ec_if;
433 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
434 ifp->if_softc = sc;
435 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
436 ifp->if_ioctl = emac_ioctl;
437 ifp->if_start = emac_start;
438 ifp->if_watchdog = emac_watchdog;
439 ifp->if_init = emac_init;
440 ifp->if_stop = emac_stop;
441 IFQ_SET_READY(&ifp->if_snd);
442
443 /*
444 * We can support 802.1Q VLAN-sized frames.
445 */
446 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
447
448 /*
449 * Attach the interface.
450 */
451 if_attach(ifp);
452 ether_ifattach(ifp, enaddr);
453
454 #ifdef EMAC_EVENT_COUNTERS
455 /*
456 * Attach the event counters.
457 */
458 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
459 NULL, sc->sc_dev.dv_xname, "rxintr");
460 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
461 NULL, sc->sc_dev.dv_xname, "txintr");
462 evcnt_attach_dynamic(&sc->sc_ev_rxde, EVCNT_TYPE_INTR,
463 NULL, sc->sc_dev.dv_xname, "rxde");
464 evcnt_attach_dynamic(&sc->sc_ev_txde, EVCNT_TYPE_INTR,
465 NULL, sc->sc_dev.dv_xname, "txde");
466 evcnt_attach_dynamic(&sc->sc_ev_wol, EVCNT_TYPE_INTR,
467 NULL, sc->sc_dev.dv_xname, "wol");
468 evcnt_attach_dynamic(&sc->sc_ev_serr, EVCNT_TYPE_INTR,
469 NULL, sc->sc_dev.dv_xname, "serr");
470 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
471 NULL, sc->sc_dev.dv_xname, "intr");
472
473 evcnt_attach_dynamic(&sc->sc_ev_txreap, EVCNT_TYPE_MISC,
474 NULL, sc->sc_dev.dv_xname, "txreap");
475 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
476 NULL, sc->sc_dev.dv_xname, "txsstall");
477 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
478 NULL, sc->sc_dev.dv_xname, "txdstall");
479 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
480 NULL, sc->sc_dev.dv_xname, "txdrop");
481 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
482 NULL, sc->sc_dev.dv_xname, "tu");
483 #endif /* EMAC_EVENT_COUNTERS */
484
485 /*
486 * Make sure the interface is shutdown during reboot.
487 */
488 sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc);
489 if (sc->sc_sdhook == NULL)
490 printf("%s: WARNING: unable to establish shutdown hook\n",
491 sc->sc_dev.dv_xname);
492
493 return;
494
495 /*
496 * Free any resources we've allocated during the failed attach
497 * attempt. Do this in reverse order and fall through.
498 */
499 fail_5:
500 for (i = 0; i < EMAC_NRXDESC; i++) {
501 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
502 bus_dmamap_destroy(sc->sc_dmat,
503 sc->sc_rxsoft[i].rxs_dmamap);
504 }
505 fail_4:
506 for (i = 0; i < EMAC_TXQUEUELEN; i++) {
507 if (sc->sc_txsoft[i].txs_dmamap != NULL)
508 bus_dmamap_destroy(sc->sc_dmat,
509 sc->sc_txsoft[i].txs_dmamap);
510 }
511 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
512 fail_3:
513 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
514 fail_2:
515 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
516 sizeof(struct emac_control_data));
517 fail_1:
518 bus_dmamem_free(sc->sc_dmat, &seg, nseg);
519 fail_0:
520 return;
521 }
522
523 /*
524 * Device shutdown routine.
525 */
526 static void
527 emac_shutdown(void *arg)
528 {
529 struct emac_softc *sc = arg;
530
531 emac_stop(&sc->sc_ethercom.ec_if, 0);
532 }
533
534 /* ifnet interface function */
535 static void
536 emac_start(struct ifnet *ifp)
537 {
538 struct emac_softc *sc = ifp->if_softc;
539 struct mbuf *m0;
540 struct emac_txsoft *txs;
541 bus_dmamap_t dmamap;
542 int error, firsttx, nexttx, lasttx, ofree, seg;
543
544 lasttx = 0; /* XXX gcc */
545
546 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
547 return;
548
549 /*
550 * Remember the previous number of free descriptors.
551 */
552 ofree = sc->sc_txfree;
553
554 /*
555 * Loop through the send queue, setting up transmit descriptors
556 * until we drain the queue, or use up all available transmit
557 * descriptors.
558 */
559 for (;;) {
560 /* Grab a packet off the queue. */
561 IFQ_POLL(&ifp->if_snd, m0);
562 if (m0 == NULL)
563 break;
564
565 /*
566 * Get a work queue entry. Reclaim used Tx descriptors if
567 * we are running low.
568 */
569 if (sc->sc_txsfree < EMAC_TXQUEUE_GC) {
570 emac_txreap(sc);
571 if (sc->sc_txsfree == 0) {
572 EMAC_EVCNT_INCR(&sc->sc_ev_txsstall);
573 break;
574 }
575 }
576
577 txs = &sc->sc_txsoft[sc->sc_txsnext];
578 dmamap = txs->txs_dmamap;
579
580 /*
581 * Load the DMA map. If this fails, the packet either
   582 		 * didn't fit in the allotted number of segments, or we
   583 		 * were short on resources. If it didn't fit, we drop
   584 		 * the packet below; otherwise we stop and retry later.
585 */
586 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
587 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
588 if (error) {
589 if (error == EFBIG) {
590 EMAC_EVCNT_INCR(&sc->sc_ev_txdrop);
591 printf("%s: Tx packet consumes too many "
592 "DMA segments, dropping...\n",
593 sc->sc_dev.dv_xname);
594 IFQ_DEQUEUE(&ifp->if_snd, m0);
595 m_freem(m0);
596 continue;
597 }
598 /* Short on resources, just stop for now. */
599 break;
600 }
601
602 /*
603 * Ensure we have enough descriptors free to describe
604 * the packet.
605 */
606 if (dmamap->dm_nsegs > sc->sc_txfree) {
607 /*
608 * Not enough free descriptors to transmit this
609 * packet. We haven't committed anything yet,
610 * so just unload the DMA map, put the packet
611 * back on the queue, and punt. Notify the upper
   612 			 * layer that there are no more slots left.
613 *
614 */
615 ifp->if_flags |= IFF_OACTIVE;
616 bus_dmamap_unload(sc->sc_dmat, dmamap);
617 EMAC_EVCNT_INCR(&sc->sc_ev_txdstall);
618 break;
619 }
620
621 IFQ_DEQUEUE(&ifp->if_snd, m0);
622
623 /*
624 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
625 */
626
627 /* Sync the DMA map. */
628 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
629 BUS_DMASYNC_PREWRITE);
630
631 /*
632 * Store a pointer to the packet so that we can free it
633 * later.
634 */
635 txs->txs_mbuf = m0;
636 txs->txs_firstdesc = sc->sc_txnext;
637 txs->txs_ndesc = dmamap->dm_nsegs;
638
639 /*
640 * Initialize the transmit descriptor.
641 */
642 firsttx = sc->sc_txnext;
643 for (nexttx = sc->sc_txnext, seg = 0;
644 seg < dmamap->dm_nsegs;
645 seg++, nexttx = EMAC_NEXTTX(nexttx)) {
646 /*
647 * If this is the first descriptor we're
648 * enqueueing, don't set the TX_READY bit just
649 * yet. That could cause a race condition.
650 * We'll do it below.
651 */
652 sc->sc_txdescs[nexttx].md_data =
653 dmamap->dm_segs[seg].ds_addr;
654 sc->sc_txdescs[nexttx].md_data_len =
655 dmamap->dm_segs[seg].ds_len;
656 sc->sc_txdescs[nexttx].md_stat_ctrl =
657 (sc->sc_txdescs[nexttx].md_stat_ctrl & MAL_TX_WRAP) |
658 (nexttx == firsttx ? 0 : MAL_TX_READY) |
659 EMAC_TXC_GFCS | EMAC_TXC_GPAD;
660 lasttx = nexttx;
661 }
662
663 /* Set the LAST bit on the last segment. */
664 sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST;
665
666 /*
667 * Set up last segment descriptor to send an interrupt after
668 * that descriptor is transmitted, and bypass existing Tx
669 * descriptor reaping method (for now...).
670 */
671 sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_INTERRUPT;
672
673
674 txs->txs_lastdesc = lasttx;
675
676 /* Sync the descriptors we're using. */
677 EMAC_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
678 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
679
680 /*
681 * The entire packet chain is set up. Give the
682 * first descriptor to the chip now.
683 */
684 sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY;
685 EMAC_CDTXSYNC(sc, firsttx, 1,
686 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
687 /*
688 * Tell the EMAC that a new packet is available.
689 */
690 EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0);
691
692 /* Advance the tx pointer. */
693 sc->sc_txfree -= txs->txs_ndesc;
694 sc->sc_txnext = nexttx;
695
696 sc->sc_txsfree--;
697 sc->sc_txsnext = EMAC_NEXTTXS(sc->sc_txsnext);
698
699 #if NBPFILTER > 0
700 /*
701 * Pass the packet to any BPF listeners.
702 */
703 if (ifp->if_bpf)
704 bpf_mtap(ifp->if_bpf, m0);
705 #endif /* NBPFILTER > 0 */
706 }
707
708 if (sc->sc_txfree == 0) {
709 /* No more slots left; notify upper layer. */
710 ifp->if_flags |= IFF_OACTIVE;
711 }
712
713 if (sc->sc_txfree != ofree) {
714 /* Set a watchdog timer in case the chip flakes out. */
715 ifp->if_timer = 5;
716 }
717 }
718
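/* ifnet interface function */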
719 static int
720 emac_init(struct ifnet *ifp)
721 {
722 struct emac_softc *sc = ifp->if_softc;
723 struct emac_rxsoft *rxs;
724 const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
725 int error, i;
726
727 error = 0;
728
729 /* Cancel any pending I/O. */
730 emac_stop(ifp, 0);
731
732 /* Reset the chip to a known state. */
733 emac_reset(sc);
734
735 /*
736 * Initialise the transmit descriptor ring.
737 */
738 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
739 /* set wrap on last descriptor */
740 sc->sc_txdescs[EMAC_NTXDESC - 1].md_stat_ctrl |= MAL_TX_WRAP;
741 EMAC_CDTXSYNC(sc, 0, EMAC_NTXDESC,
742 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
743 sc->sc_txfree = EMAC_NTXDESC;
744 sc->sc_txnext = 0;
745
746 /*
747 * Initialise the transmit job descriptors.
748 */
749 for (i = 0; i < EMAC_TXQUEUELEN; i++)
750 sc->sc_txsoft[i].txs_mbuf = NULL;
751 sc->sc_txsfree = EMAC_TXQUEUELEN;
752 sc->sc_txsnext = 0;
753 sc->sc_txsdirty = 0;
754
755 /*
756 * Initialise the receiver descriptor and receive job
757 * descriptor rings.
758 */
759 for (i = 0; i < EMAC_NRXDESC; i++) {
760 rxs = &sc->sc_rxsoft[i];
761 if (rxs->rxs_mbuf == NULL) {
762 if ((error = emac_add_rxbuf(sc, i)) != 0) {
763 printf("%s: unable to allocate or map rx "
764 "buffer %d, error = %d\n",
765 sc->sc_dev.dv_xname, i, error);
766 /*
767 * XXX Should attempt to run with fewer receive
768 * XXX buffers instead of just failing.
769 */
770 emac_rxdrain(sc);
771 goto out;
772 }
773 } else
774 EMAC_INIT_RXDESC(sc, i);
775 }
776 sc->sc_rxptr = 0;
777
778 /*
779 * Set the current media.
780 */
781 mii_mediachg(&sc->sc_mii);
782
783 /*
784 * Give the transmit and receive rings to the MAL.
785 */
786 mtdcr(DCR_MAL0_TXCTP0R, EMAC_CDTXADDR(sc, 0));
787 mtdcr(DCR_MAL0_RXCTP0R, EMAC_CDRXADDR(sc, 0));
788
789 /*
790 * Load the MAC address.
791 */
792 EMAC_WRITE(sc, EMAC_IAHR, enaddr[0] << 8 | enaddr[1]);
793 EMAC_WRITE(sc, EMAC_IALR,
794 enaddr[2] << 24 | enaddr[3] << 16 | enaddr[4] << 8 | enaddr[5]);
795
796 /*
797 * Set the receive channel buffer size (in units of 16 bytes).
798 */
799 #if MCLBYTES > (4096 - 16) /* XXX! */
800 # error MCLBYTES > max rx channel buffer size
801 #endif
802 mtdcr(DCR_MAL0_RCBS0, MCLBYTES / 16);
803
804 /* Set fifos, media modes. */
805 EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);
806
807 /*
808 * Enable Individual and (possibly) Broadcast Address modes,
   809 	 * accept runt packets, and strip padding.
810 */
811 EMAC_WRITE(sc, EMAC_RMR, RMR_IAE | RMR_RRP | RMR_SP |
812 (ifp->if_flags & IFF_PROMISC ? RMR_PME : 0) |
813 (ifp->if_flags & IFF_BROADCAST ? RMR_BAE : 0));
814
815 /*
816 * Set multicast filter.
817 */
818 emac_set_filter(sc);
819
820 /*
821 * Set low- and urgent-priority request thresholds.
822 */
823 EMAC_WRITE(sc, EMAC_TMR1,
824 ((7 << TMR1_TLR_SHIFT) & TMR1_TLR_MASK) | /* 16 word burst */
825 ((15 << TMR1_TUR_SHIFT) & TMR1_TUR_MASK));
826 /*
827 * Set Transmit Request Threshold Register.
828 */
829 EMAC_WRITE(sc, EMAC_TRTR, TRTR_256);
830
831 /*
832 * Set high and low receive watermarks.
833 */
834 EMAC_WRITE(sc, EMAC_RWMR,
   835 	    30 << RWMR_RLWM_SHIFT | 64 << RWMR_RHWM_SHIFT);
836
837 /*
838 * Set frame gap.
839 */
840 EMAC_WRITE(sc, EMAC_IPGVR, 8);
841
842 /*
843 * Set interrupt status enable bits for EMAC and MAL.
844 */
845 EMAC_WRITE(sc, EMAC_ISER,
846 ISR_BP | ISR_SE | ISR_ALE | ISR_BFCS | ISR_PTLE | ISR_ORE | ISR_IRE);
847 mtdcr(DCR_MAL0_IER, MAL0_IER_DE | MAL0_IER_NWE | MAL0_IER_TO |
848 MAL0_IER_OPB | MAL0_IER_PLB);
849
850 /*
851 * Enable the transmit and receive channel on the MAL.
852 */
853 mtdcr(DCR_MAL0_RXCASR, MAL0_RXCASR_CHAN0);
854 mtdcr(DCR_MAL0_TXCASR, MAL0_TXCASR_CHAN0);
855
856 /*
857 * Enable the transmit and receive channel on the EMAC.
858 */
859 EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);
860
861 /*
862 * Start the one second MII clock.
863 */
864 callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
865
866 /*
867 * ... all done!
868 */
869 ifp->if_flags |= IFF_RUNNING;
870 ifp->if_flags &= ~IFF_OACTIVE;
871
872 out:
873 if (error) {
874 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
875 ifp->if_timer = 0;
876 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
877 }
878 return (error);
879 }
880
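/*
 * Allocate a new mbuf cluster for receive slot (idx), load it into the
 * slot's DMA map, and reinitialize the descriptor.
 */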
881 static int
882 emac_add_rxbuf(struct emac_softc *sc, int idx)
883 {
884 struct emac_rxsoft *rxs = &sc->sc_rxsoft[idx];
885 struct mbuf *m;
886 int error;
887
888 MGETHDR(m, M_DONTWAIT, MT_DATA);
889 if (m == NULL)
890 return (ENOBUFS);
891
892 MCLGET(m, M_DONTWAIT);
893 if ((m->m_flags & M_EXT) == 0) {
894 m_freem(m);
895 return (ENOBUFS);
896 }
897
898 if (rxs->rxs_mbuf != NULL)
899 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
900
901 rxs->rxs_mbuf = m;
902
903 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
904 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
905 if (error) {
906 printf("%s: can't load rx DMA map %d, error = %d\n",
907 sc->sc_dev.dv_xname, idx, error);
908 panic("emac_add_rxbuf"); /* XXX */
909 }
910
911 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
912 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
913
914 EMAC_INIT_RXDESC(sc, idx);
915
916 return (0);
917 }
918
919 /* ifnet interface function */
920 static void
921 emac_watchdog(struct ifnet *ifp)
922 {
923 struct emac_softc *sc = ifp->if_softc;
924
925 /*
   926 	 * Reap any completed transmissions before deciding whether
   927 	 * to report an error.
928 */
929 emac_txreap(sc);
930
931 if (sc->sc_txfree != EMAC_NTXDESC) {
932 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
933 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
934 sc->sc_txnext);
935 ifp->if_oerrors++;
936
937 /* Reset the interface. */
938 (void)emac_init(ifp);
939 } else if (ifp->if_flags & IFF_DEBUG)
940 printf("%s: recovered from device timeout\n",
941 sc->sc_dev.dv_xname);
942
943 /* try to get more packets going */
944 emac_start(ifp);
945 }
946
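/*
 * Free all receive buffers and unload their DMA maps.
 */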
947 static void
948 emac_rxdrain(struct emac_softc *sc)
949 {
950 struct emac_rxsoft *rxs;
951 int i;
952
953 for (i = 0; i < EMAC_NRXDESC; i++) {
954 rxs = &sc->sc_rxsoft[i];
955 if (rxs->rxs_mbuf != NULL) {
956 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
957 m_freem(rxs->rxs_mbuf);
958 rxs->rxs_mbuf = NULL;
959 }
960 }
961 }
962
963 /* ifnet interface function */
964 static void
965 emac_stop(struct ifnet *ifp, int disable)
966 {
967 struct emac_softc *sc = ifp->if_softc;
968 struct emac_txsoft *txs;
969 int i;
970
971 /* Stop the one second clock. */
972 callout_stop(&sc->sc_callout);
973
974 /* Down the MII */
975 mii_down(&sc->sc_mii);
976
977 /* Disable interrupts. */
978 #if 0 /* Can't disable MAL interrupts without a reset... */
979 EMAC_WRITE(sc, EMAC_ISER, 0);
980 #endif
981 mtdcr(DCR_MAL0_IER, 0);
982
983 /* Disable the receive and transmit channels. */
984 mtdcr(DCR_MAL0_RXCARR, MAL0_RXCARR_CHAN0);
985 mtdcr(DCR_MAL0_TXCARR, MAL0_TXCARR_CHAN0 | MAL0_TXCARR_CHAN1);
986
   987 	/* Clear the MAC transmit and receive enables. */
988 EMAC_WRITE(sc, EMAC_MR0,
989 EMAC_READ(sc, EMAC_MR0) & ~(MR0_TXE | MR0_RXE));
990
991 /* Release any queued transmit buffers. */
992 for (i = 0; i < EMAC_TXQUEUELEN; i++) {
993 txs = &sc->sc_txsoft[i];
994 if (txs->txs_mbuf != NULL) {
995 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
996 m_freem(txs->txs_mbuf);
997 txs->txs_mbuf = NULL;
998 }
999 }
1000
1001 if (disable)
1002 emac_rxdrain(sc);
1003
1004 /*
1005 * Mark the interface down and cancel the watchdog timer.
1006 */
1007 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1008 ifp->if_timer = 0;
1009 }
1010
1011 /* ifnet interface function */
1012 static int
1013 emac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1014 {
1015 struct emac_softc *sc = ifp->if_softc;
1016 struct ifreq *ifr = (struct ifreq *)data;
1017 int s, error;
1018
1019 s = splnet();
1020
1021 switch (cmd) {
1022 case SIOCSIFMEDIA:
1023 case SIOCGIFMEDIA:
1024 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1025 break;
1026
1027 default:
1028 error = ether_ioctl(ifp, cmd, data);
1029 if (error == ENETRESET) {
1030 /*
1031 * Multicast list has changed; set the hardware filter
1032 * accordingly.
1033 */
1034 if (ifp->if_flags & IFF_RUNNING)
1035 error = emac_set_filter(sc);
1036 else
1037 error = 0;
1038 }
1039 break;
1040 }
1041
1042 /* try to get more packets going */
1043 emac_start(ifp);
1044
1045 splx(s);
1046 return (error);
1047 }
1048
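/*
 * Soft-reset the MAL and the EMAC core, then reprogram the MAL
 * configuration register.
 */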
1049 static void
1050 emac_reset(struct emac_softc *sc)
1051 {
1052
1053 /* reset the MAL */
1054 mtdcr(DCR_MAL0_CFG, MAL0_CFG_SR);
1055
1056 EMAC_WRITE(sc, EMAC_MR0, MR0_SRST);
1057 delay(5);
1058
  1059 	/* XXX: poll until MR0_SRST clears, with a timeout, instead? */
1060 EMAC_WRITE(sc, EMAC_MR0, EMAC_READ(sc, EMAC_MR0) & ~MR0_SRST);
1061
1062 /* XXX clear interrupts in EMAC_ISR just to be sure?? */
1063
1064 /* set the MAL config register */
1065 mtdcr(DCR_MAL0_CFG, MAL0_CFG_PLBB | MAL0_CFG_OPBBL | MAL0_CFG_LEA |
1066 MAL0_CFG_SD | MAL0_CFG_PLBLT);
1067 }
1068
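/*
 * Program the receive filter: hash each multicast address into the GAHT
 * registers, or fall back to receiving all multicasts for address ranges.
 */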
1069 static int
1070 emac_set_filter(struct emac_softc *sc)
1071 {
1072 struct ether_multistep step;
1073 struct ether_multi *enm;
1074 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1075 uint32_t rmr, crc, gaht[4] = {0, 0, 0, 0};
1076 int category, cnt = 0;
1077
1078 rmr = EMAC_READ(sc, EMAC_RMR);
1079 rmr &= ~(RMR_PMME | RMR_MAE);
1080 ifp->if_flags &= ~IFF_ALLMULTI;
1081
1082 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
1083 while (enm != NULL) {
1084 if (memcmp(enm->enm_addrlo,
1085 enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
1086 /*
1087 * We must listen to a range of multicast addresses.
1088 * For now, just accept all multicasts, rather than
1089 * trying to set only those filter bits needed to match
1090 * the range. (At this time, the only use of address
1091 * ranges is for IP multicast routing, for which the
1092 * range is big enough to require all bits set.)
1093 */
1094 gaht[0] = gaht[1] = gaht[2] = gaht[3] = 0xffff;
1095 break;
1096 }
1097
1098 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1099
1100 /* Just want the 6 most significant bits. */
1101 category = crc >> 26;
1102 EMAC_SET_FILTER(gaht, category);
1103
1104 ETHER_NEXT_MULTI(step, enm);
1105 cnt++;
1106 }
1107
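	/*
	 * If every bit in every hash word is set, the hash filter accepts
	 * everything anyway, so use the all-multicast (PMME) mode instead.
	 */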
1108 if ((gaht[0] & gaht[1] & gaht[2] & gaht[3]) == 0xffff) {
1109 /* All categories are true. */
1110 ifp->if_flags |= IFF_ALLMULTI;
1111 rmr |= RMR_PMME;
1112 } else if (cnt != 0) {
1113 /* Some categories are true. */
1114 EMAC_WRITE(sc, EMAC_GAHT1, gaht[0]);
1115 EMAC_WRITE(sc, EMAC_GAHT2, gaht[1]);
1116 EMAC_WRITE(sc, EMAC_GAHT3, gaht[2]);
1117 EMAC_WRITE(sc, EMAC_GAHT4, gaht[3]);
1118
1119 rmr |= RMR_MAE;
1120 }
1121 EMAC_WRITE(sc, EMAC_RMR, rmr);
1122
1123 return 0;
1124 }
1125
1126 /*
1127 * EMAC General interrupt handler
1128 */
1129 static int
1130 emac_intr(void *arg)
1131 {
1132 struct emac_softc *sc = arg;
1133 uint32_t status;
1134
1135 EMAC_EVCNT_INCR(&sc->sc_ev_intr);
1136 status = EMAC_READ(sc, EMAC_ISR);
1137
1138 /* Clear the interrupt status bits. */
1139 EMAC_WRITE(sc, EMAC_ISR, status);
1140
1141 return (0);
1142 }
1143
1144 /*
1145 * EMAC Wake-On-LAN interrupt handler
1146 */
1147 static int
1148 emac_wol_intr(void *arg)
1149 {
1150 struct emac_softc *sc = arg;
1151
1152 EMAC_EVCNT_INCR(&sc->sc_ev_wol);
1153 printf("%s: emac_wol_intr\n", sc->sc_dev.dv_xname);
1154 return (0);
1155 }
1156
1157 /*
1158 * MAL System ERRor interrupt handler
1159 */
1160 static int
1161 emac_serr_intr(void *arg)
1162 {
1163 #ifdef EMAC_EVENT_COUNTERS
1164 struct emac_softc *sc = arg;
1165 #endif
1166 u_int32_t esr;
1167
1168 EMAC_EVCNT_INCR(&sc->sc_ev_serr);
1169 esr = mfdcr(DCR_MAL0_ESR);
1170
1171 /* Clear the interrupt status bits. */
1172 mtdcr(DCR_MAL0_ESR, esr);
1173 return (0);
1174 }
1175
1176 /*
1177 * MAL Transmit End-Of-Buffer interrupt handler.
  1178  * Called for descriptors marked MAL_TX_INTERRUPT; reaps completed packets.
1179 */
1180 static int
1181 emac_txeob_intr(void *arg)
1182 {
1183 struct emac_softc *sc = arg;
1184 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1185 int handled;
1186
1187 EMAC_EVCNT_INCR(&sc->sc_ev_txintr);
1188 handled = emac_txreap(arg);
1189
1190 /* try to get more packets going */
1191 emac_start(ifp);
1192
1193 return (handled);
1194
1195 }
1196
1197 /*
1198 * Reap completed Tx descriptors.
1199 */
1200 static int
1201 emac_txreap(struct emac_softc *sc)
1202 {
1203 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1204 struct emac_txsoft *txs;
1205 int handled, i;
1206 u_int32_t txstat;
1207
1208 EMAC_EVCNT_INCR(&sc->sc_ev_txreap);
1209 handled = 0;
1210
1211 /* Clear the interrupt */
1212 mtdcr(DCR_MAL0_TXEOBISR, mfdcr(DCR_MAL0_TXEOBISR));
1213
1214 ifp->if_flags &= ~IFF_OACTIVE;
1215
1216 /*
1217 * Go through our Tx list and free mbufs for those
1218 * frames that have been transmitted.
1219 */
1220 for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN;
1221 i = EMAC_NEXTTXS(i), sc->sc_txsfree++) {
1222 txs = &sc->sc_txsoft[i];
1223
1224 EMAC_CDTXSYNC(sc, txs->txs_lastdesc,
1225 txs->txs_dmamap->dm_nsegs,
1226 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1227
1228 txstat = sc->sc_txdescs[txs->txs_lastdesc].md_stat_ctrl;
1229 if (txstat & MAL_TX_READY)
1230 break;
1231
1232 handled = 1;
1233
1234 /*
1235 * Check for errors and collisions.
1236 */
1237 if (txstat & (EMAC_TXS_UR | EMAC_TXS_ED))
1238 ifp->if_oerrors++;
1239
1240 #ifdef EMAC_EVENT_COUNTERS
1241 if (txstat & EMAC_TXS_UR)
1242 EMAC_EVCNT_INCR(&sc->sc_ev_tu);
1243 #endif /* EMAC_EVENT_COUNTERS */
1244
1245 if (txstat & (EMAC_TXS_EC | EMAC_TXS_MC | EMAC_TXS_SC | EMAC_TXS_LC)) {
1246 if (txstat & EMAC_TXS_EC)
1247 ifp->if_collisions += 16;
1248 else if (txstat & EMAC_TXS_MC)
1249 ifp->if_collisions += 2; /* XXX? */
1250 else if (txstat & EMAC_TXS_SC)
1251 ifp->if_collisions++;
1252 if (txstat & EMAC_TXS_LC)
1253 ifp->if_collisions++;
1254 } else
1255 ifp->if_opackets++;
1256
1257 if (ifp->if_flags & IFF_DEBUG) {
1258 if (txstat & EMAC_TXS_ED)
1259 printf("%s: excessive deferral\n",
1260 sc->sc_dev.dv_xname);
1261 if (txstat & EMAC_TXS_EC)
1262 printf("%s: excessive collisions\n",
1263 sc->sc_dev.dv_xname);
1264 }
1265
1266 sc->sc_txfree += txs->txs_ndesc;
1267 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1268 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1269 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1270 m_freem(txs->txs_mbuf);
1271 txs->txs_mbuf = NULL;
1272 }
1273
1274 /* Update the dirty transmit buffer pointer. */
1275 sc->sc_txsdirty = i;
1276
1277 /*
1278 * If there are no more pending transmissions, cancel the watchdog
1279 * timer.
1280 */
1281 if (sc->sc_txsfree == EMAC_TXQUEUELEN)
1282 ifp->if_timer = 0;
1283
1284 return (handled);
1285 }
1286
1287 /*
1288 * MAL Receive End-Of-Buffer interrupt handler
1289 */
1290 static int
1291 emac_rxeob_intr(void *arg)
1292 {
1293 struct emac_softc *sc = arg;
1294 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1295 struct emac_rxsoft *rxs;
1296 struct mbuf *m;
1297 u_int32_t rxstat;
1298 int i, len;
1299
1300 EMAC_EVCNT_INCR(&sc->sc_ev_rxintr);
1301
1302 /* Clear the interrupt */
1303 mtdcr(DCR_MAL0_RXEOBISR, mfdcr(DCR_MAL0_RXEOBISR));
1304
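	/*
	 * Walk the receive ring from sc_rxptr, handing completed packets
	 * up the stack until we reach a descriptor the MAL still owns.
	 */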
1305 for (i = sc->sc_rxptr;; i = EMAC_NEXTRX(i)) {
1306 rxs = &sc->sc_rxsoft[i];
1307
1308 EMAC_CDRXSYNC(sc, i,
1309 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1310
1311 rxstat = sc->sc_rxdescs[i].md_stat_ctrl;
1312
1313 if (rxstat & MAL_RX_EMPTY)
1314 /*
1315 * We have processed all of the receive buffers.
1316 */
1317 break;
1318
1319 /*
1320 * If an error occurred, update stats, clear the status
1321 * word, and leave the packet buffer in place. It will
1322 * simply be reused the next time the ring comes around.
1323 */
1324 if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE |
1325 EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE |
1326 EMAC_RXS_IRE)) {
1327 #define PRINTERR(bit, str) \
1328 if (rxstat & (bit)) \
1329 printf("%s: receive error: %s\n", \
1330 sc->sc_dev.dv_xname, str)
1331 ifp->if_ierrors++;
1332 PRINTERR(EMAC_RXS_OE, "overrun error");
1333 PRINTERR(EMAC_RXS_BP, "bad packet");
1334 PRINTERR(EMAC_RXS_RP, "runt packet");
1335 PRINTERR(EMAC_RXS_SE, "short event");
1336 PRINTERR(EMAC_RXS_AE, "alignment error");
1337 PRINTERR(EMAC_RXS_BFCS, "bad FCS");
1338 PRINTERR(EMAC_RXS_PTL, "packet too long");
1339 PRINTERR(EMAC_RXS_ORE, "out of range error");
1340 PRINTERR(EMAC_RXS_IRE, "in range error");
1341 #undef PRINTERR
1342 EMAC_INIT_RXDESC(sc, i);
1343 continue;
1344 }
1345
1346 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1347 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1348
1349 /*
  1350 		 * No errors; receive the packet. Note that the 405GP EMAC
1351 * includes the CRC with every packet.
1352 */
1353 len = sc->sc_rxdescs[i].md_data_len - ETHER_CRC_LEN;
1354
1355 /*
1356 * If the packet is small enough to fit in a
1357 * single header mbuf, allocate one and copy
1358 * the data into it. This greatly reduces
1359 * memory consumption when we receive lots
1360 * of small packets.
1361 *
1362 * Otherwise, we add a new buffer to the receive
1363 * chain. If this fails, we drop the packet and
1364 * recycle the old buffer.
1365 */
1366 if (emac_copy_small != 0 && len <= MHLEN) {
1367 MGETHDR(m, M_DONTWAIT, MT_DATA);
1368 if (m == NULL)
1369 goto dropit;
1370 memcpy(mtod(m, void *),
1371 mtod(rxs->rxs_mbuf, void *), len);
1372 EMAC_INIT_RXDESC(sc, i);
1373 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1374 rxs->rxs_dmamap->dm_mapsize,
1375 BUS_DMASYNC_PREREAD);
1376 } else {
1377 m = rxs->rxs_mbuf;
1378 if (emac_add_rxbuf(sc, i) != 0) {
1379 dropit:
1380 ifp->if_ierrors++;
1381 EMAC_INIT_RXDESC(sc, i);
1382 bus_dmamap_sync(sc->sc_dmat,
1383 rxs->rxs_dmamap, 0,
1384 rxs->rxs_dmamap->dm_mapsize,
1385 BUS_DMASYNC_PREREAD);
1386 continue;
1387 }
1388 }
1389
1390 ifp->if_ipackets++;
1391 m->m_pkthdr.rcvif = ifp;
1392 m->m_pkthdr.len = m->m_len = len;
1393
1394 #if NBPFILTER > 0
1395 /*
1396 * Pass this up to any BPF listeners, but only
  1397 		 * pass it up the stack if it's for us.
1398 */
1399 if (ifp->if_bpf)
1400 bpf_mtap(ifp->if_bpf, m);
1401 #endif /* NBPFILTER > 0 */
1402
1403 /* Pass it on. */
1404 (*ifp->if_input)(ifp, m);
1405 }
1406
1407 /* Update the receive pointer. */
1408 sc->sc_rxptr = i;
1409
1410 return (0);
1411 }
1412
1413 /*
1414 * MAL Transmit Descriptor Error interrupt handler
1415 */
1416 static int
1417 emac_txde_intr(void *arg)
1418 {
1419 struct emac_softc *sc = arg;
1420
1421 EMAC_EVCNT_INCR(&sc->sc_ev_txde);
1422 printf("%s: emac_txde_intr\n", sc->sc_dev.dv_xname);
1423 return (0);
1424 }
1425
1426 /*
1427 * MAL Receive Descriptor Error interrupt handler
1428 */
1429 static int
1430 emac_rxde_intr(void *arg)
1431 {
1432 int i;
1433 struct emac_softc *sc = arg;
1434
1435 EMAC_EVCNT_INCR(&sc->sc_ev_rxde);
1436 printf("%s: emac_rxde_intr\n", sc->sc_dev.dv_xname);
1437 /*
1438 * XXX!
1439 * This is a bit drastic; we just drop all descriptors that aren't
1440 * "clean". We should probably send any that are up the stack.
1441 */
1442 for (i = 0; i < EMAC_NRXDESC; i++) {
1443 EMAC_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1444
1445 if (sc->sc_rxdescs[i].md_data_len != MCLBYTES) {
1446 EMAC_INIT_RXDESC(sc, i);
1447 }
1448
1449 }
1450
1451 /* Reenable the receive channel */
1452 mtdcr(DCR_MAL0_RXCASR, MAL0_RXCASR_CHAN0);
1453
1454 /* Clear the interrupt */
1455 mtdcr(DCR_MAL0_RXDEIR, mfdcr(DCR_MAL0_RXDEIR));
1456
1457 return (0);
1458 }
1459
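/*
 * Wait for a pending PHY access to complete.  Returns the STACR contents
 * on success, or 0 on timeout.
 */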
1460 static uint32_t
1461 emac_mii_wait(struct emac_softc *sc)
1462 {
1463 int i;
1464 uint32_t reg;
1465
1466 /* wait for PHY data transfer to complete */
1467 i = 0;
  1468 	while (((reg = EMAC_READ(sc, EMAC_STACR)) & STACR_OC) == 0) {
1469 delay(7);
1470 if (i++ > 5) {
1471 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1472 return (0);
1473 }
1474 }
1475 return (reg);
1476 }
1477
1478 static int
1479 emac_mii_readreg(struct device *self, int phy, int reg)
1480 {
1481 struct emac_softc *sc = (struct emac_softc *)self;
1482 uint32_t sta_reg;
1483
1484 /* wait for PHY data transfer to complete */
1485 if (emac_mii_wait(sc) == 0)
1486 return (0);
1487
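	/*
	 * Build the STA control word: PHY register address, read opcode,
	 * PHY device address, and the OPB clock selection.
	 */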
1488 sta_reg = reg << STACR_PRASHIFT;
1489 sta_reg |= STACR_READ;
1490 sta_reg |= phy << STACR_PCDASHIFT;
1491
1492 sta_reg &= ~STACR_OPBC_MASK;
1493 sta_reg |= STACR_OPBC_50MHZ;
1494
1495
1496 EMAC_WRITE(sc, EMAC_STACR, sta_reg);
1497
1498 if ((sta_reg = emac_mii_wait(sc)) == 0)
1499 return (0);
1500 sta_reg = EMAC_READ(sc, EMAC_STACR);
1501 if ((sta_reg & STACR_PHYE) != 0)
1502 return (0);
1503 return (sta_reg >> STACR_PHYDSHIFT);
1504 }
1505
1506 static void
1507 emac_mii_writereg(struct device *self, int phy, int reg, int val)
1508 {
1509 struct emac_softc *sc = (struct emac_softc *)self;
1510 uint32_t sta_reg;
1511
1512 /* wait for PHY data transfer to complete */
1513 if (emac_mii_wait(sc) == 0)
1514 return;
1515
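	/*
	 * Build the STA control word: PHY register address, write opcode,
	 * PHY device address, OPB clock selection, and the data to write.
	 */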
1516 sta_reg = reg << STACR_PRASHIFT;
1517 sta_reg |= STACR_WRITE;
1518 sta_reg |= phy << STACR_PCDASHIFT;
1519
1520 sta_reg &= ~STACR_OPBC_MASK;
1521 sta_reg |= STACR_OPBC_50MHZ;
1522
1523 sta_reg |= val << STACR_PHYDSHIFT;
1524
1525 EMAC_WRITE(sc, EMAC_STACR, sta_reg);
1526
1527 if ((sta_reg = emac_mii_wait(sc)) == 0)
1528 return;
1529 if ((sta_reg & STACR_PHYE) != 0)
1530 /* error */
1531 return;
1532 }
1533
1534 static void
1535 emac_mii_statchg(struct device *self)
1536 {
1537 struct emac_softc *sc = (void *)self;
1538
1539 if (sc->sc_mii.mii_media_active & IFM_FDX)
1540 sc->sc_mr1 |= MR1_FDE;
1541 else
1542 sc->sc_mr1 &= ~(MR1_FDE | MR1_EIFC);
1543
  1544 	/* XXX 802.3x flow control? */
1545
1546 /*
1547 * MR1 can only be written immediately after a reset...
1548 */
1549 emac_reset(sc);
1550 }
1551
1552 static void
1553 emac_mii_tick(void *arg)
1554 {
1555 struct emac_softc *sc = arg;
1556 int s;
1557
1558 if (!device_is_active(&sc->sc_dev))
1559 return;
1560
1561 s = splnet();
1562 mii_tick(&sc->sc_mii);
1563 splx(s);
1564
1565 callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
1566 }
1567
1568 /* ifmedia interface function */
1569 static void
1570 emac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1571 {
1572 struct emac_softc *sc = ifp->if_softc;
1573
1574 mii_pollstat(&sc->sc_mii);
1575
1576 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1577 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1578 }
1579
1580 /* ifmedia interface function */
1581 static int
1582 emac_mediachange(struct ifnet *ifp)
1583 {
1584 struct emac_softc *sc = ifp->if_softc;
1585
1586 if (ifp->if_flags & IFF_UP)
1587 mii_mediachg(&sc->sc_mii);
1588 return (0);
1589 }
1590