/*	$NetBSD: if_emac.c,v 1.14 2003/07/04 02:34:47 thorpej Exp $	*/

/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <powerpc/ibm4xx/dev/opbvar.h>

#include <powerpc/ibm4xx/ibm405gp.h>
#include <powerpc/ibm4xx/mal405gp.h>
#include <powerpc/ibm4xx/dcr405gp.h>
#include <powerpc/ibm4xx/dev/emacreg.h>
#include <powerpc/ibm4xx/dev/if_emacreg.h>

#include <dev/mii/miivar.h>

/*
 * Transmit descriptor list size.  There are two Tx channels, each with
 * up to 256 hardware descriptors available.  We currently use one Tx
 * channel.  We tell the upper layers that they can queue a lot of
 * packets, and we go ahead and manage up to 64 of them at a time.  We
 * allow up to 16 DMA segments per packet.
 */
#define	EMAC_NTXSEGS		16
#define	EMAC_TXQUEUELEN		64
#define	EMAC_TXQUEUELEN_MASK	(EMAC_TXQUEUELEN - 1)
#define	EMAC_TXQUEUE_GC		(EMAC_TXQUEUELEN / 4)
#define	EMAC_NTXDESC		256
#define	EMAC_NTXDESC_MASK	(EMAC_NTXDESC - 1)
#define	EMAC_NEXTTX(x)		(((x) + 1) & EMAC_NTXDESC_MASK)
#define	EMAC_NEXTTXS(x)		(((x) + 1) & EMAC_TXQUEUELEN_MASK)
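
/*
 * Note that the ring sizes above must be powers of two for the masks
 * to work: the NEXT macros advance an index modulo the ring size,
 * e.g. EMAC_NEXTTX(255) == 0 and EMAC_NEXTTXS(63) == 0.
 */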

/*
 * Receive descriptor list size.  There is one Rx channel with up to 256
 * hardware descriptors available.  We allocate 64 receive descriptors,
 * each with a 2k buffer (MCLBYTES).
 */
#define	EMAC_NRXDESC		64
#define	EMAC_NRXDESC_MASK	(EMAC_NRXDESC - 1)
#define	EMAC_NEXTRX(x)		(((x) + 1) & EMAC_NRXDESC_MASK)
#define	EMAC_PREVRX(x)		(((x) - 1) & EMAC_NRXDESC_MASK)

/*
 * Transmit/receive descriptors that are DMA'd to the EMAC.
 */
struct emac_control_data {
	struct mal_descriptor ecd_txdesc[EMAC_NTXDESC];
	struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC];
};

#define	EMAC_CDOFF(x)		offsetof(struct emac_control_data, x)
#define	EMAC_CDTXOFF(x)		EMAC_CDOFF(ecd_txdesc[(x)])
#define	EMAC_CDRXOFF(x)		EMAC_CDOFF(ecd_rxdesc[(x)])

/*
 * Software state for transmit jobs.
 */
struct emac_txsoft {
	struct mbuf *txs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive descriptors.
 */
struct emac_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct emac_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power management hook */

	struct mii_data sc_mii;		/* MII/media information */
	struct callout sc_callout;	/* tick callout */

	u_int32_t sc_mr1;		/* copy of Mode Register 1 */

	bus_dmamap_t sc_cddmamap;	/* control data dma map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Software state for transmit/receive descriptors. */
	struct emac_txsoft sc_txsoft[EMAC_TXQUEUELEN];
	struct emac_rxsoft sc_rxsoft[EMAC_NRXDESC];

	/* Control data structures. */
	struct emac_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->ecd_txdesc
#define	sc_rxdescs	sc_control_data->ecd_rxdesc

#ifdef EMAC_EVENT_COUNTERS
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxde;	/* Rx descriptor interrupts */
	struct evcnt sc_ev_txde;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_wol;		/* Wake-On-LAN interrupts */
	struct evcnt sc_ev_serr;	/* MAL system error interrupts */
	struct evcnt sc_ev_intr;	/* General EMAC interrupts */

	struct evcnt sc_ev_txreap;	/* Calls to Tx descriptor reaper */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* EMAC_EVENT_COUNTERS */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	int sc_rxptr;			/* next ready Rx descriptor/descsoft */
};

#ifdef EMAC_EVENT_COUNTERS
#define	EMAC_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	EMAC_EVCNT_INCR(ev)	/* nothing */
#endif

#define	EMAC_CDTXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDTXOFF((x)))
#define	EMAC_CDRXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDRXOFF((x)))

#define	EMAC_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > EMAC_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) *	\
		    (EMAC_NTXDESC - __x), (ops));			\
		__n -= (EMAC_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * __n, (ops)); \
} while (/*CONSTCOND*/0)

#define	EMAC_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDRXOFF((x)), sizeof(struct mal_descriptor), (ops));	\
} while (/*CONSTCOND*/0)

#define	EMAC_INIT_RXDESC(sc, x)						\
do {									\
	struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr + 2;	\
	__rxd->md_data_len = __m->m_ext.ext_size - 2;			\
	__rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT |	\
	    /* Set wrap on last descriptor. */				\
	    (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0);		\
	EMAC_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)
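
/*
 * EMAC_INIT_RXDESC hands a buffer back to the MAL: md_data_len is the
 * space available to the hardware, and on completion the MAL overwrites
 * it with the length of the received frame (see emac_rxeob_intr).  The
 * 2-byte offset keeps the IP header 4-byte aligned after the 14-byte
 * Ethernet header.
 */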

#define	EMAC_WRITE(sc, reg, val) \
	bus_space_write_stream_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	EMAC_READ(sc, reg) \
	bus_space_read_stream_4((sc)->sc_st, (sc)->sc_sh, (reg))

static int	emac_match(struct device *, struct cfdata *, void *);
static void	emac_attach(struct device *, struct device *, void *);

static int	emac_add_rxbuf(struct emac_softc *, int);
static int	emac_init(struct ifnet *);
static int	emac_ioctl(struct ifnet *, u_long, caddr_t);
static void	emac_reset(struct emac_softc *);
static void	emac_rxdrain(struct emac_softc *);
static int	emac_txreap(struct emac_softc *);
static void	emac_shutdown(void *);
static void	emac_start(struct ifnet *);
static void	emac_stop(struct ifnet *, int);
static void	emac_watchdog(struct ifnet *);

static int	emac_wol_intr(void *);
static int	emac_serr_intr(void *);
static int	emac_txeob_intr(void *);
static int	emac_rxeob_intr(void *);
static int	emac_txde_intr(void *);
static int	emac_rxde_intr(void *);
static int	emac_intr(void *);

static int	emac_mediachange(struct ifnet *);
static void	emac_mediastatus(struct ifnet *, struct ifmediareq *);
static int	emac_mii_readreg(struct device *, int, int);
static void	emac_mii_statchg(struct device *);
static void	emac_mii_tick(void *);
static uint32_t	emac_mii_wait(struct emac_softc *);
static void	emac_mii_writereg(struct device *, int, int, int);

int	emac_copy_small = 0;

CFATTACH_DECL(emac, sizeof(struct emac_softc),
    emac_match, emac_attach, NULL, NULL);

static int
emac_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct opb_attach_args *oaa = aux;

	/* match only on-chip ethernet devices */
	if (strcmp(oaa->opb_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

static void
emac_attach(struct device *parent, struct device *self, void *aux)
{
	struct opb_attach_args *oaa = aux;
	struct emac_softc *sc = (struct emac_softc *)self;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	bus_dma_segment_t seg;
	int error, i, nseg;
	uint8_t enaddr[ETHER_ADDR_LEN];

	sc->sc_st = oaa->opb_bt;
	sc->sc_sh = oaa->opb_addr;
	sc->sc_dmat = oaa->opb_dmat;

	printf(": 405GP EMAC\n");

	/*
	 * Set up Mode Register 1 - set receive and transmit FIFOs to maximum
	 * size, allow transmit of multiple packets (only channel 0 is used).
	 *
	 * XXX: Allow pause packets??
	 */
	sc->sc_mr1 = MR1_RFS_4KB | MR1_TFS_2KB | MR1_TR0_MULTIPLE;

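	/*
	 * Hook up the seven EMAC-related interrupt sources; they live
	 * at consecutive irqs, in the order established below:
	 * Wake-On-LAN, MAL system error, MAL Tx and Rx end-of-buffer,
	 * MAL Tx and Rx descriptor error, and the general EMAC
	 * interrupt.
	 */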
	intr_establish(oaa->opb_irq, IST_LEVEL, IPL_NET, emac_wol_intr, sc);
	intr_establish(oaa->opb_irq + 1, IST_LEVEL, IPL_NET, emac_serr_intr, sc);
	intr_establish(oaa->opb_irq + 2, IST_LEVEL, IPL_NET, emac_txeob_intr, sc);
	intr_establish(oaa->opb_irq + 3, IST_LEVEL, IPL_NET, emac_rxeob_intr, sc);
	intr_establish(oaa->opb_irq + 4, IST_LEVEL, IPL_NET, emac_txde_intr, sc);
	intr_establish(oaa->opb_irq + 5, IST_LEVEL, IPL_NET, emac_rxde_intr, sc);
	intr_establish(oaa->opb_irq + 6, IST_LEVEL, IPL_NET, emac_intr, sc);
	printf("%s: interrupting at irqs %d .. %d\n", sc->sc_dev.dv_xname,
	    oaa->opb_irq, oaa->opb_irq + 6);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct emac_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct emac_control_data), 1,
	    sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct emac_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}
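
	/*
	 * That was the standard bus_dma(9) sequence for the descriptor
	 * area: bus_dmamem_alloc() gets the physical memory,
	 * bus_dmamem_map() makes it visible to the kernel, and
	 * bus_dmamap_create()/bus_dmamap_load() yield the bus address
	 * (sc_cddma) from which the MAL will fetch descriptors.
	 */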

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EMAC_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	emac_reset(sc);

	/* Fetch the Ethernet address. */
	if (prop_get(dev_propdb, &sc->sc_dev, "mac-addr", enaddr,
	    sizeof(enaddr), NULL) != sizeof(enaddr)) {
		printf("%s: unable to get mac-addr property\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Initialise the media structures.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = emac_mii_readreg;
	mii->mii_writereg = emac_mii_writereg;
	mii->mii_statchg = emac_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, emac_mediachange,
	    emac_mediastatus);
	mii_attach(&sc->sc_dev, mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = emac_ioctl;
	ifp->if_start = emac_start;
	ifp->if_watchdog = emac_watchdog;
	ifp->if_init = emac_init;
	ifp->if_stop = emac_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef EMAC_EVENT_COUNTERS
	/*
	 * Attach the event counters.
	 */
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxde, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxde");
	evcnt_attach_dynamic(&sc->sc_ev_txde, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txde");
	evcnt_attach_dynamic(&sc->sc_ev_wol, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "wol");
	evcnt_attach_dynamic(&sc->sc_ev_serr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "serr");
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "intr");

	evcnt_attach_dynamic(&sc->sc_ev_txreap, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txreap");
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");
	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* EMAC_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct emac_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	return;
}

/*
 * Device shutdown routine.
 */
static void
emac_shutdown(void *arg)
{
	struct emac_softc *sc = arg;

	emac_stop(&sc->sc_ethercom.ec_if, 0);
}

/* ifnet interface function */
static void
emac_start(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct emac_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get a work queue entry.  Reclaim used Tx descriptors if
		 * we are running low.
		 */
		if (sc->sc_txsfree < EMAC_TXQUEUE_GC) {
			emac_txreap(sc);
			if (sc->sc_txsfree == 0) {
				EMAC_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In the former case we drop
		 * the packet; in the latter we stop and retry later.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				EMAC_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			EMAC_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Initialize the transmit descriptor.
		 */
		firsttx = sc->sc_txnext;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = EMAC_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the TX_READY bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].md_data =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].md_data_len =
			    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdescs[nexttx].md_stat_ctrl =
			    (sc->sc_txdescs[nexttx].md_stat_ctrl & MAL_TX_WRAP) |
			    (nexttx == firsttx ? 0 : MAL_TX_READY) |
			    EMAC_TXC_GFCS | EMAC_TXC_GPAD;
			lasttx = nexttx;
		}

		/* Set the LAST bit on the last segment. */
		sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST;

		txs->txs_lastdesc = lasttx;

		/* Sync the descriptors we're using. */
		EMAC_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY;
		EMAC_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Tell the EMAC that a new packet is available.
		 */
		EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0);
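
		/*
		 * The TMR0_GNP0 bit ("get new packet", channel 0) prods
		 * the transmitter to rescan its descriptor ring; without
		 * it the EMAC may not notice the descriptor we just
		 * marked TX_READY.
		 */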

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = EMAC_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

static int
emac_init(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_rxsoft *rxs;
	uint8_t *enaddr = LLADDR(ifp->if_sadl);
	int error, i;

	error = 0;

	/* Cancel any pending I/O. */
	emac_stop(ifp, 0);

	/* Reset the chip to a known state. */
	emac_reset(sc);

	/*
	 * Initialise the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	/* set wrap on last descriptor */
	sc->sc_txdescs[EMAC_NTXDESC - 1].md_stat_ctrl |= MAL_TX_WRAP;
	EMAC_CDTXSYNC(sc, 0, EMAC_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = EMAC_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialise the transmit job descriptors.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = EMAC_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialise the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = emac_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				emac_rxdrain(sc);
				goto out;
			}
		} else
			EMAC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Set the current media.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Give the transmit and receive rings to the MAL.
	 */
	mtdcr(DCR_MAL0_TXCTP0R, EMAC_CDTXADDR(sc, 0));
	mtdcr(DCR_MAL0_RXCTP0R, EMAC_CDRXADDR(sc, 0));

	/*
	 * Load the MAC address.
	 */
	EMAC_WRITE(sc, EMAC_IAHR, enaddr[0] << 8 | enaddr[1]);
	EMAC_WRITE(sc, EMAC_IALR,
	    enaddr[2] << 24 | enaddr[3] << 16 | enaddr[4] << 8 | enaddr[5]);
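
	/*
	 * The individual address is split across two registers: IAHR
	 * holds the two most significant bytes and IALR the remaining
	 * four, so 00:11:22:33:44:55 loads IAHR = 0x0011 and
	 * IALR = 0x22334455.
	 */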

	/*
	 * Set the receive channel buffer size (in units of 16 bytes).
	 */
#if MCLBYTES > (4096 - 16)	/* XXX! */
# error	MCLBYTES > max rx channel buffer size
#endif
	mtdcr(DCR_MAL0_RCBS0, MCLBYTES / 16);
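
	/*
	 * With the usual 2KB clusters this writes 2048 / 16 == 128;
	 * the channel buffer size tops out just under 4KB, hence the
	 * compile-time check above.
	 */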

	/* Set fifos, media modes. */
	EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);

	/*
	 * Enable Individual and (possibly) Broadcast Address modes,
	 * runt packets, and strip padding.
	 *
	 * XXX: promiscuous mode (and promiscuous multicast mode) need to be
	 * dealt with here!
	 */
	EMAC_WRITE(sc, EMAC_RMR, RMR_IAE | RMR_RRP | RMR_SP |
	    (ifp->if_flags & IFF_BROADCAST ? RMR_BAE : 0));

	/*
	 * Set low- and urgent-priority request thresholds.
	 */
	EMAC_WRITE(sc, EMAC_TMR1,
	    ((7 << TMR1_TLR_SHIFT) & TMR1_TLR_MASK) |	/* 16 word burst */
	    ((15 << TMR1_TUR_SHIFT) & TMR1_TUR_MASK));
	/*
	 * Set Transmit Request Threshold Register.
	 */
	EMAC_WRITE(sc, EMAC_TRTR, TRTR_256);

	/*
	 * Set high and low receive watermarks.
	 */
	EMAC_WRITE(sc, EMAC_RWMR,
	    30 << RWMR_RLWM_SHIFT | 64 << RWMR_RHWM_SHIFT);

	/*
	 * Set frame gap.
	 */
	EMAC_WRITE(sc, EMAC_IPGVR, 8);

	/*
	 * Set interrupt status enable bits for EMAC and MAL.
	 */
	EMAC_WRITE(sc, EMAC_ISER,
	    ISR_BP | ISR_SE | ISR_ALE | ISR_BFCS | ISR_PTLE | ISR_ORE | ISR_IRE);
	mtdcr(DCR_MAL0_IER, MAL0_IER_DE | MAL0_IER_NWE | MAL0_IER_TO |
	    MAL0_IER_OPB | MAL0_IER_PLB);

	/*
	 * Enable the transmit and receive channel on the MAL.
	 */
	mtdcr(DCR_MAL0_RXCASR, MAL0_RXCASR_CHAN0);
	mtdcr(DCR_MAL0_TXCASR, MAL0_TXCASR_CHAN0);

	/*
	 * Enable the transmit and receive channel on the EMAC.
	 */
	EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);

	/*
	 * ... all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}

static int
emac_add_rxbuf(struct emac_softc *sc, int idx)
{
	struct emac_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("emac_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EMAC_INIT_RXDESC(sc, idx);

	return (0);
}

/* ifnet interface function */
static void
emac_watchdog(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	emac_txreap(sc);

	if (sc->sc_txfree != EMAC_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void)emac_init(ifp);
	} else if (ifp->if_flags & IFF_DEBUG)
		printf("%s: recovered from device timeout\n",
		    sc->sc_dev.dv_xname);

	/* try to get more packets going */
	emac_start(ifp);
}

static void
emac_rxdrain(struct emac_softc *sc)
{
	struct emac_rxsoft *rxs;
	int i;

	for (i = 0; i < EMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/* ifnet interface function */
static void
emac_stop(struct ifnet *ifp, int disable)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_callout);

	/* Down the MII */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
#if 0	/* Can't disable EMAC interrupts without a reset... */
	EMAC_WRITE(sc, EMAC_ISER, 0);
#endif
	mtdcr(DCR_MAL0_IER, 0);

	/* Disable the receive and transmit channels. */
	mtdcr(DCR_MAL0_RXCARR, MAL0_RXCARR_CHAN0);
	mtdcr(DCR_MAL0_TXCARR, MAL0_TXCARR_CHAN0 | MAL0_TXCARR_CHAN1);

	/* Disable the transmit and receive MACs. */
	EMAC_WRITE(sc, EMAC_MR0,
	    EMAC_READ(sc, EMAC_MR0) & ~(MR0_TXE | MR0_RXE));

	/* Release any queued transmit buffers. */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		emac_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* ifnet interface function */
static int
emac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct emac_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
#if 0
			error = emac_set_filter(sc);	/* XXX not done yet */
#else
			error = emac_init(ifp);
#endif
		}
		break;
	}

	/* try to get more packets going */
	emac_start(ifp);

	splx(s);
	return (error);
}

static void
emac_reset(struct emac_softc *sc)
{

	/* reset the MAL */
	mtdcr(DCR_MAL0_CFG, MAL0_CFG_SR);

	EMAC_WRITE(sc, EMAC_MR0, MR0_SRST);
	delay(5);

	/* XXX: check if MR0_SRST is clear until a timeout instead? */
	EMAC_WRITE(sc, EMAC_MR0, EMAC_READ(sc, EMAC_MR0) & ~MR0_SRST);

	/* XXX clear interrupts in EMAC_ISR just to be sure?? */

	/* set the MAL config register */
	mtdcr(DCR_MAL0_CFG, MAL0_CFG_PLBB | MAL0_CFG_OPBBL | MAL0_CFG_LEA |
	    MAL0_CFG_SD | MAL0_CFG_PLBLT);
}

/*
 * EMAC General interrupt handler
 */
static int
emac_intr(void *arg)
{
	struct emac_softc *sc = arg;
	uint32_t status;

	EMAC_EVCNT_INCR(&sc->sc_ev_intr);
	status = EMAC_READ(sc, EMAC_ISR);

	/* Clear the interrupt status bits. */
	EMAC_WRITE(sc, EMAC_ISR, status);

	return (0);
}

/*
 * EMAC Wake-On-LAN interrupt handler
 */
static int
emac_wol_intr(void *arg)
{
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_wol);
	printf("%s: emac_wol_intr\n", sc->sc_dev.dv_xname);
	return (0);
}

/*
 * MAL System ERRor interrupt handler
 */
static int
emac_serr_intr(void *arg)
{
#ifdef EMAC_EVENT_COUNTERS
	struct emac_softc *sc = arg;
#endif
	u_int32_t esr;

	EMAC_EVCNT_INCR(&sc->sc_ev_serr);
	esr = mfdcr(DCR_MAL0_ESR);

	/* Clear the interrupt status bits. */
	mtdcr(DCR_MAL0_ESR, esr);
	return (0);
}

/*
 * MAL Transmit End-Of-Buffer interrupt handler.
 * NOTE: This shouldn't be called!
 */
static int
emac_txeob_intr(void *arg)
{
#ifdef EMAC_EVENT_COUNTERS
	struct emac_softc *sc = arg;
#endif

	EMAC_EVCNT_INCR(&sc->sc_ev_txintr);
	emac_txreap(arg);

	return (0);
}

/*
 * Reap completed Tx descriptors.
 */
static int
emac_txreap(struct emac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_txsoft *txs;
	int i;
	u_int32_t txstat;

	EMAC_EVCNT_INCR(&sc->sc_ev_txreap);

	/* Clear the interrupt */
	mtdcr(DCR_MAL0_TXEOBISR, mfdcr(DCR_MAL0_TXEOBISR));

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN;
	    i = EMAC_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		EMAC_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

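		/*
		 * The MAL clears TX_READY in the last descriptor once it
		 * has sent the frame, so a job whose last descriptor is
		 * still marked READY is still in flight, and everything
		 * after it must be left alone.
		 */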
		txstat = sc->sc_txdescs[txs->txs_lastdesc].md_stat_ctrl;
		if (txstat & MAL_TX_READY)
			break;

		/*
		 * Check for errors and collisions.
		 */
		if (txstat & (EMAC_TXS_UR | EMAC_TXS_ED))
			ifp->if_oerrors++;

#ifdef EMAC_EVENT_COUNTERS
		if (txstat & EMAC_TXS_UR)
			EMAC_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* EMAC_EVENT_COUNTERS */

		if (txstat & (EMAC_TXS_EC | EMAC_TXS_MC | EMAC_TXS_SC |
		    EMAC_TXS_LC)) {
			if (txstat & EMAC_TXS_EC)
				ifp->if_collisions += 16;
			else if (txstat & EMAC_TXS_MC)
				ifp->if_collisions += 2;	/* XXX? */
			else if (txstat & EMAC_TXS_SC)
				ifp->if_collisions++;
			if (txstat & EMAC_TXS_LC)
				ifp->if_collisions++;
		} else
			ifp->if_opackets++;

		if (ifp->if_flags & IFF_DEBUG) {
			if (txstat & EMAC_TXS_ED)
				printf("%s: excessive deferral\n",
				    sc->sc_dev.dv_xname);
			if (txstat & EMAC_TXS_EC)
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
		}

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == EMAC_TXQUEUELEN)
		ifp->if_timer = 0;

	return (0);
}

/*
 * MAL Receive End-Of-Buffer interrupt handler
 */
static int
emac_rxeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t rxstat;
	int i, len;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxintr);

	/* Clear the interrupt */
	mtdcr(DCR_MAL0_RXEOBISR, mfdcr(DCR_MAL0_RXEOBISR));

	for (i = sc->sc_rxptr;; i = EMAC_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].md_stat_ctrl;

		if (rxstat & MAL_RX_EMPTY)
			/*
			 * We have processed all of the receive buffers.
			 */
			break;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 */
		if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE |
		    EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE |
		    EMAC_RXS_IRE)) {
#define	PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    sc->sc_dev.dv_xname, str)
			ifp->if_ierrors++;
			PRINTERR(EMAC_RXS_OE, "overrun error");
			PRINTERR(EMAC_RXS_BP, "bad packet");
			PRINTERR(EMAC_RXS_RP, "runt packet");
			PRINTERR(EMAC_RXS_SE, "short event");
			PRINTERR(EMAC_RXS_AE, "alignment error");
			PRINTERR(EMAC_RXS_BFCS, "bad FCS");
			PRINTERR(EMAC_RXS_PTL, "packet too long");
			PRINTERR(EMAC_RXS_ORE, "out of range error");
			PRINTERR(EMAC_RXS_IRE, "in range error");
#undef PRINTERR
			EMAC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, the 405GP emac
		 * includes the CRC with every packet.
		 */
		len = sc->sc_rxdescs[i].md_data_len;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (emac_copy_small != 0 && len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(rxs->rxs_mbuf, caddr_t), len);
			EMAC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (emac_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				EMAC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		ifp->if_ipackets++;
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
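
		/*
		 * M_HASFCS tells the stack that the trailing 4-byte CRC
		 * is still in the buffer (the 405GP EMAC always delivers
		 * it), so ether_input() will trim it for us.
		 */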

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	return (0);
}

/*
 * MAL Transmit Descriptor Error interrupt handler
 */
static int
emac_txde_intr(void *arg)
{
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_txde);
	printf("%s: emac_txde_intr\n", sc->sc_dev.dv_xname);
	return (0);
}

/*
 * MAL Receive Descriptor Error interrupt handler
 */
static int
emac_rxde_intr(void *arg)
{
	int i;
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxde);
	printf("%s: emac_rxde_intr\n", sc->sc_dev.dv_xname);
	/*
	 * XXX!
	 * This is a bit drastic; we just drop all descriptors that aren't
	 * "clean".  We should probably send any that are up the stack.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (sc->sc_rxdescs[i].md_data_len != MCLBYTES) {
			EMAC_INIT_RXDESC(sc, i);
		}
	}

	/* Reenable the receive channel */
	mtdcr(DCR_MAL0_RXCASR, MAL0_RXCASR_CHAN0);

	/* Clear the interrupt */
	mtdcr(DCR_MAL0_RXDEIR, mfdcr(DCR_MAL0_RXDEIR));

	return (0);
}

static uint32_t
emac_mii_wait(struct emac_softc *sc)
{
	int i;
	uint32_t reg;

	/* wait for PHY data transfer to complete */
	i = 0;
	while (((reg = EMAC_READ(sc, EMAC_STACR)) & STACR_OC) == 0) {
		delay(7);
		if (i++ > 5) {
			printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
			return (0);
		}
	}
	return (reg);
}

static int
emac_mii_readreg(struct device *self, int phy, int reg)
{
	struct emac_softc *sc = (struct emac_softc *)self;
	uint32_t sta_reg;

	/* wait for PHY data transfer to complete */
	if (emac_mii_wait(sc) == 0)
		return (0);

	sta_reg = reg << STACR_PRASHIFT;
	sta_reg |= STACR_READ;
	sta_reg |= phy << STACR_PCDASHIFT;

	sta_reg &= ~STACR_OPBC_MASK;
	sta_reg |= STACR_OPBC_50MHZ;

	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if ((sta_reg = emac_mii_wait(sc)) == 0)
		return (0);
	sta_reg = EMAC_READ(sc, EMAC_STACR);
	if ((sta_reg & STACR_PHYE) != 0)
		return (0);
	return (sta_reg >> STACR_PHYDSHIFT);
}
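
/*
 * MII access goes through the EMAC's STA control register (STACR):
 * wait for the OC ("operation complete") bit, program the PHY address,
 * register number, opcode and OPB clock ratio, then wait for OC again.
 * STACR_PHYE reports a PHY error; on a read, the data appears in the
 * high-order bits (STACR_PHYDSHIFT).
 */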

static void
emac_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct emac_softc *sc = (struct emac_softc *)self;
	uint32_t sta_reg;

	/* wait for PHY data transfer to complete */
	if (emac_mii_wait(sc) == 0)
		return;

	sta_reg = reg << STACR_PRASHIFT;
	sta_reg |= STACR_WRITE;
	sta_reg |= phy << STACR_PCDASHIFT;

	sta_reg &= ~STACR_OPBC_MASK;
	sta_reg |= STACR_OPBC_50MHZ;

	sta_reg |= val << STACR_PHYDSHIFT;

	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if ((sta_reg = emac_mii_wait(sc)) == 0)
		return;
	if ((sta_reg & STACR_PHYE) != 0)
		/* error */
		return;
}

static void
emac_mii_statchg(struct device *self)
{
	struct emac_softc *sc = (void *)self;

	if (sc->sc_mii.mii_media_active & IFM_FDX)
		sc->sc_mr1 |= MR1_FDE;
	else
		sc->sc_mr1 &= ~(MR1_FDE | MR1_EIFC);

	/* XXX 802.3x flow control? */

	/*
	 * MR1 can only be written immediately after a reset...
	 */
	emac_reset(sc);
}

static void
emac_mii_tick(void *arg)
{
	struct emac_softc *sc = arg;
	int s;

	if ((sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
}

/* ifmedia interface function */
static void
emac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emac_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);

	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/* ifmedia interface function */
static int
emac_mediachange(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}