/*	$NetBSD: if_emac.c,v 1.6 2002/08/13 04:57:49 simonb Exp $	*/
2
3 /*
4 * Copyright 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include "bpfilter.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/mbuf.h>
43 #include <sys/kernel.h>
44 #include <sys/socket.h>
45 #include <sys/ioctl.h>
46
47 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
48
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/if_ether.h>
53
54 #if NBPFILTER > 0
55 #include <net/bpf.h>
56 #endif
57
58 #include <powerpc/ibm4xx/dev/opbvar.h>
59
60 #include <powerpc/ibm4xx/ibm405gp.h>
61 #include <powerpc/ibm4xx/mal405gp.h>
62 #include <powerpc/ibm4xx/dcr405gp.h>
63 #include <powerpc/ibm4xx/emacreg.h>
64 #include <powerpc/ibm4xx/dev/if_emacreg.h>
65
66 #include <dev/mii/miivar.h>
67
68 /*
69 * Transmit descriptor list size. There are two Tx channels, each with
70 * up to 256 hardware descriptors available. We currently use one Tx
71 * channel. We tell the upper layers that they can queue a lot of
72 * packets, and we go ahead and manage up to 64 of them at a time. We
73 * allow up to 16 DMA segments per packet.
74 */
#define	EMAC_NTXSEGS		16	/* max DMA segments per Tx packet */
#define	EMAC_TXQUEUELEN		64	/* Tx jobs managed at a time */
#define	EMAC_TXQUEUELEN_MASK	(EMAC_TXQUEUELEN - 1)
#define	EMAC_TXQUEUE_GC		(EMAC_TXQUEUELEN / 4)	/* reap threshold */
#define	EMAC_NTXDESC		256	/* hardware Tx descriptors in ring */
#define	EMAC_NTXDESC_MASK	(EMAC_NTXDESC - 1)
/* Ring-index advance; masks rely on the sizes being powers of two. */
#define	EMAC_NEXTTX(x)		(((x) + 1) & EMAC_NTXDESC_MASK)
#define	EMAC_NEXTTXS(x)		(((x) + 1) & EMAC_TXQUEUELEN_MASK)

/*
 * Receive descriptor list size.  There is one Rx channel with up to 256
 * hardware descriptors available.  We allocate 64 receive descriptors,
 * each with a 2k buffer (MCLBYTES).
 */
#define	EMAC_NRXDESC		64
#define	EMAC_NRXDESC_MASK	(EMAC_NRXDESC - 1)
#define	EMAC_NEXTRX(x)		(((x) + 1) & EMAC_NRXDESC_MASK)
#define	EMAC_PREVRX(x)		(((x) - 1) & EMAC_NRXDESC_MASK)

/*
 * Transmit/receive descriptors that are DMA'd to the EMAC.
 * Both rings live in one contiguous DMA-safe allocation.
 */
struct emac_control_data {
	struct mal_descriptor ecd_txdesc[EMAC_NTXDESC];
	struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC];
};

/* Byte offset of descriptor (x) within the control-data DMA area. */
#define	EMAC_CDOFF(x)		offsetof(struct emac_control_data, x)
#define	EMAC_CDTXOFF(x)		EMAC_CDOFF(ecd_txdesc[(x)])
#define	EMAC_CDRXOFF(x)		EMAC_CDOFF(ecd_rxdesc[(x)])
105
106 /*
107 * Software state for transmit jobs.
108 */
/*
 * Software state for transmit jobs.  One job describes a whole packet,
 * which may occupy several hardware descriptors.
 */
struct emac_txsoft {
	struct mbuf *txs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive descriptors.  One entry per hardware
 * Rx descriptor (one mbuf cluster each).
 */
struct emac_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
124
125 /*
126 * Software state per device.
127 */
/*
 * Software state per device.
 * sc_dev must be the first member: emac_attach() casts the generic
 * "struct device *self" directly to "struct emac_softc *".
 */
struct emac_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power management hook */

	struct mii_data sc_mii;		/* MII/media information */
	struct callout sc_callout;	/* tick callout */

	u_int32_t sc_mr1;		/* copy of Mode Register 1 */

	bus_dmamap_t sc_cddmamap;	/* control data dma map */
	/* Shorthand: bus address of the control-data area. */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Software state for transmit/receive descriptors. */
	struct emac_txsoft sc_txsoft[EMAC_TXQUEUELEN];
	struct emac_rxsoft sc_rxsoft[EMAC_NRXDESC];

	/* Control data structures (kernel-virtual view of the DMA area). */
	struct emac_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->ecd_txdesc
#define	sc_rxdescs	sc_control_data->ecd_rxdesc

#ifdef EMAC_EVENT_COUNTERS
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxde;	/* Rx descriptor interrupts */
	struct evcnt sc_ev_txde;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_wol;		/* Wake-On-Lan interrupts */
	struct evcnt sc_ev_serr;	/* MAL system error interrupts */
	struct evcnt sc_ev_intr;	/* General EMAC interrupts */

	struct evcnt sc_ev_txreap;	/* Calls to Tx descriptor reaper */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* EMAC_EVENT_COUNTERS */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	int sc_rxptr;			/* next ready RX descriptor/descsoft */
};
179
/* Event-counter bump; compiles away when counters are disabled. */
#ifdef EMAC_EVENT_COUNTERS
#define	EMAC_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	EMAC_EVCNT_INCR(ev)	/* nothing */
#endif

/* Bus (DMA) address of Tx/Rx descriptor (x) in the control-data area. */
#define	EMAC_CDTXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDTXOFF((x)))
#define	EMAC_CDRXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDRXOFF((x)))

/*
 * Sync (n) Tx descriptors starting at index (x), splitting into two
 * bus_dmamap_sync() calls when the range wraps the end of the ring.
 */
#define	EMAC_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > EMAC_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) *	\
		    (EMAC_NTXDESC - __x), (ops));			\
		__n -= (EMAC_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * __n, (ops)); \
} while (/*CONSTCOND*/0)

/* Sync a single Rx descriptor. */
#define	EMAC_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDRXOFF((x)), sizeof(struct mal_descriptor), (ops));	\
} while (/*CONSTCOND*/0)

/*
 * (Re)initialize Rx descriptor (x) to point at its slot's mbuf cluster
 * and hand it back to the hardware (MAL_RX_EMPTY).
 */
#define	EMAC_INIT_RXDESC(sc, x)						\
do {									\
	struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr + 2;	\
	__rxd->md_data_len = __m->m_ext.ext_size - 2;			\
	__rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT |		\
	    /* Set wrap on last descriptor. */				\
	    (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0);		\
	EMAC_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

/* EMAC register access (stream variants, i.e. no endian conversion). */
#define	EMAC_WRITE(sc, reg, val) \
	bus_space_write_stream_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	EMAC_READ(sc, reg) \
	bus_space_read_stream_4((sc)->sc_st, (sc)->sc_sh, (reg))
241
/* Autoconfiguration glue. */
static int	emac_match(struct device *, struct cfdata *, void *);
static void	emac_attach(struct device *, struct device *, void *);

/* ifnet entry points and driver helpers. */
static int	emac_add_rxbuf(struct emac_softc *, int);
static int	emac_init(struct ifnet *);
static int	emac_ioctl(struct ifnet *, u_long, caddr_t);
static void	emac_reset(struct emac_softc *);
static void	emac_rxdrain(struct emac_softc *);
static int	emac_txreap(struct emac_softc *);
static void	emac_shutdown(void *);
static void	emac_start(struct ifnet *);
static void	emac_stop(struct ifnet *, int);
static void	emac_watchdog(struct ifnet *);

/* Interrupt handlers, one per MAL/EMAC interrupt source. */
static int	emac_wol_intr(void *);
static int	emac_serr_intr(void *);
static int	emac_txeob_intr(void *);
static int	emac_rxeob_intr(void *);
static int	emac_txde_intr(void *);
static int	emac_rxde_intr(void *);
static int	emac_intr(void *);

/* MII/media support. */
static int	emac_mediachange(struct ifnet *);
static void	emac_mediastatus(struct ifnet *, struct ifmediareq *);
static int	emac_mii_readreg(struct device *, int, int);
static void	emac_mii_statchg(struct device *);
static void	emac_mii_tick(void *);
static uint32_t	emac_mii_wait(struct emac_softc *);
static void	emac_mii_writereg(struct device *, int, int, int);

/*
 * Tunable; presumably a size threshold below which received packets are
 * copied into a fresh mbuf (used in the Rx path, not visible in this
 * chunk) -- confirm against the rest of the file.
 */
int emac_copy_small = 0;

/* Autoconfiguration attachment record. */
struct cfattach emac_ca = {
	sizeof(struct emac_softc), emac_match, emac_attach
};
277
278 static int
279 emac_match(struct device *parent, struct cfdata *cf, void *aux)
280 {
281 struct opb_attach_args *oaa = aux;
282
283 /* match only on-chip ethernet devices */
284 if (strcmp(oaa->opb_name, cf->cf_driver->cd_name) == 0)
285 return (1);
286
287 return (0);
288 }
289
/*
 * Autoconfiguration attach: map the device registers, establish the
 * seven EMAC/MAL interrupts, allocate and DMA-map the descriptor rings
 * and per-packet DMA maps, probe the MII/PHY, and register the network
 * interface.  On failure, resources are released in reverse order via
 * the fail_* labels.
 */
static void
emac_attach(struct device *parent, struct device *self, void *aux)
{
	struct opb_attach_args *oaa = aux;
	/* Safe cast: sc_dev is the first member of struct emac_softc. */
	struct emac_softc *sc = (struct emac_softc *)self;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	bus_dma_segment_t seg;
	int error, i, nseg;

	sc->sc_st = galaxy_make_bus_space_tag(0, 0);
	sc->sc_sh = oaa->opb_addr;
	sc->sc_dmat = oaa->opb_dmat;

	printf(": 405GP EMAC\n");

	/*
	 * Set up Mode Register 1 - set receive and transmit FIFOs to maximum
	 * size, allow transmit of multiple packets (only channel 0 is used).
	 *
	 * XXX: Allow pause packets??
	 */
	sc->sc_mr1 = MR1_RFS_4KB | MR1_TFS_2KB | MR1_TR0_MULTIPLE;

	/*
	 * The EMAC uses seven consecutive irq lines.
	 * NOTE(review): intr_establish() return values are not checked,
	 * and sc_callout is never passed to callout_init() anywhere in
	 * this view -- confirm both against the rest of the file.
	 */
	intr_establish(oaa->opb_irq , IST_LEVEL, IPL_NET, emac_wol_intr, sc);
	intr_establish(oaa->opb_irq + 1, IST_LEVEL, IPL_NET, emac_serr_intr, sc);
	intr_establish(oaa->opb_irq + 2, IST_LEVEL, IPL_NET, emac_txeob_intr, sc);
	intr_establish(oaa->opb_irq + 3, IST_LEVEL, IPL_NET, emac_rxeob_intr, sc);
	intr_establish(oaa->opb_irq + 4, IST_LEVEL, IPL_NET, emac_txde_intr, sc);
	intr_establish(oaa->opb_irq + 5, IST_LEVEL, IPL_NET, emac_rxde_intr, sc);
	intr_establish(oaa->opb_irq + 6, IST_LEVEL, IPL_NET, emac_intr, sc);
	printf("%s: interrupting at irqs %d .. %d\n", sc->sc_dev.dv_xname,
	    oaa->opb_irq, oaa->opb_irq + 6);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct emac_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct emac_control_data), 1,
	    sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct emac_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps (up to EMAC_NTXSEGS
	 * segments per packet).
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EMAC_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps (one cluster, one segment).
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	emac_reset(sc);

	/*
	 * board_data.mac_address_local: MAC address supplied by board
	 * firmware/configuration (defined outside this file).
	 */
	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(board_data.mac_address_local));

	/*
	 * Initialise the media structures.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = emac_mii_readreg;
	mii->mii_writereg = emac_mii_writereg;
	mii->mii_statchg = emac_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, emac_mediachange,
	    emac_mediastatus);
	mii_attach(&sc->sc_dev, mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY found: fall back to a "none" medium. */
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);

	/* (ifp already points here; reassignment kept from original.) */
	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = emac_ioctl;
	ifp->if_start = emac_start;
	ifp->if_watchdog = emac_watchdog;
	ifp->if_init = emac_init;
	ifp->if_stop = emac_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, board_data.mac_address_local);

#ifdef EMAC_EVENT_COUNTERS
	/*
	 * Attach the event counters.
	 */
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxde, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxde");
	evcnt_attach_dynamic(&sc->sc_ev_txde, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txde");
	evcnt_attach_dynamic(&sc->sc_ev_wol, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "wol");
	evcnt_attach_dynamic(&sc->sc_ev_serr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "serr");
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "intr");

	evcnt_attach_dynamic(&sc->sc_ev_txreap, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txreap");
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");
	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* EMAC_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct emac_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	return;
}
501
502 /*
503 * Device shutdown routine.
504 */
505 static void
506 emac_shutdown(void *arg)
507 {
508 struct emac_softc *sc = arg;
509
510 emac_stop(&sc->sc_ethercom.ec_if, 0);
511 }
512
513 /* ifnet interface function */
514 static void
515 emac_start(struct ifnet *ifp)
516 {
517 struct emac_softc *sc = ifp->if_softc;
518 struct mbuf *m0;
519 struct emac_txsoft *txs;
520 bus_dmamap_t dmamap;
521 int error, firsttx, nexttx, lasttx, ofree, seg;
522
523 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
524 return;
525
526 /*
527 * Remember the previous number of free descriptors.
528 */
529 ofree = sc->sc_txfree;
530
531 /*
532 * Loop through the send queue, setting up transmit descriptors
533 * until we drain the queue, or use up all available transmit
534 * descriptors.
535 */
536 for (;;) {
537 /* Grab a packet off the queue. */
538 IFQ_POLL(&ifp->if_snd, m0);
539 if (m0 == NULL)
540 break;
541
542 /*
543 * Get a work queue entry. Reclaim used Tx descriptors if
544 * we are running low.
545 */
546 if (sc->sc_txsfree < EMAC_TXQUEUE_GC) {
547 emac_txreap(sc);
548 if (sc->sc_txsfree == 0) {
549 EMAC_EVCNT_INCR(&sc->sc_ev_txsstall);
550 break;
551 }
552 }
553
554 txs = &sc->sc_txsoft[sc->sc_txsnext];
555 dmamap = txs->txs_dmamap;
556
557 /*
558 * Load the DMA map. If this fails, the packet either
559 * didn't fit in the alloted number of segments, or we
560 * were short on resources. In this case, we'll copy
561 * and try again.
562 */
563 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
564 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
565 if (error) {
566 if (error == EFBIG) {
567 EMAC_EVCNT_INCR(&sc->sc_ev_txdrop);
568 printf("%s: Tx packet consumes too many "
569 "DMA segments, dropping...\n",
570 sc->sc_dev.dv_xname);
571 IFQ_DEQUEUE(&ifp->if_snd, m0);
572 m_freem(m0);
573 continue;
574 }
575 /* Short on resources, just stop for now. */
576 break;
577 }
578
579 /*
580 * Ensure we have enough descriptors free to describe
581 * the packet.
582 */
583 if (dmamap->dm_nsegs > sc->sc_txfree) {
584 /*
585 * Not enough free descriptors to transmit this
586 * packet. We haven't committed anything yet,
587 * so just unload the DMA map, put the packet
588 * back on the queue, and punt. Notify the upper
589 * layer that there are not more slots left.
590 *
591 */
592 ifp->if_flags |= IFF_OACTIVE;
593 bus_dmamap_unload(sc->sc_dmat, dmamap);
594 EMAC_EVCNT_INCR(&sc->sc_ev_txdstall);
595 break;
596 }
597
598 IFQ_DEQUEUE(&ifp->if_snd, m0);
599
600 /*
601 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
602 */
603
604 /* Sync the DMA map. */
605 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
606 BUS_DMASYNC_PREWRITE);
607
608 /*
609 * Store a pointer to the packet so that we can free it
610 * later.
611 */
612 txs->txs_mbuf = m0;
613 txs->txs_firstdesc = sc->sc_txnext;
614 txs->txs_ndesc = dmamap->dm_nsegs;
615
616 /*
617 * Initialize the transmit descriptor.
618 */
619 firsttx = sc->sc_txnext;
620 for (nexttx = sc->sc_txnext, seg = 0;
621 seg < dmamap->dm_nsegs;
622 seg++, nexttx = EMAC_NEXTTX(nexttx)) {
623 /*
624 * If this is the first descriptor we're
625 * enqueueing, don't set the TX_READY bit just
626 * yet. That could cause a race condition.
627 * We'll do it below.
628 */
629 sc->sc_txdescs[nexttx].md_data =
630 dmamap->dm_segs[seg].ds_addr;
631 sc->sc_txdescs[nexttx].md_data_len =
632 dmamap->dm_segs[seg].ds_len;
633 sc->sc_txdescs[nexttx].md_stat_ctrl =
634 (sc->sc_txdescs[nexttx].md_stat_ctrl & MAL_TX_WRAP) |
635 (nexttx == firsttx ? 0 : MAL_TX_READY) |
636 EMAC_TXC_GFCS | EMAC_TXC_GPAD;
637 lasttx = nexttx;
638 }
639
640 /* Set the LAST bit on the last segment. */
641 sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST;
642
643 txs->txs_lastdesc = lasttx;
644
645 /* Sync the descriptors we're using. */
646 EMAC_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
647 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
648
649 /*
650 * The entire packet chain is set up. Give the
651 * first descriptor to the chip now.
652 */
653 sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY;
654 EMAC_CDTXSYNC(sc, firsttx, 1,
655 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
656 /*
657 * Tell the EMAC that a new packet is available.
658 */
659 EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0);
660
661 /* Advance the tx pointer. */
662 sc->sc_txfree -= txs->txs_ndesc;
663 sc->sc_txnext = nexttx;
664
665 sc->sc_txsfree--;
666 sc->sc_txsnext = EMAC_NEXTTXS(sc->sc_txsnext);
667
668 #if NBPFILTER > 0
669 /*
670 * Pass the packet to any BPF listeners.
671 */
672 if (ifp->if_bpf)
673 bpf_mtap(ifp->if_bpf, m0);
674 #endif /* NBPFILTER > 0 */
675 }
676
677 if (txs == NULL || sc->sc_txfree == 0) {
678 /* No more slots left; notify upper layer. */
679 ifp->if_flags |= IFF_OACTIVE;
680 }
681
682 if (sc->sc_txfree != ofree) {
683 /* Set a watchdog timer in case the chip flakes out. */
684 ifp->if_timer = 5;
685 }
686 }
687
688 static int
689 emac_init(struct ifnet *ifp)
690 {
691 struct emac_softc *sc = ifp->if_softc;
692 struct emac_rxsoft *rxs;
693 unsigned char *enaddr = board_data.mac_address_local;
694 int error, i;
695
696 error = 0;
697
698 /* Cancel any pending I/O. */
699 emac_stop(ifp, 0);
700
701 /* Reset the chip to a known state. */
702 emac_reset(sc);
703
704 /*
705 * Initialise the transmit descriptor ring.
706 */
707 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
708 /* set wrap on last descriptor */
709 sc->sc_txdescs[EMAC_NTXDESC - 1].md_stat_ctrl |= MAL_TX_WRAP;
710 EMAC_CDTXSYNC(sc, 0, EMAC_NTXDESC,
711 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
712 sc->sc_txfree = EMAC_NTXDESC;
713 sc->sc_txnext = 0;
714
715 /*
716 * Initialise the transmit job descriptors.
717 */
718 for (i = 0; i < EMAC_TXQUEUELEN; i++)
719 sc->sc_txsoft[i].txs_mbuf = NULL;
720 sc->sc_txsfree = EMAC_TXQUEUELEN;
721 sc->sc_txsnext = 0;
722 sc->sc_txsdirty = 0;
723
724 /*
725 * Initialise the receiver descriptor and receive job
726 * descriptor rings.
727 */
728 for (i = 0; i < EMAC_NRXDESC; i++) {
729 rxs = &sc->sc_rxsoft[i];
730 if (rxs->rxs_mbuf == NULL) {
731 if ((error = emac_add_rxbuf(sc, i)) != 0) {
732 printf("%s: unable to allocate or map rx "
733 "buffer %d, error = %d\n",
734 sc->sc_dev.dv_xname, i, error);
735 /*
736 * XXX Should attempt to run with fewer receive
737 * XXX buffers instead of just failing.
738 */
739 emac_rxdrain(sc);
740 goto out;
741 }
742 } else
743 EMAC_INIT_RXDESC(sc, i);
744 }
745 sc->sc_rxptr = 0;
746
747 /*
748 * Set the current media.
749 */
750 mii_mediachg(&sc->sc_mii);
751
752 /*
753 * Give the transmit and receive rings to the MAL.
754 */
755 mtdcr(DCR_MAL0_TXCTP0R, EMAC_CDTXADDR(sc, 0));
756 mtdcr(DCR_MAL0_RXCTP0R, EMAC_CDRXADDR(sc, 0));
757
758 /*
759 * Load the MAC address.
760 */
761 EMAC_WRITE(sc, EMAC_IAHR, enaddr[0] << 8 | enaddr[1]);
762 EMAC_WRITE(sc, EMAC_IALR,
763 enaddr[2] << 24 | enaddr[3] << 16 | enaddr[4] << 8 | enaddr[5]);
764
765 /*
766 * Set the receive channel buffer size (in units of 16 bytes).
767 */
768 #if MCLBYTES > (4096 - 16) /* XXX! */
769 # error MCLBYTES > max rx channel buffer size
770 #endif
771 mtdcr(DCR_MAL0_RCBS0, MCLBYTES / 16);
772
773 /* Set fifos, media modes. */
774 EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);
775
776 /*
777 * Enable Individual and (possibly) Broadcast Address modes,
778 * runt packets, and strip padding.
779 *
780 * XXX: promiscuous mode (and promiscuous multicast mode) need to be
781 * dealt with here!
782 */
783 EMAC_WRITE(sc, EMAC_RMR, RMR_IAE | RMR_RRP | RMR_SP |
784 (ifp->if_flags & IFF_BROADCAST ? RMR_BAE : 0));
785
786 /*
787 * Set low- and urgent-priority request thresholds.
788 */
789 EMAC_WRITE(sc, EMAC_TMR1,
790 ((7 << TMR1_TLR_SHIFT) & TMR1_TLR_MASK) | /* 16 word burst */
791 ((15 << TMR1_TUR_SHIFT) & TMR1_TUR_MASK));
792 /*
793 * Set Transmit Request Threshold Register.
794 */
795 EMAC_WRITE(sc, EMAC_TRTR, TRTR_256);
796
797 /*
798 * Set high and low receive watermarks.
799 */
800 EMAC_WRITE(sc, EMAC_RWMR,
801 30 << RWMR_RLWM_SHIFT | 64 << RWMR_RLWM_SHIFT);
802
803 /*
804 * Set frame gap.
805 */
806 EMAC_WRITE(sc, EMAC_IPGVR, 8);
807
808 /*
809 * Set interrupt status enable bits for EMAC and MAL.
810 */
811 EMAC_WRITE(sc, EMAC_ISER,
812 ISR_BP | ISR_SE | ISR_ALE | ISR_BFCS | ISR_PTLE | ISR_ORE | ISR_IRE);
813 mtdcr(DCR_MAL0_IER, MAL0_IER_DE | MAL0_IER_NWE | MAL0_IER_TO |
814 MAL0_IER_OPB | MAL0_IER_PLB);
815
816 /*
817 * Enable the transmit and receive channel on the MAL.
818 */
819 mtdcr(DCR_MAL0_RXCASR, MAL0_RXCASR_CHAN0);
820 mtdcr(DCR_MAL0_TXCASR, MAL0_TXCASR_CHAN0);
821
822 /*
823 * Enable the transmit and receive channel on the EMAC.
824 */
825 EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);
826
827 /*
828 * Start the one second MII clock.
829 */
830 callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
831
832 /*
833 * ... all done!
834 */
835 ifp->if_flags |= IFF_RUNNING;
836 ifp->if_flags &= ~IFF_OACTIVE;
837
838 out:
839 if (error) {
840 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
841 ifp->if_timer = 0;
842 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
843 }
844 return (error);
845 }
846
847 static int
848 emac_add_rxbuf(struct emac_softc *sc, int idx)
849 {
850 struct emac_rxsoft *rxs = &sc->sc_rxsoft[idx];
851 struct mbuf *m;
852 int error;
853
854 MGETHDR(m, M_DONTWAIT, MT_DATA);
855 if (m == NULL)
856 return (ENOBUFS);
857
858 MCLGET(m, M_DONTWAIT);
859 if ((m->m_flags & M_EXT) == 0) {
860 m_freem(m);
861 return (ENOBUFS);
862 }
863
864 if (rxs->rxs_mbuf != NULL)
865 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
866
867 rxs->rxs_mbuf = m;
868
869 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
870 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
871 if (error) {
872 printf("%s: can't load rx DMA map %d, error = %d\n",
873 sc->sc_dev.dv_xname, idx, error);
874 panic("emac_add_rxbuf"); /* XXX */
875 }
876
877 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
878 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
879
880 EMAC_INIT_RXDESC(sc, idx);
881
882 return (0);
883 }
884
885 /* ifnet interface function */
886 static void
887 emac_watchdog(struct ifnet *ifp)
888 {
889 struct emac_softc *sc = ifp->if_softc;
890
891 /*
892 * Since we're not interrupting every packet, sweep
893 * up before we report an error.
894 */
895 emac_txreap(sc);
896
897 if (sc->sc_txfree != EMAC_NTXDESC) {
898 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
899 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
900 sc->sc_txnext);
901 ifp->if_oerrors++;
902
903 /* Reset the interface. */
904 (void)emac_init(ifp);
905 } else if (ifp->if_flags & IFF_DEBUG)
906 printf("%s: recovered from device timeout\n",
907 sc->sc_dev.dv_xname);
908
909 /* try to get more packets going */
910 emac_start(ifp);
911 }
912
913 static void
914 emac_rxdrain(struct emac_softc *sc)
915 {
916 struct emac_rxsoft *rxs;
917 int i;
918
919 for (i = 0; i < EMAC_NRXDESC; i++) {
920 rxs = &sc->sc_rxsoft[i];
921 if (rxs->rxs_mbuf != NULL) {
922 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
923 m_freem(rxs->rxs_mbuf);
924 rxs->rxs_mbuf = NULL;
925 }
926 }
927 }
928
929 /* ifnet interface function */
930 static void
931 emac_stop(struct ifnet *ifp, int disable)
932 {
933 struct emac_softc *sc = ifp->if_softc;
934 struct emac_txsoft *txs;
935 int i;
936
937 /* Stop the one second clock. */
938 callout_stop(&sc->sc_callout);
939
940 /* Down the MII */
941 mii_down(&sc->sc_mii);
942
943 /* Disable interrupts. */
944 #if 0 /* Can't disable MAL interrupts without a reset... */
945 EMAC_WRITE(sc, EMAC_ISER, 0);
946 #endif
947 mtdcr(DCR_MAL0_IER, 0);
948
949 /* Disable the receive and transmit channels. */
950 mtdcr(DCR_MAL0_RXCARR, MAL0_RXCARR_CHAN0);
951 mtdcr(DCR_MAL0_TXCARR, MAL0_TXCARR_CHAN0 | MAL0_TXCARR_CHAN1);
952
953 /* Disable the transmit enable and receive MACs. */
954 EMAC_WRITE(sc, EMAC_MR0,
955 EMAC_READ(sc, EMAC_MR0) & ~(MR0_TXE | MR0_RXE));
956
957 /* Release any queued transmit buffers. */
958 for (i = 0; i < EMAC_TXQUEUELEN; i++) {
959 txs = &sc->sc_txsoft[i];
960 if (txs->txs_mbuf != NULL) {
961 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
962 m_freem(txs->txs_mbuf);
963 txs->txs_mbuf = NULL;
964 }
965 }
966
967 if (disable)
968 emac_rxdrain(sc);
969
970 /*
971 * Mark the interface down and cancel the watchdog timer.
972 */
973 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
974 ifp->if_timer = 0;
975 }
976
977 /* ifnet interface function */
978 static int
979 emac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
980 {
981 struct emac_softc *sc = ifp->if_softc;
982 struct ifreq *ifr = (struct ifreq *)data;
983 int s, error;
984
985 s = splnet();
986
987 switch (cmd) {
988 case SIOCSIFMEDIA:
989 case SIOCGIFMEDIA:
990 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
991 break;
992
993 default:
994 error = ether_ioctl(ifp, cmd, data);
995 if (error == ENETRESET) {
996 /*
997 * Multicast list has changed; set the hardware filter
998 * accordingly.
999 */
1000 #if 0
1001 error = emac_set_filter(sc); /* XXX not done yet */
1002 #else
1003 error = emac_init(ifp);
1004 #endif
1005 }
1006 break;
1007 }
1008
1009 /* try to get more packets going */
1010 emac_start(ifp);
1011
1012 splx(s);
1013 return (error);
1014 }
1015
/*
 * Reset the MAL and the EMAC core to a known state, then program the
 * MAL configuration register.  Called from attach and init.
 */
static void
emac_reset(struct emac_softc *sc)
{

	/* reset the MAL */
	mtdcr(DCR_MAL0_CFG, MAL0_CFG_SR);

	/* Soft-reset the EMAC and give it a moment to settle. */
	EMAC_WRITE(sc, EMAC_MR0, MR0_SRST);
	delay(5);

	/* XXX: check if MR0_SRST is clear until a timeout instead? */
	EMAC_WRITE(sc, EMAC_MR0, EMAC_READ(sc, EMAC_MR0) & ~MR0_SRST);

	/* XXX clear interrupts in EMAC_ISR just to be sure?? */

	/* set the MAL config register */
	mtdcr(DCR_MAL0_CFG, MAL0_CFG_PLBB | MAL0_CFG_OPBBL | MAL0_CFG_LEA |
	    MAL0_CFG_SD | MAL0_CFG_PLBLT);
}
1035
1036 /*
1037 * EMAC General interrupt handler
1038 */
1039 static int
1040 emac_intr(void *arg)
1041 {
1042 struct emac_softc *sc = arg;
1043 uint32_t status;
1044
1045 EMAC_EVCNT_INCR(&sc->sc_ev_intr);
1046 status = EMAC_READ(sc, EMAC_ISR);
1047
1048 /* Clear the interrupt status bits. */
1049 EMAC_WRITE(sc, EMAC_ISR, status);
1050
1051 return (0);
1052 }
1053
/*
 * EMAC Wake-On-LAN interrupt handler
 *
 * Stub: counts and logs the event only; WOL is not otherwise handled.
 * NOTE(review): the interrupt condition is not acknowledged here, so
 * this may re-fire -- verify whether the source is level-triggered.
 */
static int
emac_wol_intr(void *arg)
{
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_wol);
	printf("%s: emac_wol_intr\n", sc->sc_dev.dv_xname);
	return (0);
}
1066
/*
 * MAL System ERRor interrupt handler
 *
 * Reads the MAL error status and acknowledges it by writing the same
 * value back.  No recovery is attempted.
 */
static int
emac_serr_intr(void *arg)
{
#ifdef EMAC_EVENT_COUNTERS
	/*
	 * `sc' is only needed by the event counter; EMAC_EVCNT_INCR()
	 * presumably expands to nothing when EMAC_EVENT_COUNTERS is
	 * undefined, which keeps the reference below legal.
	 */
	struct emac_softc *sc = arg;
#endif
	u_int32_t esr;

	EMAC_EVCNT_INCR(&sc->sc_ev_serr);
	esr = mfdcr(DCR_MAL0_ESR);

	/* Clear the interrupt status bits. */
	mtdcr(DCR_MAL0_ESR, esr);
	return (0);
}
1085
/*
 * MAL Transmit End-Of-Buffer interrupt handler.
 * NOTE: This shouldn't be called!
 *
 * Despite the note above it still counts the event and reaps completed
 * Tx descriptors, so nothing is lost if it does fire.
 */
static int
emac_txeob_intr(void *arg)
{
#ifdef EMAC_EVENT_COUNTERS
	/* Only referenced by the event-counter macro. */
	struct emac_softc *sc = arg;
#endif

	EMAC_EVCNT_INCR(&sc->sc_ev_txintr);
	emac_txreap(arg);

	return (0);

}
1103
/*
 * Reap completed Tx descriptors.
 *
 * Walks the software Tx queue starting at sc_txsdirty, freeing the
 * mbuf and DMA map of every frame the MAL has finished with, and
 * updates the interface statistics from the per-frame status bits.
 * Stops at the first frame still owned by the hardware.
 *
 * Always returns 0.  Called from the Tx end-of-buffer interrupt path.
 */
static int
emac_txreap(struct emac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_txsoft *txs;
	int i;
	u_int32_t txstat;

	EMAC_EVCNT_INCR(&sc->sc_ev_txreap);

	/* Clear the interrupt */
	mtdcr(DCR_MAL0_TXEOBISR, mfdcr(DCR_MAL0_TXEOBISR));

	/* Descriptors are (about to be) freed; let output resume. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN;
	    i = EMAC_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		EMAC_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/*
		 * Frame status lives in its last descriptor; if the
		 * hardware still owns it, we are done reaping.
		 */
		txstat = sc->sc_txdescs[txs->txs_lastdesc].md_stat_ctrl;
		if (txstat & MAL_TX_READY)
			break;

		/*
		 * Check for errors and collisions.
		 */
		if (txstat & (EMAC_TXS_UR | EMAC_TXS_ED))
			ifp->if_oerrors++;

#ifdef EMAC_EVENT_COUNTERS
		if (txstat & EMAC_TXS_UR)
			EMAC_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* EMAC_EVENT_COUNTERS */

		if (txstat & (EMAC_TXS_EC | EMAC_TXS_MC | EMAC_TXS_SC | EMAC_TXS_LC)) {
			if (txstat & EMAC_TXS_EC)
				ifp->if_collisions += 16;
			else if (txstat & EMAC_TXS_MC)
				ifp->if_collisions += 2; /* XXX? */
			else if (txstat & EMAC_TXS_SC)
				ifp->if_collisions++;
			if (txstat & EMAC_TXS_LC)
				ifp->if_collisions++;
		} else
			ifp->if_opackets++;

		if (ifp->if_flags & IFF_DEBUG) {
			if (txstat & EMAC_TXS_ED)
				printf("%s: excessive deferral\n",
				    sc->sc_dev.dv_xname);
			if (txstat & EMAC_TXS_EC)
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
		}

		/* Done with this frame; release its descriptors and mbuf. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == EMAC_TXQUEUELEN)
		ifp->if_timer = 0;

	return (0);
}
1190
/*
 * MAL Receive End-Of-Buffer interrupt handler
 *
 * Walks the receive descriptor ring starting at sc_rxptr, passing each
 * completed packet up the stack (copying small ones into a fresh header
 * mbuf) until an empty (hardware-owned) descriptor is found.  Errored
 * packets are counted and their buffers recycled in place.
 *
 * Always returns 0.
 */
static int
emac_rxeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t rxstat;
	int i, len;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxintr);

	/* Clear the interrupt */
	mtdcr(DCR_MAL0_RXEOBISR, mfdcr(DCR_MAL0_RXEOBISR));

	for (i = sc->sc_rxptr;; i = EMAC_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].md_stat_ctrl;

		if (rxstat & MAL_RX_EMPTY)
			/*
			 * We have processed all of the receive buffers.
			 */
			break;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place. It will
		 * simply be reused the next time the ring comes around.
		 */
		if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE |
		    EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE |
		    EMAC_RXS_IRE)) {
#define	PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    sc->sc_dev.dv_xname, str)
			ifp->if_ierrors++;
			PRINTERR(EMAC_RXS_OE, "overrun error");
			PRINTERR(EMAC_RXS_BP, "bad packet");
			PRINTERR(EMAC_RXS_RP, "runt packet");
			PRINTERR(EMAC_RXS_SE, "short event");
			PRINTERR(EMAC_RXS_AE, "alignment error");
			PRINTERR(EMAC_RXS_BFCS, "bad FCS");
			PRINTERR(EMAC_RXS_PTL, "packet too long");
			PRINTERR(EMAC_RXS_ORE, "out of range error");
			PRINTERR(EMAC_RXS_IRE, "in range error");
#undef PRINTERR
			EMAC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet. Note, the 405GP emac
		 * includes the CRC with every packet.
		 */
		len = sc->sc_rxdescs[i].md_data_len;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it. This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain. If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (emac_copy_small != 0 && len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				/* No mbuf: take the drop path below. */
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(rxs->rxs_mbuf, caddr_t), len);
			/* Recycle the original buffer for reuse by the MAL. */
			EMAC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (emac_add_rxbuf(sc, i) != 0) {
 dropit:
				/* Drop the packet and recycle the buffer. */
				ifp->if_ierrors++;
				EMAC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		ifp->if_ipackets++;
		/* Trailing CRC is still attached (see above). */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass if up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	return (0);
}
1317
/*
 * MAL Transmit Descriptor Error interrupt handler
 *
 * Stub: counts and logs the event only.  NOTE(review): the descriptor
 * error condition is not cleared or recovered here -- verify whether
 * the Tx channel needs to be re-enabled like emac_rxde_intr() does.
 */
static int
emac_txde_intr(void *arg)
{
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_txde);
	printf("%s: emac_txde_intr\n", sc->sc_dev.dv_xname);
	return (0);
}
1330
/*
 * MAL Receive Descriptor Error interrupt handler
 *
 * Recovers from a receive descriptor error by resetting every ring
 * entry that isn't "clean", re-enabling the receive channel, and
 * acknowledging the interrupt.
 */
static int
emac_rxde_intr(void *arg)
{
	int i;
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxde);
	printf("%s: emac_rxde_intr\n", sc->sc_dev.dv_xname);
	/*
	 * XXX!
	 * This is a bit drastic; we just drop all descriptors that aren't
	 * "clean". We should probably send any that are up the stack.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		EMAC_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/*
		 * A length other than MCLBYTES presumably means the MAL
		 * wrote to (or partially filled) this descriptor --
		 * EMAC_INIT_RXDESC is assumed to restore the full-cluster
		 * length; confirm against the macro's definition.
		 */
		if (sc->sc_rxdescs[i].md_data_len != MCLBYTES) {
			EMAC_INIT_RXDESC(sc, i);
		}

	}

	/* Reenable the receive channel */
	mtdcr(DCR_MAL0_RXCASR, MAL0_RXCASR_CHAN0);

	/* Clear the interrupt */
	mtdcr(DCR_MAL0_RXDEIR, mfdcr(DCR_MAL0_RXDEIR));

	return (0);
}
1364
1365 static uint32_t
1366 emac_mii_wait(struct emac_softc *sc)
1367 {
1368 int i;
1369 uint32_t reg;
1370
1371 /* wait for PHY data transfer to complete */
1372 i = 0;
1373 while ((reg = EMAC_READ(sc, EMAC_STACR) & STACR_OC) == 0) {
1374 delay(7);
1375 if (i++ > 5) {
1376 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1377 return (0);
1378 }
1379 }
1380 return (reg);
1381 }
1382
1383 static int
1384 emac_mii_readreg(struct device *self, int phy, int reg)
1385 {
1386 struct emac_softc *sc = (struct emac_softc *)self;
1387 uint32_t sta_reg;
1388
1389 /* wait for PHY data transfer to complete */
1390 if (emac_mii_wait(sc) == 0)
1391 return (0);
1392
1393 sta_reg = reg << STACR_PRASHIFT;
1394 sta_reg |= STACR_READ;
1395 sta_reg |= phy << STACR_PCDASHIFT;
1396
1397 sta_reg &= ~STACR_OPBC_MASK;
1398 sta_reg |= STACR_OPBC_50MHZ;
1399
1400
1401 EMAC_WRITE(sc, EMAC_STACR, sta_reg);
1402
1403 if ((sta_reg = emac_mii_wait(sc)) == 0)
1404 return (0);
1405 sta_reg = EMAC_READ(sc, EMAC_STACR);
1406 if ((sta_reg & STACR_PHYE) != 0)
1407 return (0);
1408 return (sta_reg >> STACR_PHYDSHIFT);
1409 }
1410
1411 static void
1412 emac_mii_writereg(struct device *self, int phy, int reg, int val)
1413 {
1414 struct emac_softc *sc = (struct emac_softc *)self;
1415 uint32_t sta_reg;
1416
1417 /* wait for PHY data transfer to complete */
1418 if (emac_mii_wait(sc) == 0)
1419 return;
1420
1421 sta_reg = reg << STACR_PRASHIFT;
1422 sta_reg |= STACR_WRITE;
1423 sta_reg |= phy << STACR_PCDASHIFT;
1424
1425 sta_reg &= ~STACR_OPBC_MASK;
1426 sta_reg |= STACR_OPBC_50MHZ;
1427
1428 sta_reg |= val << STACR_PHYDSHIFT;
1429
1430 EMAC_WRITE(sc, EMAC_STACR, sta_reg);
1431
1432 if ((sta_reg = emac_mii_wait(sc)) == 0)
1433 return;
1434 if ((sta_reg & STACR_PHYE) != 0)
1435 /* error */
1436 return;
1437 }
1438
/*
 * mii(4) status-change callback: update the cached MR1 duplex bits to
 * match the negotiated media, then reset the chip so the new value can
 * take effect (sc_mr1 is presumably written to the hardware elsewhere
 * after the reset -- confirm against emac_init()).
 */
static void
emac_mii_statchg(struct device *self)
{
	struct emac_softc *sc = (void *)self;

	/*
	 * NOTE(review): the branches are asymmetric -- full-duplex sets
	 * only MR1_FDE and leaves MR1_EIFC alone, while half-duplex
	 * clears both.  Presumably intentional; verify against the
	 * 405GP EMAC documentation.
	 */
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		sc->sc_mr1 |= MR1_FDE;
	else
		sc->sc_mr1 &= ~(MR1_FDE | MR1_EIFC);

	/* XXX 802.3x flow-control? */

	/*
	 * MR1 can only be written immediately after a reset...
	 */
	emac_reset(sc);
}
1456
1457 static void
1458 emac_mii_tick(void *arg)
1459 {
1460 struct emac_softc *sc = arg;
1461 int s;
1462
1463 if ((sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1464 return;
1465
1466 s = splnet();
1467 mii_tick(&sc->sc_mii);
1468 splx(s);
1469
1470 callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
1471 }
1472
1473 /* ifmedia interface function */
1474 static void
1475 emac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1476 {
1477 struct emac_softc *sc = ifp->if_softc;
1478
1479 mii_pollstat(&sc->sc_mii);
1480
1481 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1482 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1483 }
1484
1485 /* ifmedia interface function */
1486 static int
1487 emac_mediachange(struct ifnet *ifp)
1488 {
1489 struct emac_softc *sc = ifp->if_softc;
1490
1491 if (ifp->if_flags & IFF_UP)
1492 mii_mediachg(&sc->sc_mii);
1493 return (0);
1494 }
1495