if_ae.c revision 1.11.6.2 1 /* $Id: if_ae.c,v 1.11.6.2 2008/06/02 13:22:24 mjf Exp $ */
2 /*-
3 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
4 * Copyright (c) 2006 Garrett D'Amore.
5 * All rights reserved.
6 *
7 * This code was written by Garrett D'Amore for the Champaign-Urbana
8 * Community Wireless Network Project.
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 * 3. All advertising materials mentioning features or use of this
20 * software must display the following acknowledgements:
21 * This product includes software developed by the Urbana-Champaign
22 * Independent Media Center.
23 * This product includes software developed by Garrett D'Amore.
24 * 4. Urbana-Champaign Independent Media Center's name and Garrett
25 * D'Amore's name may not be used to endorse or promote products
26 * derived from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
29 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
31 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
33 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
35 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
36 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
37 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
40 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 */
42 /*-
43 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
44 * All rights reserved.
45 *
46 * This code is derived from software contributed to The NetBSD Foundation
47 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
48 * NASA Ames Research Center; and by Charles M. Hannum.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 * notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 * notice, this list of conditions and the following disclaimer in the
57 * documentation and/or other materials provided with the distribution.
58 *
59 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
60 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
61 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
62 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
63 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
64 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
65 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
66 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
67 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
68 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
69 * POSSIBILITY OF SUCH DAMAGE.
70 */
71
72 /*
73 * Device driver for the onboard ethernet MAC found on the AR5312
74 * chip's AHB bus.
75 *
76 * This device is very similar to the tulip in most regards, and
77 * the code is directly derived from NetBSD's tulip.c. However, it
78 * is different enough that it did not seem to be a good idea to
79 * add further complexity to the tulip driver, so we have our own.
80 *
81 * Also tulip has a lot of complexity in it for various parts/options
82 * that we don't need, and on these little boxes with only ~8MB RAM, we
83 * don't want any extra bloat.
84 */
85
86 /*
87 * TODO:
88 *
89 * 1) Find out about BUS_MODE_ALIGN16B. This chip can apparently align
90 * inbound packets on a half-word boundary, which would make life easier
91 * for TCP/IP. (Aligning IP headers on a word.)
92 *
93 * 2) There is stuff in original tulip to shut down the device when reacting
94 * to a change in link status. Is that needed?
95 *
96 * 3) Test with variety of 10/100 HDX/FDX scenarios.
97 *
98 */
99
100 #include <sys/cdefs.h>
101 __KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.11.6.2 2008/06/02 13:22:24 mjf Exp $");
102
103 #include "bpfilter.h"
104
105 #include <sys/param.h>
106 #include <sys/systm.h>
107 #include <sys/callout.h>
108 #include <sys/mbuf.h>
109 #include <sys/malloc.h>
110 #include <sys/kernel.h>
111 #include <sys/socket.h>
112 #include <sys/ioctl.h>
113 #include <sys/errno.h>
114 #include <sys/device.h>
115
116 #include <machine/endian.h>
117
118 #include <uvm/uvm_extern.h>
119
120 #include <net/if.h>
121 #include <net/if_dl.h>
122 #include <net/if_media.h>
123 #include <net/if_ether.h>
124
125 #if NBPFILTER > 0
126 #include <net/bpf.h>
127 #endif
128
129 #include <machine/bus.h>
130 #include <machine/intr.h>
131
132 #include <dev/mii/mii.h>
133 #include <dev/mii/miivar.h>
134 #include <dev/mii/mii_bitbang.h>
135
136 #include <mips/atheros/include/arbusvar.h>
137 #include <mips/atheros/dev/aereg.h>
138 #include <mips/atheros/dev/aevar.h>
139
/*
 * Transmit-threshold settings, in increasing order of threshold.  On a
 * transmit underrun the interrupt handler steps to the next entry; the
 * table is terminated by a NULL name (see ae_intr()).
 */
static const struct {
	u_int32_t txth_opmode;		/* OPMODE bits */
	const char *txth_name;		/* name of mode */
} ae_txthresh[] = {
	{ OPMODE_TR_32,		"32 words" },
	{ OPMODE_TR_64,		"64 words" },
	{ OPMODE_TR_128,	"128 words" },
	{ OPMODE_TR_256,	"256 words" },
	{ OPMODE_SF,		"store and forward mode" },
	{ 0,			NULL },
};
151
/* Autoconfiguration glue. */
static int	ae_match(device_t, struct cfdata *, void *);
static void	ae_attach(device_t, device_t, void *);
static int	ae_detach(device_t, int);
static int	ae_activate(device_t, enum devact);

/* Chip reset / DMA-engine quiesce helpers. */
static void	ae_reset(struct ae_softc *);
static void	ae_idle(struct ae_softc *, u_int32_t);

/* ifnet entry points. */
static void	ae_start(struct ifnet *);
static void	ae_watchdog(struct ifnet *);
static int	ae_ioctl(struct ifnet *, u_long, void *);
static int	ae_init(struct ifnet *);
static void	ae_stop(struct ifnet *, int);

/* Shutdown hook: stop the interface at reboot time. */
static void	ae_shutdown(void *);

/* Receive buffer management. */
static void	ae_rxdrain(struct ae_softc *);
static int	ae_add_rxbuf(struct ae_softc *, int);

/* Power management. */
static int	ae_enable(struct ae_softc *);
static void	ae_disable(struct ae_softc *);
static void	ae_power(int, void *);

/* Receive filter (promiscuous/multicast) setup. */
static void	ae_filter_setup(struct ae_softc *);

/* Interrupt handling. */
static int	ae_intr(void *);
static void	ae_rxintr(struct ae_softc *);
static void	ae_txintr(struct ae_softc *);

/* MII/PHY support. */
static void	ae_mii_tick(void *);
static void	ae_mii_statchg(device_t);

static int	ae_mii_readreg(device_t, int, int);
static void	ae_mii_writereg(device_t, int, int, int);

#ifdef AE_DEBUG
/* Debug printf: fires only when the interface has IFF_DEBUG set. */
#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

#ifdef AE_STATS
static void	ae_print_stats(struct ae_softc *);
#endif

/* Register the driver: softc size plus autoconf entry points. */
CFATTACH_DECL(ae, sizeof(struct ae_softc),
    ae_match, ae_attach, ae_detach, ae_activate);
200
201 /*
202 * ae_match:
203 *
204 * Check for a device match.
205 */
206 int
207 ae_match(device_t parent, struct cfdata *cf, void *aux)
208 {
209 struct arbus_attach_args *aa = aux;
210
211 if (strcmp(aa->aa_name, cf->cf_name) == 0)
212 return 1;
213
214 return 0;
215
216 }
217
218 /*
219 * ae_attach:
220 *
221 * Attach an ae interface to the system.
222 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.  The "mac-addr" property is expected to
	 * have been set on this device by the bus/firmware glue; without
	 * it we cannot attach at all.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/* Stash bus handles and interrupt lines from the attach args. */
	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  BUS_DMA_COHERENT gets us an uncached mapping
	 * so descriptor updates are visible to the chip.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps (single segment each; the
	 * driver uses one cluster per received frame).
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	/* If no PHY was found, fall back to a manual "none" medium. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	/* Fill in and register the network interface. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;	/* cached for ae_ioctl() */
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}
444
445 /*
446 * ae_activate:
447 *
448 * Handle device activation/deactivation requests.
449 */
450 int
451 ae_activate(device_t self, enum devact act)
452 {
453 struct ae_softc *sc = device_private(self);
454 int s, error = 0;
455
456 s = splnet();
457 switch (act) {
458 case DVACT_ACTIVATE:
459 error = EOPNOTSUPP;
460 break;
461
462 case DVACT_DEACTIVATE:
463 mii_activate(&sc->sc_mii, act, MII_PHY_ANY, MII_OFFSET_ANY);
464 if_deactivate(&sc->sc_ethercom.ec_if);
465 break;
466 }
467 splx(s);
468
469 return (error);
470 }
471
472 /*
473 * ae_detach:
474 *
475 * Detach a device interface.
476 */
int
ae_detach(device_t self, int flags)
{
	struct ae_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct ae_txsoft *txs;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & AE_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	if (sc->sc_tick)
		callout_stop(&sc->sc_tick_callout);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->sc_rnd_source);
#endif
	/* Take the interface out of the networking stack. */
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Free any mbufs still held by the rings, then their DMA maps. */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}
	/* Release the control-data DMA resources (reverse of ae_attach). */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);


	return (0);
}
540
541 /*
542 * ae_shutdown:
543 *
544 * Make sure the interface is stopped at reboot time.
545 */
546 static void
547 ae_shutdown(void *arg)
548 {
549 struct ae_softc *sc = arg;
550
551 ae_stop(&sc->sc_ethercom.ec_if, 1);
552 }
553
554 /*
555 * ae_start: [ifnet interface function]
556 *
557 * Start packet transmission on the interface.
558 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs, *last_txs = NULL;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx = 1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags, ifp->if_flags));


	/* Do nothing unless we are running and not already full. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    sc->sc_dev.dv_xname, ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	    sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.  POLL (not DEQUEUE) so
		 * the packet stays queued if we can't handle it yet.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  A copy is also forced when the payload is not
		 * 4-byte aligned (the mtod() & 3 check).
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		      BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			/* Flatten the chain into the fresh mbuf/cluster. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname,
				    error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX it is worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		/* Now actually take the packet off the send queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied: free the original, keep the copy. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
			    ADCTL_SIZE1_SHIFT) |
			    (nexttx == (AE_NTXDESC - 1) ?
			    ADCTL_ER : 0);
			lasttx = nexttx;
		}

		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("     txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       ad_status:   0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf("       ad_ctl:      0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf("       ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf("       ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		/* Move the job from the free list to the in-flight list. */
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		last_txs = txs;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    sc->sc_dev.dv_xname, lasttx, firsttx));
		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC;
		AE_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN;
		AE_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Wake up the transmitter. */
		/* XXX USE AUTOPOLLING? */
		AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
		AE_BARRIER(sc);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
789
790 /*
791 * ae_watchdog: [ifnet interface function]
792 *
793 * Watchdog timer handler.
794 */
795 static void
796 ae_watchdog(struct ifnet *ifp)
797 {
798 struct ae_softc *sc = ifp->if_softc;
799 int doing_transmit;
800
801 doing_transmit = (! SIMPLEQ_EMPTY(&sc->sc_txdirtyq));
802
803 if (doing_transmit) {
804 printf("%s: transmit timeout\n", sc->sc_dev.dv_xname);
805 ifp->if_oerrors++;
806 }
807 else
808 printf("%s: spurious watchdog timeout\n", sc->sc_dev.dv_xname);
809
810 (void) ae_init(ifp);
811
812 /* Try to get more packets going. */
813 ae_start(ifp);
814 }
815
816 /*
817 * ae_ioctl: [ifnet interface function]
818 *
819 * Handle control requests from the operator.
820 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ae_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		/* If the interface is up and running, only modify the receive
		 * filter when setting promiscuous or debug mode.  Otherwise
		 * fall through to ether_ioctl, which will reset the chip.
		 * RESETIGN masks off the flag bits whose change does NOT
		 * require a full reset.
		 */
#define RESETIGN (IFF_CANTCHANGE|IFF_DEBUG)
		if (((ifp->if_flags & (IFF_UP|IFF_RUNNING))
		    == (IFF_UP|IFF_RUNNING))
		    && ((ifp->if_flags & (~RESETIGN))
		    == (sc->sc_if_flags & (~RESETIGN)))) {
			/* Set up the receive filter. */
			ae_filter_setup(sc);
			error = 0;
			break;
#undef RESETIGN
		}
		/* FALLTHROUGH */
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Multicast list has changed.  Set the
				 * hardware filter accordingly.
				 */
				ae_filter_setup(sc);
			}
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	if (AE_IS_ENABLED(sc))
		ae_start(ifp);

	/* Remember the flags we acted on, for the RESETIGN check above. */
	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}
870
871 /*
872 * ae_intr:
873 *
874 * Interrupt service routine.
875 */
int
ae_intr(void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t status, rxstatus, txstatus;
	int handled = 0, txthresh;

	DPRINTF(sc, ("%s: ae_intr\n", sc->sc_dev.dv_xname));

#ifdef DEBUG
	if (AE_IS_ENABLED(sc) == 0)
		panic("%s: ae_intr: not enabled", sc->sc_dev.dv_xname);
#endif

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(&sc->sc_dev)) {
		printf("spurious?!?\n");
		return (0);
	}

	/* Loop until the chip reports no more interesting status bits. */
	for (;;) {
		/* Read and ack (write-1-to-clear) the pending status. */
		status = AE_READ(sc, CSR_STATUS);
		if (status) {
			AE_WRITE(sc, CSR_STATUS, status);
			AE_BARRIER(sc);
		}

		if ((status & sc->sc_inten) == 0)
			break;

		handled = 1;

		rxstatus = status & sc->sc_rxint_mask;
		txstatus = status & sc->sc_txint_mask;

		if (rxstatus) {
			/* Grab any new packets. */
			ae_rxintr(sc);

			if (rxstatus & STATUS_RU) {
				printf("%s: receive ring overrun\n",
				    sc->sc_dev.dv_xname);
				/* Get the receive process going again. */
				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
				AE_BARRIER(sc);
				break;
			}
		}

		if (txstatus) {
			/* Sweep up transmit descriptors. */
			ae_txintr(sc);

			if (txstatus & STATUS_TJT)
				printf("%s: transmit jabber timeout\n",
				    sc->sc_dev.dv_xname);

			if (txstatus & STATUS_UNF) {
				/*
				 * Increase our transmit threshold if
				 * another is available.
				 */
				txthresh = sc->sc_txthresh + 1;
				if (ae_txthresh[txthresh].txth_name != NULL) {
					uint32_t opmode;
					/* Idle the transmit process. */
					opmode = AE_READ(sc, CSR_OPMODE);
					ae_idle(sc, OPMODE_ST);

					sc->sc_txthresh = txthresh;
					opmode &=
					    ~(OPMODE_TR|OPMODE_SF);
					opmode |=
					    ae_txthresh[txthresh].txth_opmode;
					printf("%s: transmit underrun; new "
					    "threshold: %s\n",
					    sc->sc_dev.dv_xname,
					    ae_txthresh[txthresh].txth_name);

					/*
					 * Set the new threshold and restart
					 * the transmit process.
					 */
					AE_WRITE(sc, CSR_OPMODE, opmode);
					AE_BARRIER(sc);
				}
					/*
					 * XXX Log every Nth underrun from
					 * XXX now on?
					 */
			}
		}

		if (status & (STATUS_TPS|STATUS_RPS)) {
			if (status & STATUS_TPS)
				printf("%s: transmit process stopped\n",
				    sc->sc_dev.dv_xname);
			if (status & STATUS_RPS)
				printf("%s: receive process stopped\n",
				    sc->sc_dev.dv_xname);
			/* Restart the interface from scratch. */
			(void) ae_init(ifp);
			break;
		}

		if (status & STATUS_SE) {
			const char *str;

			if (status & STATUS_TX_ABORT)
				str = "tx abort";
			else if (status & STATUS_RX_ABORT)
				str = "rx abort";
			else
				str = "unknown error";

			printf("%s: fatal system error: %s\n",
			    sc->sc_dev.dv_xname, str);
			(void) ae_init(ifp);
			break;
		}

		/*
		 * Not handled:
		 *
		 * Transmit buffer unavailable -- normal
		 * condition, nothing to do, really.
		 *
		 * General purpose timer expired -- we don't
		 * use the general purpose timer.
		 *
		 * Early receive interrupt -- not available on
		 * all chips, we just use RI.  We also only
		 * use single-segment receive DMA, so this
		 * is mostly useless.
		 */
	}

	/* Try to get more packets going. */
	ae_start(ifp);

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, status);
#endif
	return (handled);
}
1026
1027 /*
1028 * ae_rxintr:
1029 *
1030 * Helper; handle receive interrupts.
1031 */
static void
ae_rxintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;	/* NOTE(review): set below but never used */
	struct ae_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t rxstat;
	int i, len;

	/* Walk the receive ring from the last-processed slot. */
	for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		AE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].ad_status;

		if (rxstat & ADSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (rxstat & ADSTAT_Rx_CS)
			ifp->if_collisions++;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long
		 * error.
		 */
		if (rxstat & ADSTAT_ES &&
		    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 ||
		     (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF |
				ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) {
#define	PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    sc->sc_dev.dv_xname, str)
			ifp->if_ierrors++;
			PRINTERR(ADSTAT_Rx_DE, "descriptor error");
			PRINTERR(ADSTAT_Rx_RF, "runt frame");
			PRINTERR(ADSTAT_Rx_TL, "frame too long");
			PRINTERR(ADSTAT_Rx_RE, "MII error");
			PRINTERR(ADSTAT_Rx_DB, "dribbling bit");
			PRINTERR(ADSTAT_Rx_CE, "CRC error");
#undef PRINTERR
			AE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note the chip
		 * includes the CRC with every packet.
		 */
		len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;

		/*
		 * XXX: the Atheros part can align on half words.  what
		 * is the performance implication of this?  Probably
		 * minimal, and we should use it...
		 */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (ae_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The chip's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		/* Shift by 2 so the IP header lands word-aligned. */
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		AE_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if its for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}
1177
1178 /*
1179 * ae_txintr:
1180 *
1181 * Helper; handle transmit interrupts.
1182 */
static void
ae_txintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_txsoft *txs;
	u_int32_t txstat;

	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags));

	/*
	 * We may free descriptors below, so clear OACTIVE to let
	 * ae_start() queue more packets again.
	 */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		/* Pull the descriptor status back from DMA memory. */
		AE_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef AE_DEBUG
		/*
		 * NOTE(review): this dump runs before the ADSTAT_OWN
		 * check below, so it may print descriptors the chip
		 * still owns (their fields can be mid-update).
		 */
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
				printf(" descriptor %d:\n", i);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[i].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[i].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr2);
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * If the chip still owns the last descriptor of this
		 * job, the frame has not completed transmission; stop
		 * reaping here.
		 */
		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
		if (txstat & ADSTAT_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		/* Return this job's descriptors to the free pool. */
		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
#ifdef AE_STATS
		if (txstat & ADSTAT_Tx_UF)
			sc->sc_stats.ts_tx_uf++;
		if (txstat & ADSTAT_Tx_TO)
			sc->sc_stats.ts_tx_to++;
		if (txstat & ADSTAT_Tx_EC)
			sc->sc_stats.ts_tx_ec++;
		if (txstat & ADSTAT_Tx_LC)
			sc->sc_stats.ts_tx_lc++;
#endif

		/* Underflow / jabber-timeout count as output errors. */
		if (txstat & (ADSTAT_Tx_UF|ADSTAT_Tx_TO))
			ifp->if_oerrors++;

		/*
		 * Excessive collisions means 16 attempts were made;
		 * otherwise use the chip's collision count for the frame.
		 */
		if (txstat & ADSTAT_Tx_EC)
			ifp->if_collisions += 16;
		else
			ifp->if_collisions += ADSTAT_Tx_COLLISIONS(txstat);
		if (txstat & ADSTAT_Tx_LC)
			ifp->if_collisions++;

		ifp->if_opackets++;
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL)
		ifp->if_timer = 0;
}
1275
1276 #ifdef AE_STATS
1277 void
1278 ae_print_stats(struct ae_softc *sc)
1279 {
1280
1281 printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
1282 sc->sc_dev.dv_xname,
1283 sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
1284 sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
1285 }
1286 #endif
1287
1288 /*
1289 * ae_reset:
1290 *
1291 * Perform a soft reset on the chip.
1292 */
void
ae_reset(struct ae_softc *sc)
{
	int i;

	/* Request a software reset of the MAC. */
	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
	AE_BARRIER(sc);

	/*
	 * The chip doesn't take itself out of reset automatically;
	 * SWR must be held for at least 2us before being cleared.
	 * We wait 10us for good measure.
	 */
	delay(10);
	AE_WRITE(sc, CSR_BUSMODE, 0);
	AE_BARRIER(sc);

	/* Poll (up to ~10ms total) for the reset to be acknowledged. */
	for (i = 0; i < 1000; i++) {
		/*
		 * Wait a bit for the reset to complete before peeking
		 * at the chip again.
		 */
		delay(10);
		if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
			break;
	}

	/* Complain (but continue) if the chip never deasserted SWR. */
	if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);

	/* Give the chip some extra settling time after reset. */
	delay(1000);
}
1324
1325 /*
1326 * ae_init: [ ifnet interface function ]
1327 *
1328 * Initialize the interface. Must be called at splnet().
1329 */
static int
ae_init(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;
	struct ae_rxsoft *rxs;
	const uint8_t *enaddr;
	int i, error = 0;

	/* Hook up the interrupt handler (no-op if already enabled). */
	if ((error = ae_enable(sc)) != 0)
		goto out;

	/*
	 * Cancel any pending I/O.
	 */
	ae_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * Initialize the BUSMODE register.
	 */
	AE_WRITE(sc, CSR_BUSMODE,
	    /* XXX: not sure if this is a good thing or not... */
	    //BUSMODE_ALIGN_16B |
	    BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW);
	AE_BARRIER(sc);

	/*
	 * Initialize the transmit descriptor ring.  Each descriptor's
	 * second buffer pointer chains to the next descriptor, so the
	 * chip follows the ring without needing the ring-end bit on
	 * every entry.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < AE_NTXDESC; i++) {
		sc->sc_txdescs[i].ad_ctl = 0;
		sc->sc_txdescs[i].ad_bufaddr2 =
		    AE_CDTXADDR(sc, AE_NEXTTX(i));
	}
	/* Mark the last descriptor as end-of-ring. */
	sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER;
	AE_CDTXSYNC(sc, 0, AE_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = AE_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the transmit job descriptors.
	 */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.  Buffers already attached (from a
	 * previous init) are simply re-armed.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = ae_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				ae_rxdrain(sc);
				goto out;
			}
		} else
			AE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	/* normal interrupts */
	sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;

	/* abnormal interrupts */
	sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
	    STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS;

	sc->sc_rxint_mask = STATUS_RI|STATUS_RU;
	sc->sc_txint_mask = STATUS_TI|STATUS_UNF|STATUS_TJT;

	/* Only interrupts actually enabled can ever be dispatched. */
	sc->sc_rxint_mask &= sc->sc_inten;
	sc->sc_txint_mask &= sc->sc_inten;

	AE_WRITE(sc, CSR_INTEN, sc->sc_inten);
	/* Ack any stale status bits before starting. */
	AE_WRITE(sc, CSR_STATUS, 0xffffffff);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext));
	AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr));
	AE_BARRIER(sc);

	/*
	 * Set the station address.  The MAC registers take the
	 * address little-endian: bytes 0-3 in MACLO, bytes 4-5 in
	 * the low halves of MACHI.
	 */
	enaddr = CLLADDR(ifp->if_sadl);
	AE_WRITE(sc, CSR_MACHI, enaddr[5] << 16 | enaddr[4]);
	AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 |
	    enaddr[1] << 8 | enaddr[0]);
	AE_BARRIER(sc);

	/*
	 * Set the receive filter. This will start the transmit and
	 * receive processes.
	 */
	ae_filter_setup(sc);

	/*
	 * Set the current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Start the mac.
	 */
	AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE);
	AE_BARRIER(sc);

	/*
	 * Write out the opmode.
	 */
	AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST |
	    ae_txthresh[sc->sc_txthresh].txth_opmode);
	/*
	 * Start the receive process.
	 */
	AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
	AE_BARRIER(sc);

	if (sc->sc_tick != NULL) {
		/*
		 * Start the tick callout.  The first tick fires after
		 * hz/8 (not one second); ae_mii_tick() then re-arms
		 * itself at one-second intervals.
		 */
		callout_reset(&sc->sc_tick_callout, hz >> 3, sc->sc_tick, sc);
	}

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}
1494
1495 /*
1496 * ae_enable:
1497 *
1498 * Enable the chip.
1499 */
1500 static int
1501 ae_enable(struct ae_softc *sc)
1502 {
1503
1504 if (AE_IS_ENABLED(sc) == 0) {
1505 sc->sc_ih = arbus_intr_establish(sc->sc_cirq, sc->sc_mirq,
1506 ae_intr, sc);
1507 if (sc->sc_ih == NULL) {
1508 printf("%s: unable to establish interrupt\n",
1509 sc->sc_dev.dv_xname);
1510 return (EIO);
1511 }
1512 sc->sc_flags |= AE_ENABLED;
1513 }
1514 return (0);
1515 }
1516
1517 /*
1518 * ae_disable:
1519 *
1520 * Disable the chip.
1521 */
1522 static void
1523 ae_disable(struct ae_softc *sc)
1524 {
1525
1526 if (AE_IS_ENABLED(sc)) {
1527 arbus_intr_disestablish(sc->sc_ih);
1528 sc->sc_flags &= ~AE_ENABLED;
1529 }
1530 }
1531
1532 /*
1533 * ae_power:
1534 *
1535 * Power management (suspend/resume) hook.
1536 */
1537 static void
1538 ae_power(int why, void *arg)
1539 {
1540 struct ae_softc *sc = arg;
1541 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1542 int s;
1543
1544 printf("power called: %d, %x\n", why, (uint32_t)arg);
1545 s = splnet();
1546 switch (why) {
1547 case PWR_STANDBY:
1548 /* do nothing! */
1549 break;
1550 case PWR_SUSPEND:
1551 ae_stop(ifp, 0);
1552 ae_disable(sc);
1553 break;
1554 case PWR_RESUME:
1555 if (ifp->if_flags & IFF_UP) {
1556 ae_enable(sc);
1557 ae_init(ifp);
1558 }
1559 break;
1560 case PWR_SOFTSUSPEND:
1561 case PWR_SOFTSTANDBY:
1562 case PWR_SOFTRESUME:
1563 break;
1564 }
1565 splx(s);
1566 }
1567
1568 /*
1569 * ae_rxdrain:
1570 *
1571 * Drain the receive queue.
1572 */
1573 static void
1574 ae_rxdrain(struct ae_softc *sc)
1575 {
1576 struct ae_rxsoft *rxs;
1577 int i;
1578
1579 for (i = 0; i < AE_NRXDESC; i++) {
1580 rxs = &sc->sc_rxsoft[i];
1581 if (rxs->rxs_mbuf != NULL) {
1582 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1583 m_freem(rxs->rxs_mbuf);
1584 rxs->rxs_mbuf = NULL;
1585 }
1586 }
1587 }
1588
1589 /*
1590 * ae_stop: [ ifnet interface function ]
1591 *
1592 * Stop transmission on the interface.
1593 */
static void
ae_stop(struct ifnet *ifp, int disable)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;

	if (sc->sc_tick != NULL) {
		/* Stop the one second clock. */
		callout_stop(&sc->sc_tick_callout);
	}

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	AE_WRITE(sc, CSR_INTEN, 0);

	/*
	 * Stop the transmit and receive processes: clear the opmode
	 * start bits, zero the ring base pointers, and disable the
	 * MAC transmitter/receiver.
	 */
	AE_WRITE(sc, CSR_OPMODE, 0);
	AE_WRITE(sc, CSR_RXLIST, 0);
	AE_WRITE(sc, CSR_TXLIST, 0);
	AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE);
	AE_BARRIER(sc);

	/*
	 * Release any queued transmit buffers.  Each in-flight job is
	 * unloaded, its mbuf freed, and the job returned to the free
	 * queue.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;

	/* Fully disable: also free rx buffers and drop the interrupt. */
	if (disable) {
		ae_rxdrain(sc);
		ae_disable(sc);
	}

	/*
	 * Reset the chip (needed on some flavors to actually disable it).
	 */
	ae_reset(sc);
}
1648
1649 /*
1650 * ae_add_rxbuf:
1651 *
1652 * Add a receive buffer to the indicated descriptor.
1653 */
static int
ae_add_rxbuf(struct ae_softc *sc, int idx)
{
	struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header plus a cluster for the full frame. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Replace any buffer previously attached to this descriptor. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the whole cluster for device reads (receive DMA). */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/*
		 * NOTE(review): panicking here is heavy-handed (the
		 * original XXX agrees); a load failure could instead
		 * be returned to the caller like the ENOBUFS cases.
		 */
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("ae_add_rxbuf"); /* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Hand the descriptor (with its new buffer) back to the chip. */
	AE_INIT_RXDESC(sc, idx);

	return (0);
}
1693
1694 /*
1695 * ae_filter_setup:
1696 *
1697 * Set the chip's receive filter.
1698 */
1699 static void
1700 ae_filter_setup(struct ae_softc *sc)
1701 {
1702 struct ethercom *ec = &sc->sc_ethercom;
1703 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1704 struct ether_multi *enm;
1705 struct ether_multistep step;
1706 uint32_t hash, mchash[2];
1707 uint32_t macctl = 0;
1708
1709 /*
1710 * If the chip is running, we need to reset the interface,
1711 * and will revisit here (with IFF_RUNNING) clear. The
1712 * chip seems to really not like to have its multicast
1713 * filter programmed without a reset.
1714 */
1715 if (ifp->if_flags & IFF_RUNNING) {
1716 (void) ae_init(ifp);
1717 return;
1718 }
1719
1720 DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n",
1721 sc->sc_dev.dv_xname, sc->sc_flags));
1722
1723 macctl = AE_READ(sc, CSR_MACCTL);
1724 macctl &= ~(MACCTL_PR | MACCTL_PM);
1725 macctl |= MACCTL_HASH;
1726 macctl |= MACCTL_HBD;
1727 macctl |= MACCTL_PR;
1728
1729 if (ifp->if_flags & IFF_PROMISC) {
1730 macctl |= MACCTL_PR;
1731 goto allmulti;
1732 }
1733
1734 mchash[0] = mchash[1] = 0;
1735
1736 ETHER_FIRST_MULTI(step, ec, enm);
1737 while (enm != NULL) {
1738 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1739 /*
1740 * We must listen to a range of multicast addresses.
1741 * For now, just accept all multicasts, rather than
1742 * trying to set only those filter bits needed to match
1743 * the range. (At this time, the only use of address
1744 * ranges is for IP multicast routing, for which the
1745 * range is big enough to require all bits set.)
1746 */
1747 goto allmulti;
1748 }
1749
1750 /* Verify whether we use big or little endian hashes */
1751 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f;
1752 mchash[hash >> 5] |= 1 << (hash & 0x1f);
1753 ETHER_NEXT_MULTI(step, enm);
1754 }
1755 ifp->if_flags &= ~IFF_ALLMULTI;
1756 goto setit;
1757
1758 allmulti:
1759 ifp->if_flags |= IFF_ALLMULTI;
1760 mchash[0] = mchash[1] = 0xffffffff;
1761 macctl |= MACCTL_PM;
1762
1763 setit:
1764 AE_WRITE(sc, CSR_HTHI, mchash[0]);
1765 AE_WRITE(sc, CSR_HTHI, mchash[1]);
1766
1767 AE_WRITE(sc, CSR_MACCTL, macctl);
1768 AE_BARRIER(sc);
1769
1770 DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n",
1771 sc->sc_dev.dv_xname, macctl));
1772 }
1773
1774 /*
1775 * ae_idle:
1776 *
1777 * Cause the transmit and/or receive processes to go idle.
1778 */
void
ae_idle(struct ae_softc *sc, u_int32_t bits)
{
	/* Names indexed by the STATUS_TS field (transmit state). */
	static const char * const txstate_names[] = {
		"STOPPED",
		"RUNNING - FETCH",
		"RUNNING - WAIT",
		"RUNNING - READING",
		"-- RESERVED --",
		"RUNNING - SETUP",
		"SUSPENDED",
		"RUNNING - CLOSE",
	};
	/* Names indexed by the STATUS_RS field (receive state). */
	static const char * const rxstate_names[] = {
		"STOPPED",
		"RUNNING - FETCH",
		"RUNNING - CHECK",
		"RUNNING - WAIT",
		"SUSPENDED",
		"RUNNING - CLOSE",
		"RUNNING - FLUSH",
		"RUNNING - QUEUE",
	};

	u_int32_t csr, ackmask = 0;
	int i;

	/*
	 * Build the set of "process stopped" status bits we expect the
	 * chip to assert once the corresponding start bits are cleared.
	 */
	if (bits & OPMODE_ST)
		ackmask |= STATUS_TPS;

	if (bits & OPMODE_SR)
		ackmask |= STATUS_RPS;

	/* Clear the requested start bit(s) in the opmode register. */
	AE_CLR(sc, CSR_OPMODE, bits);

	/* Poll (up to ~10ms) for the stop acknowledgements. */
	for (i = 0; i < 1000; i++) {
		if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask)
			break;
		delay(10);
	}

	/*
	 * If an ack never arrived, report the offending process'
	 * current state -- unless it reads back as STOPPED anyway.
	 */
	csr = AE_READ(sc, CSR_STATUS);
	if ((csr & ackmask) != ackmask) {
		if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 &&
		    (csr & STATUS_TS) != STATUS_TS_STOPPED) {
			printf("%s: transmit process failed to idle: "
			    "state %s\n", sc->sc_dev.dv_xname,
			    txstate_names[(csr & STATUS_TS) >> 20]);
		}
		if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 &&
		    (csr & STATUS_RS) != STATUS_RS_STOPPED) {
			printf("%s: receive process failed to idle: "
			    "state %s\n", sc->sc_dev.dv_xname,
			    rxstate_names[(csr & STATUS_RS) >> 17]);
		}
	}
}
1836
1837 /*****************************************************************************
1838 * Support functions for MII-attached media.
1839 *****************************************************************************/
1840
1841 /*
1842 * ae_mii_tick:
1843 *
1844 * One second timer, used to tick the MII.
1845 */
static void
ae_mii_tick(void *arg)
{
	struct ae_softc *sc = arg;
	int s;

	/* Don't touch the hardware if the device has been deactivated. */
	if (!device_is_active(&sc->sc_dev))
		return;

	/* Tick the MII at splnet, as required by the MII layer. */
	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	/* Re-arm ourselves for one second from now. */
	callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc);
}
1861
1862 /*
1863 * ae_mii_statchg: [mii interface function]
1864 *
1865 * Callback from PHY when media changes.
1866 */
static void
ae_mii_statchg(device_t self)
{
	struct ae_softc *sc = device_private(self);
	uint32_t macctl, flowc;

	//opmode = AE_READ(sc, CSR_OPMODE);
	macctl = AE_READ(sc, CSR_MACCTL);

	/* XXX: do we need to do this? */
	/* Idle the transmit and receive processes. */
	//ae_idle(sc, OPMODE_ST|OPMODE_SR);

	/*
	 * Reprogram duplex-dependent MAC settings from the resolved
	 * media: full duplex enables flow control and disables the
	 * "disable receive own" bit; half duplex is the opposite.
	 */
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		flowc = FLOWC_FCE;
		macctl &= ~MACCTL_DRO;
		macctl |= MACCTL_FDX;
	} else {
		flowc = 0;	/* cannot do flow control in HDX */
		macctl |= MACCTL_DRO;
		macctl &= ~MACCTL_FDX;
	}

	AE_WRITE(sc, CSR_FLOWC, flowc);
	AE_WRITE(sc, CSR_MACCTL, macctl);

	/* restore operational mode */
	//AE_WRITE(sc, CSR_OPMODE, opmode);
	AE_BARRIER(sc);
}
1897
1898 /*
1899 * ae_mii_readreg:
1900 *
1901 * Read a PHY register.
1902 */
static int
ae_mii_readreg(device_t self, int phy, int reg)
{
	struct ae_softc *sc = device_private(self);
	uint32_t addr;
	int i;

	/* Latch the PHY/register address; the chip starts the cycle. */
	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);
	/*
	 * Busy-wait for the MII cycle to finish.
	 * NOTE(review): there is no delay in this spin and no error
	 * path on timeout -- if the loop exhausts, stale MIIDATA is
	 * returned as if it were valid.  Consider a bounded
	 * delay()-based loop like ae_reset() uses.
	 */
	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}

	/* PHY registers are 16 bits wide. */
	return (AE_READ(sc, CSR_MIIDATA) & 0xffff);
}
1920
1921 /*
1922 * ae_mii_writereg:
1923 *
1924 * Write a PHY register.
1925 */
static void
ae_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct ae_softc *sc = device_private(self);
	uint32_t addr;
	int i;

	/* write the data register */
	AE_WRITE(sc, CSR_MIIDATA, val);

	/* write the address to latch it in */
	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) |
	    MIIADDR_WRITE;
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);

	/*
	 * Busy-wait for the write cycle to finish.
	 * NOTE(review): as in ae_mii_readreg(), a timeout here is
	 * silently ignored.
	 */
	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}
}
1947