if_ae.c revision 1.21.2.1 1 /* $Id: if_ae.c,v 1.21.2.1 2012/04/17 00:06:39 yamt Exp $ */
2 /*-
3 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
4 * Copyright (c) 2006 Garrett D'Amore.
5 * All rights reserved.
6 *
7 * This code was written by Garrett D'Amore for the Champaign-Urbana
8 * Community Wireless Network Project.
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 * 3. All advertising materials mentioning features or use of this
20 * software must display the following acknowledgements:
21 * This product includes software developed by the Urbana-Champaign
22 * Independent Media Center.
23 * This product includes software developed by Garrett D'Amore.
24 * 4. Urbana-Champaign Independent Media Center's name and Garrett
25 * D'Amore's name may not be used to endorse or promote products
26 * derived from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
29 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
31 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
33 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
35 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
36 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
37 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
40 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 */
42 /*-
43 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
44 * All rights reserved.
45 *
46 * This code is derived from software contributed to The NetBSD Foundation
47 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
48 * NASA Ames Research Center; and by Charles M. Hannum.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 * notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 * notice, this list of conditions and the following disclaimer in the
57 * documentation and/or other materials provided with the distribution.
58 *
59 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
60 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
61 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
62 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
63 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
64 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
65 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
66 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
67 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
68 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
69 * POSSIBILITY OF SUCH DAMAGE.
70 */
71
72 /*
73 * Device driver for the onboard ethernet MAC found on the AR5312
74 * chip's AHB bus.
75 *
76 * This device is very similar to the tulip in most regards, and
77 * the code is directly derived from NetBSD's tulip.c. However, it
78 * is different enough that it did not seem to be a good idea to
79 * add further complexity to the tulip driver, so we have our own.
80 *
81 * Also tulip has a lot of complexity in it for various parts/options
82 * that we don't need, and on these little boxes with only ~8MB RAM, we
83 * don't want any extra bloat.
84 */
85
86 /*
87 * TODO:
88 *
89 * 1) Find out about BUS_MODE_ALIGN16B. This chip can apparently align
90 * inbound packets on a half-word boundary, which would make life easier
91 * for TCP/IP. (Aligning IP headers on a word.)
92 *
93 * 2) There is stuff in original tulip to shut down the device when reacting
94 * to a change in link status. Is that needed?
95 *
96 * 3) Test with variety of 10/100 HDX/FDX scenarios.
97 *
98 */
99
100 #include <sys/cdefs.h>
101 __KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.21.2.1 2012/04/17 00:06:39 yamt Exp $");
102
103
104 #include <sys/param.h>
105 #include <sys/bus.h>
106 #include <sys/callout.h>
107 #include <sys/device.h>
108 #include <sys/endian.h>
109 #include <sys/errno.h>
110 #include <sys/intr.h>
111 #include <sys/ioctl.h>
112 #include <sys/kernel.h>
113 #include <sys/malloc.h>
114 #include <sys/mbuf.h>
115 #include <sys/socket.h>
116
117 #include <uvm/uvm_extern.h>
118
119 #include <net/if.h>
120 #include <net/if_dl.h>
121 #include <net/if_media.h>
122 #include <net/if_ether.h>
123
124 #include <net/bpf.h>
125
126 #include <dev/mii/mii.h>
127 #include <dev/mii/miivar.h>
128 #include <dev/mii/mii_bitbang.h>
129
130 #include <mips/atheros/include/arbusvar.h>
131 #include <mips/atheros/dev/aereg.h>
132 #include <mips/atheros/dev/aevar.h>
133
/*
 * Transmit FIFO threshold table: OPMODE register bits paired with a
 * printable name, ordered from smallest threshold to store-and-forward,
 * and terminated by a NULL name.  ae_intr() walks up this table when it
 * sees a transmit underrun (STATUS_UNF).
 */
static const struct {
	u_int32_t txth_opmode;		/* OPMODE bits */
	const char *txth_name;		/* name of mode */
} ae_txthresh[] = {
	{ OPMODE_TR_32,		"32 words" },
	{ OPMODE_TR_64,		"64 words" },
	{ OPMODE_TR_128,	"128 words" },
	{ OPMODE_TR_256,	"256 words" },
	{ OPMODE_SF,		"store and forward mode" },
	{ 0,			NULL },
};
145
146 static int ae_match(device_t, struct cfdata *, void *);
147 static void ae_attach(device_t, device_t, void *);
148 static int ae_detach(device_t, int);
149 static int ae_activate(device_t, enum devact);
150
151 static int ae_ifflags_cb(struct ethercom *);
152 static void ae_reset(struct ae_softc *);
153 static void ae_idle(struct ae_softc *, u_int32_t);
154
155 static void ae_start(struct ifnet *);
156 static void ae_watchdog(struct ifnet *);
157 static int ae_ioctl(struct ifnet *, u_long, void *);
158 static int ae_init(struct ifnet *);
159 static void ae_stop(struct ifnet *, int);
160
161 static void ae_shutdown(void *);
162
163 static void ae_rxdrain(struct ae_softc *);
164 static int ae_add_rxbuf(struct ae_softc *, int);
165
166 static int ae_enable(struct ae_softc *);
167 static void ae_disable(struct ae_softc *);
168 static void ae_power(int, void *);
169
170 static void ae_filter_setup(struct ae_softc *);
171
172 static int ae_intr(void *);
173 static void ae_rxintr(struct ae_softc *);
174 static void ae_txintr(struct ae_softc *);
175
176 static void ae_mii_tick(void *);
177 static void ae_mii_statchg(device_t);
178
179 static int ae_mii_readreg(device_t, int, int);
180 static void ae_mii_writereg(device_t, int, int, int);
181
182 #ifdef AE_DEBUG
183 #define DPRINTF(sc, x) if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
184 printf x
185 #else
186 #define DPRINTF(sc, x) /* nothing */
187 #endif
188
189 #ifdef AE_STATS
190 static void ae_print_stats(struct ae_softc *);
191 #endif
192
/* Autoconfiguration glue: hook up the match/attach/detach/activate
 * entry points for the "ae" driver. */
CFATTACH_DECL(ae, sizeof(struct ae_softc),
    ae_match, ae_attach, ae_detach, ae_activate);
195
196 /*
197 * ae_match:
198 *
199 * Check for a device match.
200 */
201 int
202 ae_match(device_t parent, struct cfdata *cf, void *aux)
203 {
204 struct arbus_attach_args *aa = aux;
205
206 if (strcmp(aa->aa_name, cf->cf_name) == 0)
207 return 1;
208
209 return 0;
210
211 }
212
/*
 * ae_attach:
 *
 *	Attach an ae interface to the system.
 *
 *	Maps the device registers, allocates and loads the DMA control
 *	data, creates per-slot transmit/receive DMA maps, resets the
 *	chip, probes the MII, and attaches the network interface.  On
 *	any failure before the AE_ATTACHED flag is set, all resources
 *	acquired so far are released via the fail_* unwind chain below.
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.  It is expected to have been stashed
	 * in the device properties (as "mac-address") by platform code.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/* Record bus/interrupt resources handed to us by arbus. */
	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  BUS_DMA_COHERENT asks for a cache-coherent
	 * mapping, since the chip and CPU both touch the descriptors.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps (up to AE_NTXSEGS
	 * segments per packet).
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps (a single cluster-sized
	 * segment each).
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	/* If no PHY was found, offer only a "none" medium. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	/* Fill in the ifnet and hand it to the network stack. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

	/* Feed interrupt status into the entropy pool (see ae_intr). */
	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}
438
439 /*
440 * ae_activate:
441 *
442 * Handle device activation/deactivation requests.
443 */
444 int
445 ae_activate(device_t self, enum devact act)
446 {
447 struct ae_softc *sc = device_private(self);
448
449 switch (act) {
450 case DVACT_DEACTIVATE:
451 if_deactivate(&sc->sc_ethercom.ec_if);
452 return 0;
453 default:
454 return EOPNOTSUPP;
455 }
456 }
457
/*
 * ae_detach:
 *
 *	Detach a device interface.
 *
 *	Tears down, in reverse order of attach: tick callout, MII/media,
 *	random source, the ifnet itself, then all DMA buffers/maps, the
 *	shutdown and power hooks, and finally the register mapping.
 */
int
ae_detach(device_t self, int flags)
{
	struct ae_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct ae_txsoft *txs;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & AE_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	if (sc->sc_tick)
		callout_stop(&sc->sc_tick_callout);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	rnd_detach_source(&sc->sc_rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Free any mbufs still on the receive ring, then the maps. */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}
	/* Likewise for any transmit mbufs still outstanding. */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}
	/* Release the descriptor/control-data DMA resources. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);

	return (0);
}
524
525 /*
526 * ae_shutdown:
527 *
528 * Make sure the interface is stopped at reboot time.
529 */
530 static void
531 ae_shutdown(void *arg)
532 {
533 struct ae_softc *sc = arg;
534
535 ae_stop(&sc->sc_ethercom.ec_if, 1);
536 }
537
538 /*
539 * ae_start: [ifnet interface function]
540 *
541 * Start packet transmission on the interface.
542 */
543 static void
544 ae_start(struct ifnet *ifp)
545 {
546 struct ae_softc *sc = ifp->if_softc;
547 struct mbuf *m0, *m;
548 struct ae_txsoft *txs, *last_txs = NULL;
549 bus_dmamap_t dmamap;
550 int error, firsttx, nexttx, lasttx = 1, ofree, seg;
551
552 DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
553 sc->sc_dev.dv_xname, sc->sc_flags, ifp->if_flags));
554
555
556 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
557 return;
558
559 /*
560 * Remember the previous number of free descriptors and
561 * the first descriptor we'll use.
562 */
563 ofree = sc->sc_txfree;
564 firsttx = sc->sc_txnext;
565
566 DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
567 sc->sc_dev.dv_xname, ofree, firsttx));
568
569 /*
570 * Loop through the send queue, setting up transmit descriptors
571 * until we drain the queue, or use up all available transmit
572 * descriptors.
573 */
574 while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
575 sc->sc_txfree != 0) {
576 /*
577 * Grab a packet off the queue.
578 */
579 IFQ_POLL(&ifp->if_snd, m0);
580 if (m0 == NULL)
581 break;
582 m = NULL;
583
584 dmamap = txs->txs_dmamap;
585
586 /*
587 * Load the DMA map. If this fails, the packet either
588 * didn't fit in the alloted number of segments, or we were
589 * short on resources. In this case, we'll copy and try
590 * again.
591 */
592 if (((mtod(m0, uintptr_t) & 3) != 0) ||
593 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
594 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
595 MGETHDR(m, M_DONTWAIT, MT_DATA);
596 if (m == NULL) {
597 printf("%s: unable to allocate Tx mbuf\n",
598 sc->sc_dev.dv_xname);
599 break;
600 }
601 MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
602 if (m0->m_pkthdr.len > MHLEN) {
603 MCLGET(m, M_DONTWAIT);
604 if ((m->m_flags & M_EXT) == 0) {
605 printf("%s: unable to allocate Tx "
606 "cluster\n", sc->sc_dev.dv_xname);
607 m_freem(m);
608 break;
609 }
610 }
611 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
612 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
613 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
614 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
615 if (error) {
616 printf("%s: unable to load Tx buffer, "
617 "error = %d\n", sc->sc_dev.dv_xname,
618 error);
619 break;
620 }
621 }
622
623 /*
624 * Ensure we have enough descriptors free to describe
625 * the packet.
626 */
627 if (dmamap->dm_nsegs > sc->sc_txfree) {
628 /*
629 * Not enough free descriptors to transmit this
630 * packet. We haven't committed to anything yet,
631 * so just unload the DMA map, put the packet
632 * back on the queue, and punt. Notify the upper
633 * layer that there are no more slots left.
634 *
635 * XXX We could allocate an mbuf and copy, but
636 * XXX it is worth it?
637 */
638 ifp->if_flags |= IFF_OACTIVE;
639 bus_dmamap_unload(sc->sc_dmat, dmamap);
640 if (m != NULL)
641 m_freem(m);
642 break;
643 }
644
645 IFQ_DEQUEUE(&ifp->if_snd, m0);
646 if (m != NULL) {
647 m_freem(m0);
648 m0 = m;
649 }
650
651 /*
652 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
653 */
654
655 /* Sync the DMA map. */
656 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
657 BUS_DMASYNC_PREWRITE);
658
659 /*
660 * Initialize the transmit descriptors.
661 */
662 for (nexttx = sc->sc_txnext, seg = 0;
663 seg < dmamap->dm_nsegs;
664 seg++, nexttx = AE_NEXTTX(nexttx)) {
665 /*
666 * If this is the first descriptor we're
667 * enqueueing, don't set the OWN bit just
668 * yet. That could cause a race condition.
669 * We'll do it below.
670 */
671 sc->sc_txdescs[nexttx].ad_status =
672 (nexttx == firsttx) ? 0 : ADSTAT_OWN;
673 sc->sc_txdescs[nexttx].ad_bufaddr1 =
674 dmamap->dm_segs[seg].ds_addr;
675 sc->sc_txdescs[nexttx].ad_ctl =
676 (dmamap->dm_segs[seg].ds_len <<
677 ADCTL_SIZE1_SHIFT) |
678 (nexttx == (AE_NTXDESC - 1) ?
679 ADCTL_ER : 0);
680 lasttx = nexttx;
681 }
682
683 KASSERT(lasttx != -1);
684
685 /* Set `first segment' and `last segment' appropriately. */
686 sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
687 sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;
688
689 #ifdef AE_DEBUG
690 if (ifp->if_flags & IFF_DEBUG) {
691 printf(" txsoft %p transmit chain:\n", txs);
692 for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
693 printf(" descriptor %d:\n", seg);
694 printf(" ad_status: 0x%08x\n",
695 sc->sc_txdescs[seg].ad_status);
696 printf(" ad_ctl: 0x%08x\n",
697 sc->sc_txdescs[seg].ad_ctl);
698 printf(" ad_bufaddr1: 0x%08x\n",
699 sc->sc_txdescs[seg].ad_bufaddr1);
700 printf(" ad_bufaddr2: 0x%08x\n",
701 sc->sc_txdescs[seg].ad_bufaddr2);
702 if (seg == lasttx)
703 break;
704 }
705 }
706 #endif
707
708 /* Sync the descriptors we're using. */
709 AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
710 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
711
712 /*
713 * Store a pointer to the packet so we can free it later,
714 * and remember what txdirty will be once the packet is
715 * done.
716 */
717 txs->txs_mbuf = m0;
718 txs->txs_firstdesc = sc->sc_txnext;
719 txs->txs_lastdesc = lasttx;
720 txs->txs_ndescs = dmamap->dm_nsegs;
721
722 /* Advance the tx pointer. */
723 sc->sc_txfree -= dmamap->dm_nsegs;
724 sc->sc_txnext = nexttx;
725
726 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
727 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
728
729 last_txs = txs;
730
731 /*
732 * Pass the packet to any BPF listeners.
733 */
734 bpf_mtap(ifp, m0);
735 }
736
737 if (txs == NULL || sc->sc_txfree == 0) {
738 /* No more slots left; notify upper layer. */
739 ifp->if_flags |= IFF_OACTIVE;
740 }
741
742 if (sc->sc_txfree != ofree) {
743 DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
744 sc->sc_dev.dv_xname, lasttx, firsttx));
745 /*
746 * Cause a transmit interrupt to happen on the
747 * last packet we enqueued.
748 */
749 sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC;
750 AE_CDTXSYNC(sc, lasttx, 1,
751 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
752
753 /*
754 * The entire packet chain is set up. Give the
755 * first descriptor to the chip now.
756 */
757 sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN;
758 AE_CDTXSYNC(sc, firsttx, 1,
759 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
760
761 /* Wake up the transmitter. */
762 /* XXX USE AUTOPOLLING? */
763 AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
764 AE_BARRIER(sc);
765
766 /* Set a watchdog timer in case the chip flakes out. */
767 ifp->if_timer = 5;
768 }
769 }
770
771 /*
772 * ae_watchdog: [ifnet interface function]
773 *
774 * Watchdog timer handler.
775 */
776 static void
777 ae_watchdog(struct ifnet *ifp)
778 {
779 struct ae_softc *sc = ifp->if_softc;
780 int doing_transmit;
781
782 doing_transmit = (! SIMPLEQ_EMPTY(&sc->sc_txdirtyq));
783
784 if (doing_transmit) {
785 printf("%s: transmit timeout\n", sc->sc_dev.dv_xname);
786 ifp->if_oerrors++;
787 }
788 else
789 printf("%s: spurious watchdog timeout\n", sc->sc_dev.dv_xname);
790
791 (void) ae_init(ifp);
792
793 /* Try to get more packets going. */
794 ae_start(ifp);
795 }
796
797 /* If the interface is up and running, only modify the receive
798 * filter when changing to/from promiscuous mode. Otherwise return
799 * ENETRESET so that ether_ioctl will reset the chip.
800 */
801 static int
802 ae_ifflags_cb(struct ethercom *ec)
803 {
804 struct ifnet *ifp = &ec->ec_if;
805 struct ae_softc *sc = ifp->if_softc;
806 int change = ifp->if_flags ^ sc->sc_if_flags;
807
808 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
809 return ENETRESET;
810 else if ((change & IFF_PROMISC) != 0)
811 ae_filter_setup(sc);
812 return 0;
813 }
814
815 /*
816 * ae_ioctl: [ifnet interface function]
817 *
818 * Handle control requests from the operator.
819 */
820 static int
821 ae_ioctl(struct ifnet *ifp, u_long cmd, void *data)
822 {
823 struct ae_softc *sc = ifp->if_softc;
824 int s, error;
825
826 s = splnet();
827
828 error = ether_ioctl(ifp, cmd, data);
829 if (error == ENETRESET) {
830 if (ifp->if_flags & IFF_RUNNING) {
831 /*
832 * Multicast list has changed. Set the
833 * hardware filter accordingly.
834 */
835 ae_filter_setup(sc);
836 }
837 error = 0;
838 }
839
840 /* Try to get more packets going. */
841 if (AE_IS_ENABLED(sc))
842 ae_start(ifp);
843
844 sc->sc_if_flags = ifp->if_flags;
845 splx(s);
846 return (error);
847 }
848
/*
 * ae_intr:
 *
 *	Interrupt service routine.
 *
 *	Reads and acknowledges CSR_STATUS in a loop until no enabled
 *	interrupt bits remain, dispatching to ae_rxintr()/ae_txintr()
 *	and handling error conditions (ring overrun, underrun, process
 *	stop, fatal system error).  Returns nonzero iff the interrupt
 *	was ours.
 */
int
ae_intr(void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t status, rxstatus, txstatus;
	int handled = 0, txthresh;

	DPRINTF(sc, ("%s: ae_intr\n", sc->sc_dev.dv_xname));

#ifdef DEBUG
	if (AE_IS_ENABLED(sc) == 0)
		panic("%s: ae_intr: not enabled", sc->sc_dev.dv_xname);
#endif

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(&sc->sc_dev)) {
		printf("spurious?!?\n");
		return (0);
	}

	for (;;) {
		/* Read pending status and write it back to acknowledge. */
		status = AE_READ(sc, CSR_STATUS);
		if (status) {
			AE_WRITE(sc, CSR_STATUS, status);
			AE_BARRIER(sc);
		}

		/* Done once no bits we're interested in remain set. */
		if ((status & sc->sc_inten) == 0)
			break;

		handled = 1;

		rxstatus = status & sc->sc_rxint_mask;
		txstatus = status & sc->sc_txint_mask;

		if (rxstatus) {
			/* Grab any new packets. */
			ae_rxintr(sc);

			if (rxstatus & STATUS_RU) {
				printf("%s: receive ring overrun\n",
				    sc->sc_dev.dv_xname);
				/* Get the receive process going again. */
				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
				AE_BARRIER(sc);
				break;
			}
		}

		if (txstatus) {
			/* Sweep up transmit descriptors. */
			ae_txintr(sc);

			if (txstatus & STATUS_TJT)
				printf("%s: transmit jabber timeout\n",
				    sc->sc_dev.dv_xname);

			if (txstatus & STATUS_UNF) {
				/*
				 * Increase our transmit threshold if
				 * another is available (walks up the
				 * ae_txthresh table).
				 */
				txthresh = sc->sc_txthresh + 1;
				if (ae_txthresh[txthresh].txth_name != NULL) {
					uint32_t opmode;
					/* Idle the transmit process. */
					opmode = AE_READ(sc, CSR_OPMODE);
					ae_idle(sc, OPMODE_ST);

					sc->sc_txthresh = txthresh;
					opmode &=
					    ~(OPMODE_TR|OPMODE_SF);
					opmode |=
					    ae_txthresh[txthresh].txth_opmode;
					printf("%s: transmit underrun; new "
					    "threshold: %s\n",
					    sc->sc_dev.dv_xname,
					    ae_txthresh[txthresh].txth_name);

					/*
					 * Set the new threshold and restart
					 * the transmit process.
					 */
					AE_WRITE(sc, CSR_OPMODE, opmode);
					AE_BARRIER(sc);
				}
				/*
				 * XXX Log every Nth underrun from
				 * XXX now on?
				 */
			}
		}

		/* Either DMA engine stopping is fatal; reinitialize. */
		if (status & (STATUS_TPS|STATUS_RPS)) {
			if (status & STATUS_TPS)
				printf("%s: transmit process stopped\n",
				    sc->sc_dev.dv_xname);
			if (status & STATUS_RPS)
				printf("%s: receive process stopped\n",
				    sc->sc_dev.dv_xname);
			(void) ae_init(ifp);
			break;
		}

		if (status & STATUS_SE) {
			const char *str;

			if (status & STATUS_TX_ABORT)
				str = "tx abort";
			else if (status & STATUS_RX_ABORT)
				str = "rx abort";
			else
				str = "unknown error";

			printf("%s: fatal system error: %s\n",
			    sc->sc_dev.dv_xname, str);
			(void) ae_init(ifp);
			break;
		}

		/*
		 * Not handled:
		 *
		 * Transmit buffer unavailable -- normal
		 * condition, nothing to do, really.
		 *
		 * General purpose timer expired -- we don't
		 * use the general purpose timer.
		 *
		 * Early receive interrupt -- not available on
		 * all chips, we just use RI.  We also only
		 * use single-segment receive DMA, so this
		 * is mostly useless.
		 */
	}

	/* Try to get more packets going. */
	ae_start(ifp);

	/* Feed the raw status word to the entropy pool. */
	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, status);
	return (handled);
}
1002
1003 /*
1004 * ae_rxintr:
1005 *
1006 * Helper; handle receive interrupts.
1007 */
1008 static void
1009 ae_rxintr(struct ae_softc *sc)
1010 {
1011 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1012 struct ether_header *eh;
1013 struct ae_rxsoft *rxs;
1014 struct mbuf *m;
1015 u_int32_t rxstat;
1016 int i, len;
1017
1018 for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
1019 rxs = &sc->sc_rxsoft[i];
1020
1021 AE_CDRXSYNC(sc, i,
1022 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1023
1024 rxstat = sc->sc_rxdescs[i].ad_status;
1025
1026 if (rxstat & ADSTAT_OWN) {
1027 /*
1028 * We have processed all of the receive buffers.
1029 */
1030 break;
1031 }
1032
1033 /*
1034 * If any collisions were seen on the wire, count one.
1035 */
1036 if (rxstat & ADSTAT_Rx_CS)
1037 ifp->if_collisions++;
1038
1039 /*
1040 * If an error occurred, update stats, clear the status
1041 * word, and leave the packet buffer in place. It will
1042 * simply be reused the next time the ring comes around.
1043 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long
1044 * error.
1045 */
1046 if (rxstat & ADSTAT_ES &&
1047 ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 ||
1048 (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF |
1049 ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) {
1050 #define PRINTERR(bit, str) \
1051 if (rxstat & (bit)) \
1052 printf("%s: receive error: %s\n", \
1053 sc->sc_dev.dv_xname, str)
1054 ifp->if_ierrors++;
1055 PRINTERR(ADSTAT_Rx_DE, "descriptor error");
1056 PRINTERR(ADSTAT_Rx_RF, "runt frame");
1057 PRINTERR(ADSTAT_Rx_TL, "frame too long");
1058 PRINTERR(ADSTAT_Rx_RE, "MII error");
1059 PRINTERR(ADSTAT_Rx_DB, "dribbling bit");
1060 PRINTERR(ADSTAT_Rx_CE, "CRC error");
1061 #undef PRINTERR
1062 AE_INIT_RXDESC(sc, i);
1063 continue;
1064 }
1065
1066 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1067 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1068
1069 /*
1070 * No errors; receive the packet. Note the chip
1071 * includes the CRC with every packet.
1072 */
1073 len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;
1074
1075 /*
1076 * XXX: the Atheros part can align on half words. what
1077 * is the performance implication of this? Probably
1078 * minimal, and we should use it...
1079 */
1080 #ifdef __NO_STRICT_ALIGNMENT
1081 /*
1082 * Allocate a new mbuf cluster. If that fails, we are
1083 * out of memory, and must drop the packet and recycle
1084 * the buffer that's already attached to this descriptor.
1085 */
1086 m = rxs->rxs_mbuf;
1087 if (ae_add_rxbuf(sc, i) != 0) {
1088 ifp->if_ierrors++;
1089 AE_INIT_RXDESC(sc, i);
1090 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1091 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1092 continue;
1093 }
1094 #else
1095 /*
1096 * The chip's receive buffers must be 4-byte aligned.
1097 * But this means that the data after the Ethernet header
1098 * is misaligned. We must allocate a new buffer and
1099 * copy the data, shifted forward 2 bytes.
1100 */
1101 MGETHDR(m, M_DONTWAIT, MT_DATA);
1102 if (m == NULL) {
1103 dropit:
1104 ifp->if_ierrors++;
1105 AE_INIT_RXDESC(sc, i);
1106 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1107 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1108 continue;
1109 }
1110 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
1111 if (len > (MHLEN - 2)) {
1112 MCLGET(m, M_DONTWAIT);
1113 if ((m->m_flags & M_EXT) == 0) {
1114 m_freem(m);
1115 goto dropit;
1116 }
1117 }
1118 m->m_data += 2;
1119
1120 /*
1121 * Note that we use clusters for incoming frames, so the
1122 * buffer is virtually contiguous.
1123 */
1124 memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len);
1125
1126 /* Allow the receive descriptor to continue using its mbuf. */
1127 AE_INIT_RXDESC(sc, i);
1128 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1129 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1130 #endif /* __NO_STRICT_ALIGNMENT */
1131
1132 ifp->if_ipackets++;
1133 eh = mtod(m, struct ether_header *);
1134 m->m_pkthdr.rcvif = ifp;
1135 m->m_pkthdr.len = m->m_len = len;
1136
1137 /*
1138 * Pass this up to any BPF listeners, but only
1139 * pass it up the stack if its for us.
1140 */
1141 bpf_mtap(ifp, m);
1142
1143 /* Pass it on. */
1144 (*ifp->if_input)(ifp, m);
1145 }
1146
1147 /* Update the receive pointer. */
1148 sc->sc_rxptr = i;
1149 }
1150
/*
 * ae_txintr:
 *
 *	Helper; handle transmit interrupts.  Reaps completed transmit
 *	jobs from the dirty queue, recycles their descriptors and DMA
 *	maps, and updates the interface error/collision counters from
 *	the chip's per-frame transmit status word.
 */
static void
ae_txintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_txsoft *txs;
	u_int32_t txstat;

	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags));

	/* We may have freed descriptors; allow the start routine to run. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		/*
		 * Sync the job's descriptors so we see the status the
		 * chip wrote back.  NOTE(review): the sync starts at
		 * txs_lastdesc for txs_ndescs descriptors; presumably
		 * AE_CDTXSYNC wraps so the whole chain starting at
		 * txs_firstdesc is covered -- confirm against the
		 * AE_CDTXSYNC definition.
		 */
		AE_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef AE_DEBUG
		/*
		 * NOTE(review): this dump runs before the ADSTAT_OWN
		 * check below, so it may print descriptors the chip
		 * still owns.  Debug-only; harmless.
		 */
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
				printf("     descriptor %d:\n", i);
				printf("       ad_status:   0x%08x\n",
				    sc->sc_txdescs[i].ad_status);
				printf("       ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[i].ad_ctl);
				printf("       ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr1);
				printf("       ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr2);
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * If the chip still owns the last descriptor of this
		 * job, the frame is not done transmitting; stop here.
		 */
		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
		if (txstat & ADSTAT_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		/* Return the job's descriptors to the free pool. */
		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
#ifdef AE_STATS
		if (txstat & ADSTAT_Tx_UF)
			sc->sc_stats.ts_tx_uf++;
		if (txstat & ADSTAT_Tx_TO)
			sc->sc_stats.ts_tx_to++;
		if (txstat & ADSTAT_Tx_EC)
			sc->sc_stats.ts_tx_ec++;
		if (txstat & ADSTAT_Tx_LC)
			sc->sc_stats.ts_tx_lc++;
#endif

		/* Underflow / transmit-timeout count as output errors. */
		if (txstat & (ADSTAT_Tx_UF|ADSTAT_Tx_TO))
			ifp->if_oerrors++;

		/*
		 * Excessive collisions means the frame was abandoned
		 * after 16 attempts; otherwise use the chip's count.
		 */
		if (txstat & ADSTAT_Tx_EC)
			ifp->if_collisions += 16;
		else
			ifp->if_collisions += ADSTAT_Tx_COLLISIONS(txstat);
		if (txstat & ADSTAT_Tx_LC)
			ifp->if_collisions++;

		/* NOTE(review): counted even for errored frames. */
		ifp->if_opackets++;
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL)
		ifp->if_timer = 0;
}
1248
#ifdef AE_STATS
/*
 * ae_print_stats:
 *
 *	Dump the AE_STATS transmit counters (underflow, timeout,
 *	excessive collisions, late collisions) to the console.
 */
void
ae_print_stats(struct ae_softc *sc)
{

	printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
	    sc->sc_dev.dv_xname,
	    sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
	    sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
}
#endif
1260
/*
 * ae_reset:
 *
 *	Perform a soft reset on the chip via the BUSMODE software-reset
 *	bit, then poll until the chip reports the reset complete.
 */
void
ae_reset(struct ae_softc *sc)
{
	int i;

	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
	AE_BARRIER(sc);

	/*
	 * The chip doesn't take itself out of reset automatically.
	 * We need to do so after 2us (delay(10) gives comfortable
	 * margin).
	 */
	delay(10);
	AE_WRITE(sc, CSR_BUSMODE, 0);
	AE_BARRIER(sc);

	/* Poll up to ~10ms for SWR to clear. */
	for (i = 0; i < 1000; i++) {
		/*
		 * Wait a bit for the reset to complete before peeking
		 * at the chip again.
		 */
		delay(10);
		if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
			break;
	}

	if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);

	/* Settle time after reset before the chip is touched again. */
	delay(1000);
}
1297
/*
 * ae_init: [ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Resets the chip, rebuilds both descriptor rings, programs the
 *	interrupt mask, station address and receive filter, selects the
 *	current media, and finally starts the MAC and the receive
 *	process.  Returns 0 on success or an errno; on failure the
 *	interface is marked not-running.
 */
static int
ae_init(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;
	struct ae_rxsoft *rxs;
	const uint8_t *enaddr;
	int i, error = 0;

	if ((error = ae_enable(sc)) != 0)
		goto out;

	/*
	 * Cancel any pending I/O.
	 */
	ae_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * Initialize the BUSMODE register.
	 */
	AE_WRITE(sc, CSR_BUSMODE,
	    /* XXX: not sure if this is a good thing or not... */
	    //BUSMODE_ALIGN_16B |
	    BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW);
	AE_BARRIER(sc);

	/*
	 * Initialize the transmit descriptor ring.  Each descriptor's
	 * second buffer pointer chains to the next descriptor, and the
	 * last descriptor gets the end-of-ring bit.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < AE_NTXDESC; i++) {
		sc->sc_txdescs[i].ad_ctl = 0;
		sc->sc_txdescs[i].ad_bufaddr2 =
		    AE_CDTXADDR(sc, AE_NEXTTX(i));
	}
	sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER;
	AE_CDTXSYNC(sc, 0, AE_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = AE_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the transmit job descriptors.
	 */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.  Slots that already have an mbuf attached
	 * just get their descriptor re-armed.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = ae_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				ae_rxdrain(sc);
				goto out;
			}
		} else
			AE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	/* normal interrupts */
	sc->sc_inten =  STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;

	/* abnormal interrupts */
	sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
	    STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS;

	sc->sc_rxint_mask = STATUS_RI|STATUS_RU;
	sc->sc_txint_mask = STATUS_TI|STATUS_UNF|STATUS_TJT;

	sc->sc_rxint_mask &= sc->sc_inten;
	sc->sc_txint_mask &= sc->sc_inten;

	AE_WRITE(sc, CSR_INTEN, sc->sc_inten);
	/* Ack any stale status bits before interrupts are live. */
	AE_WRITE(sc, CSR_STATUS, 0xffffffff);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext));
	AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr));
	AE_BARRIER(sc);

	/*
	 * Set the station address.  The MAC address registers take the
	 * address in little-endian byte order, split 2/4 across
	 * MACHI/MACLO.
	 */
	enaddr = CLLADDR(ifp->if_sadl);
	AE_WRITE(sc, CSR_MACHI, enaddr[5] << 16 | enaddr[4]);
	AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 |
	    enaddr[1] << 8 | enaddr[0]);
	AE_BARRIER(sc);

	/*
	 * Set the receive filter.  This will start the transmit and
	 * receive processes.
	 */
	ae_filter_setup(sc);

	/*
	 * Set the current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Start the mac.
	 */
	AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE);
	AE_BARRIER(sc);

	/*
	 * Write out the opmode.
	 */
	AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST |
	    ae_txthresh[sc->sc_txthresh].txth_opmode);
	/*
	 * Start the receive process.
	 */
	AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
	AE_BARRIER(sc);

	if (sc->sc_tick != NULL) {
		/*
		 * Start the periodic (MII) tick.  Note the first tick
		 * fires after hz/8, not one second; the tick routine
		 * itself re-arms at hz.
		 */
		callout_reset(&sc->sc_tick_callout, hz >> 3, sc->sc_tick, sc);
	}

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}
1467
1468 /*
1469 * ae_enable:
1470 *
1471 * Enable the chip.
1472 */
1473 static int
1474 ae_enable(struct ae_softc *sc)
1475 {
1476
1477 if (AE_IS_ENABLED(sc) == 0) {
1478 sc->sc_ih = arbus_intr_establish(sc->sc_cirq, sc->sc_mirq,
1479 ae_intr, sc);
1480 if (sc->sc_ih == NULL) {
1481 printf("%s: unable to establish interrupt\n",
1482 sc->sc_dev.dv_xname);
1483 return (EIO);
1484 }
1485 sc->sc_flags |= AE_ENABLED;
1486 }
1487 return (0);
1488 }
1489
1490 /*
1491 * ae_disable:
1492 *
1493 * Disable the chip.
1494 */
1495 static void
1496 ae_disable(struct ae_softc *sc)
1497 {
1498
1499 if (AE_IS_ENABLED(sc)) {
1500 arbus_intr_disestablish(sc->sc_ih);
1501 sc->sc_flags &= ~AE_ENABLED;
1502 }
1503 }
1504
1505 /*
1506 * ae_power:
1507 *
1508 * Power management (suspend/resume) hook.
1509 */
1510 static void
1511 ae_power(int why, void *arg)
1512 {
1513 struct ae_softc *sc = arg;
1514 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1515 int s;
1516
1517 printf("power called: %d, %x\n", why, (uint32_t)arg);
1518 s = splnet();
1519 switch (why) {
1520 case PWR_STANDBY:
1521 /* do nothing! */
1522 break;
1523 case PWR_SUSPEND:
1524 ae_stop(ifp, 0);
1525 ae_disable(sc);
1526 break;
1527 case PWR_RESUME:
1528 if (ifp->if_flags & IFF_UP) {
1529 ae_enable(sc);
1530 ae_init(ifp);
1531 }
1532 break;
1533 case PWR_SOFTSUSPEND:
1534 case PWR_SOFTSTANDBY:
1535 case PWR_SOFTRESUME:
1536 break;
1537 }
1538 splx(s);
1539 }
1540
1541 /*
1542 * ae_rxdrain:
1543 *
1544 * Drain the receive queue.
1545 */
1546 static void
1547 ae_rxdrain(struct ae_softc *sc)
1548 {
1549 struct ae_rxsoft *rxs;
1550 int i;
1551
1552 for (i = 0; i < AE_NRXDESC; i++) {
1553 rxs = &sc->sc_rxsoft[i];
1554 if (rxs->rxs_mbuf != NULL) {
1555 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1556 m_freem(rxs->rxs_mbuf);
1557 rxs->rxs_mbuf = NULL;
1558 }
1559 }
1560 }
1561
/*
 * ae_stop: [ ifnet interface function ]
 *
 *	Stop transmission on the interface.  Halts the tick callout and
 *	the MII, masks interrupts, stops the chip's DMA engines, frees
 *	all queued transmit jobs, and (if `disable') also drains the
 *	receive ring and releases the interrupt handler.  Finishes with
 *	a chip reset.
 */
static void
ae_stop(struct ifnet *ifp, int disable)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;

	if (sc->sc_tick != NULL) {
		/* Stop the one second clock. */
		callout_stop(&sc->sc_tick_callout);
	}

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	AE_WRITE(sc, CSR_INTEN, 0);

	/* Stop the transmit and receive processes. */
	AE_WRITE(sc, CSR_OPMODE, 0);
	AE_WRITE(sc, CSR_RXLIST, 0);
	AE_WRITE(sc, CSR_TXLIST, 0);
	AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE);
	AE_BARRIER(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;

	if (disable) {
		ae_rxdrain(sc);
		ae_disable(sc);
	}

	/*
	 * Reset the chip (needed on some flavors to actually disable it).
	 */
	ae_reset(sc);
}
1621
/*
 * ae_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.  Allocates a
 *	fresh mbuf cluster, loads it into the slot's DMA map (unloading
 *	any previous buffer first), and re-arms the descriptor.
 *
 *	Returns 0 on success or ENOBUFS if an mbuf/cluster cannot be
 *	allocated; panics if the DMA load fails (XXX should recover).
 */
static int
ae_add_rxbuf(struct ae_softc *sc, int idx)
{
	struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		/* No cluster; release the header mbuf too. */
		m_freem(m);
		return (ENOBUFS);
	}

	/* Drop the old buffer's mapping before reusing the DMA map. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the whole cluster; the chip DMAs received data into it. */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("ae_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	AE_INIT_RXDESC(sc, idx);

	return (0);
}
1666
1667 /*
1668 * ae_filter_setup:
1669 *
1670 * Set the chip's receive filter.
1671 */
1672 static void
1673 ae_filter_setup(struct ae_softc *sc)
1674 {
1675 struct ethercom *ec = &sc->sc_ethercom;
1676 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1677 struct ether_multi *enm;
1678 struct ether_multistep step;
1679 uint32_t hash, mchash[2];
1680 uint32_t macctl = 0;
1681
1682 /*
1683 * If the chip is running, we need to reset the interface,
1684 * and will revisit here (with IFF_RUNNING) clear. The
1685 * chip seems to really not like to have its multicast
1686 * filter programmed without a reset.
1687 */
1688 if (ifp->if_flags & IFF_RUNNING) {
1689 (void) ae_init(ifp);
1690 return;
1691 }
1692
1693 DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n",
1694 sc->sc_dev.dv_xname, sc->sc_flags));
1695
1696 macctl = AE_READ(sc, CSR_MACCTL);
1697 macctl &= ~(MACCTL_PR | MACCTL_PM);
1698 macctl |= MACCTL_HASH;
1699 macctl |= MACCTL_HBD;
1700 macctl |= MACCTL_PR;
1701
1702 if (ifp->if_flags & IFF_PROMISC) {
1703 macctl |= MACCTL_PR;
1704 goto allmulti;
1705 }
1706
1707 mchash[0] = mchash[1] = 0;
1708
1709 ETHER_FIRST_MULTI(step, ec, enm);
1710 while (enm != NULL) {
1711 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1712 /*
1713 * We must listen to a range of multicast addresses.
1714 * For now, just accept all multicasts, rather than
1715 * trying to set only those filter bits needed to match
1716 * the range. (At this time, the only use of address
1717 * ranges is for IP multicast routing, for which the
1718 * range is big enough to require all bits set.)
1719 */
1720 goto allmulti;
1721 }
1722
1723 /* Verify whether we use big or little endian hashes */
1724 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f;
1725 mchash[hash >> 5] |= 1 << (hash & 0x1f);
1726 ETHER_NEXT_MULTI(step, enm);
1727 }
1728 ifp->if_flags &= ~IFF_ALLMULTI;
1729 goto setit;
1730
1731 allmulti:
1732 ifp->if_flags |= IFF_ALLMULTI;
1733 mchash[0] = mchash[1] = 0xffffffff;
1734 macctl |= MACCTL_PM;
1735
1736 setit:
1737 AE_WRITE(sc, CSR_HTHI, mchash[0]);
1738 AE_WRITE(sc, CSR_HTHI, mchash[1]);
1739
1740 AE_WRITE(sc, CSR_MACCTL, macctl);
1741 AE_BARRIER(sc);
1742
1743 DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n",
1744 sc->sc_dev.dv_xname, macctl));
1745 }
1746
/*
 * ae_idle:
 *
 *	Cause the transmit and/or receive processes to go idle.
 *	`bits' is a mask of OPMODE_ST/OPMODE_SR; the corresponding
 *	process-stopped status bits are polled for up to ~10ms, and a
 *	diagnostic naming the stuck process state is printed if either
 *	process fails to stop.
 */
void
ae_idle(struct ae_softc *sc, u_int32_t bits)
{
	/* Transmit process state names, indexed by STATUS_TS field. */
	static const char * const txstate_names[] = {
		"STOPPED",
		"RUNNING - FETCH",
		"RUNNING - WAIT",
		"RUNNING - READING",
		"-- RESERVED --",
		"RUNNING - SETUP",
		"SUSPENDED",
		"RUNNING - CLOSE",
	};
	/* Receive process state names, indexed by STATUS_RS field. */
	static const char * const rxstate_names[] = {
		"STOPPED",
		"RUNNING - FETCH",
		"RUNNING - CHECK",
		"RUNNING - WAIT",
		"SUSPENDED",
		"RUNNING - CLOSE",
		"RUNNING - FLUSH",
		"RUNNING - QUEUE",
	};

	u_int32_t csr, ackmask = 0;
	int i;

	/* Build the set of "process stopped" bits we expect to see. */
	if (bits & OPMODE_ST)
		ackmask |= STATUS_TPS;

	if (bits & OPMODE_SR)
		ackmask |= STATUS_RPS;

	/* Clear the start bits, telling the processes to stop. */
	AE_CLR(sc, CSR_OPMODE, bits);

	/* Poll up to 1000 * 10us for both stop acknowledgements. */
	for (i = 0; i < 1000; i++) {
		if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask)
			break;
		delay(10);
	}

	csr = AE_READ(sc, CSR_STATUS);
	if ((csr & ackmask) != ackmask) {
		/*
		 * Only complain if the state field also shows the
		 * process still running; a stopped state without the
		 * ack bit is treated as success.
		 */
		if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 &&
		    (csr & STATUS_TS) != STATUS_TS_STOPPED) {
			printf("%s: transmit process failed to idle: "
			    "state %s\n", sc->sc_dev.dv_xname,
			    txstate_names[(csr & STATUS_TS) >> 20]);
		}
		if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 &&
		    (csr & STATUS_RS) != STATUS_RS_STOPPED) {
			printf("%s: receive process failed to idle: "
			    "state %s\n", sc->sc_dev.dv_xname,
			    rxstate_names[(csr & STATUS_RS) >> 17]);
		}
	}
}
1809
1810 /*****************************************************************************
1811 * Support functions for MII-attached media.
1812 *****************************************************************************/
1813
/*
 * ae_mii_tick:
 *
 *	One second timer, used to tick the MII.  Re-arms itself via the
 *	tick callout; stops rescheduling once the device is no longer
 *	active (e.g. being detached).
 */
static void
ae_mii_tick(void *arg)
{
	struct ae_softc *sc = arg;
	int s;

	/* Device going away; let the callout chain die. */
	if (!device_is_active(&sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	/* Re-arm for one second from now. */
	callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc);
}
1834
/*
 * ae_mii_statchg: [mii interface function]
 *
 *	Callback from PHY when media changes.  Updates the MAC's
 *	duplex-related bits and flow control to match the negotiated
 *	media: full duplex enables flow control and clears
 *	disable-receive-own; half duplex does the opposite.
 */
static void
ae_mii_statchg(device_t self)
{
	struct ae_softc *sc = device_private(self);
	uint32_t	macctl, flowc;

	//opmode = AE_READ(sc, CSR_OPMODE);
	macctl = AE_READ(sc, CSR_MACCTL);

	/* XXX: do we need to do this? */
	/* Idle the transmit and receive processes. */
	//ae_idle(sc, OPMODE_ST|OPMODE_SR);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		flowc = FLOWC_FCE;
		macctl &= ~MACCTL_DRO;
		macctl |= MACCTL_FDX;
	} else {
		flowc = 0;	/* cannot do flow control in HDX */
		macctl |= MACCTL_DRO;
		macctl &= ~MACCTL_FDX;
	}

	AE_WRITE(sc, CSR_FLOWC, flowc);
	AE_WRITE(sc, CSR_MACCTL, macctl);

	/* restore operational mode */
	//AE_WRITE(sc, CSR_OPMODE, opmode);
	AE_BARRIER(sc);
}
1870
/*
 * ae_mii_readreg:
 *
 *	Read a PHY register.  Latches the PHY/register address into the
 *	MII address CSR, busy-waits for the chip to complete the cycle,
 *	and returns the low 16 bits of the MII data CSR.
 *
 *	NOTE(review): the busy-wait is a bounded spin of up to 1e8
 *	register reads with no delay, and a timeout is not reported --
 *	on timeout the (stale) data register is returned anyway.
 */
static int
ae_mii_readreg(device_t self, int phy, int reg)
{
	struct ae_softc	*sc = device_private(self);
	uint32_t	addr;
	int		i;

	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);
	/* Spin until the chip clears the busy bit. */
	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}

	return (AE_READ(sc, CSR_MIIDATA) & 0xffff);
}
1893
/*
 * ae_mii_writereg:
 *
 *	Write a PHY register.  The data CSR must be written first; the
 *	write to the address CSR (with MIIADDR_WRITE) then latches it
 *	and starts the MII cycle.
 *
 *	NOTE(review): like ae_mii_readreg, the completion busy-wait is
 *	an undelayed bounded spin and a timeout is silently ignored.
 */
static void
ae_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct ae_softc *sc = device_private(self);
	uint32_t	addr;
	int		i;

	/* write the data register */
	AE_WRITE(sc, CSR_MIIDATA, val);

	/* write the address to latch it in */
	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) |
	    MIIADDR_WRITE;
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);

	/* Spin until the chip clears the busy bit. */
	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}
}
1920