/*	$NetBSD: dp83932.c,v 1.25 2008/04/23 13:29:44 tsutsui Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the National Semiconductor DP83932
 * Systems-Oriented Network Interface Controller (SONIC).
 */
43
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.25 2008/04/23 13:29:44 tsutsui Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/ic/dp83932reg.h>
#include <dev/ic/dp83932var.h>

/*
 * Forward declarations: ifnet interface functions and internal helpers
 * defined later in this file.
 */
void	sonic_start(struct ifnet *);
void	sonic_watchdog(struct ifnet *);
int	sonic_ioctl(struct ifnet *, u_long, void *);
int	sonic_init(struct ifnet *);
void	sonic_stop(struct ifnet *, int);

void	sonic_shutdown(void *);

void	sonic_reset(struct sonic_softc *);
void	sonic_rxdrain(struct sonic_softc *);
int	sonic_add_rxbuf(struct sonic_softc *, int);
void	sonic_set_filter(struct sonic_softc *);

uint16_t sonic_txintr(struct sonic_softc *);
void	sonic_rxintr(struct sonic_softc *);

/* Tunable: when non-zero, small Rx packets are copied into header mbufs. */
int	sonic_copy_small = 0;

/* Minimum Ethernet frame length on the wire, excluding the CRC. */
#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
94
/*
 * sonic_attach:
 *
 *	Attach a SONIC interface to the system.
 *
 *	Allocates and maps the DMA-visible control data (descriptor rings,
 *	CAM area, and a zero-filled pad buffer for short Tx frames),
 *	creates the per-packet Tx/Rx DMA maps, and attaches the ifnet.
 *	On failure, everything allocated so far is released in reverse
 *	order via the fall-through fail_* labels.
 */
void
sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	size_t cdatasize;
	uint8_t *nullbuf;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  The 32-bit and 16-bit chip modes use different
	 * descriptor layouts, hence different control-data sizes.
	 */
	if (sc->sc_32bit)
		cdatasize = sizeof(struct sonic_control_data32);
	else
		cdatasize = sizeof(struct sonic_control_data16);

	/*
	 * ETHER_PAD_LEN extra bytes are tacked onto the same allocation
	 * to serve as the all-zeros pad buffer for short Tx packets.
	 * The allocation must not cross a 64KB boundary (chip pointers
	 * are split into 16-bit halves).
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
	     BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	/* The pad buffer lives immediately after the control data. */
	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
	     &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create pad buffer DMA map, error = %d\n", error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load pad buffer DMA map, error = %d\n", error);
		goto fail_6;
	}
	/*
	 * The pad buffer is never written again, so one PREWRITE sync
	 * here suffices for its lifetime.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Reset the chip to a known state.
	 */
	sonic_reset(sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sonic_ioctl;
	ifp->if_start = sonic_start;
	ifp->if_watchdog = sonic_watchdog;
	ifp->if_init = sonic_init;
	ifp->if_stop = sonic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can suport 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sonic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(sc->sc_dev,
		    "WARNING: unable to establish shutdown hook\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	/* Maps never created are NULL; skip those. */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
266
267 /*
268 * sonic_shutdown:
269 *
270 * Make sure the interface is stopped at reboot.
271 */
272 void
273 sonic_shutdown(void *arg)
274 {
275 struct sonic_softc *sc = arg;
276
277 sonic_stop(&sc->sc_ethercom.ec_if, 1);
278 }
279
/*
 * sonic_start: [ifnet interface function]
 *
 *	Start packet transmission on the interface.
 *
 *	Dequeues packets from the send queue and maps each one into
 *	the next free transmit descriptor until the queue drains or
 *	only one descriptor remains.  Frames shorter than the minimum
 *	Ethernet length get an extra fragment pointing at the shared
 *	zero pad buffer.  Finally the new chain is linked into the
 *	ring and the transmitter is kicked.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, totlen, olseg;
	int seg = 0;	/* XXX: gcc */

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.  POLL (not DEQUEUE) so we
		 * can leave it on the queue if mapping fails.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  We also copy when a short packet already uses
		 * every fragment slot, since the pad fragment would not
		 * fit otherwise.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			/* Linearize the chain into the single new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied; transmit the copy, free the original. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor.  DMA addresses are
		 * split into two 16-bit halves for the chip; an extra
		 * fragment referencing the zero pad buffer is appended
		 * for frames below the minimum Ethernet length.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc, sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/*
			 * Link it up.  The link pointer occupies the slot
			 * just past the last fragment.
			 */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			tda16 = &sc->sc_tda16[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc, sc->sc_nulldma & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda16->tda_status = 0;
			tda16->tda_pktconfig = 0;
			tda16->tda_pktsize = htosonic16(sc, totlen);
			tda16->tda_fragcnt = htosonic16(sc, seg);

			/* Link it up. */
			tda16->tda_frags[seg].frag_ptr0 =
			    htosonic16(sc, SONIC_CDTXADDR16(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC16(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SONIC_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = SONIC_NEXTTX(olasttx);

		/*
		 * Stop the SONIC on the last packet we've set up,
		 * and clear end-of-list on the descriptor previous
		 * to our new chain.
		 *
		 * NOTE: our `seg' variable should still be valid!
		 */
		if (sc->sc_32bit) {
			olseg =
			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic32(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic32(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			olseg =
			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic16(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic16(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Start the transmitter. */
		CSR_WRITE(sc, SONIC_CR, CR_TXP);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
535
536 /*
537 * sonic_watchdog: [ifnet interface function]
538 *
539 * Watchdog timer handler.
540 */
541 void
542 sonic_watchdog(struct ifnet *ifp)
543 {
544 struct sonic_softc *sc = ifp->if_softc;
545
546 printf("%s: device timeout\n", device_xname(sc->sc_dev));
547 ifp->if_oerrors++;
548
549 (void)sonic_init(ifp);
550 }
551
552 /*
553 * sonic_ioctl: [ifnet interface function]
554 *
555 * Handle control requests from the operator.
556 */
557 int
558 sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
559 {
560 int s, error;
561
562 s = splnet();
563
564 error = ether_ioctl(ifp, cmd, data);
565 if (error == ENETRESET) {
566 /*
567 * Multicast list has changed; set the hardware
568 * filter accordingly.
569 */
570 if (ifp->if_flags & IFF_RUNNING)
571 (void)sonic_init(ifp);
572 error = 0;
573 }
574
575 splx(s);
576 return error;
577 }
578
/*
 * sonic_intr:
 *
 *	Interrupt service routine.
 *
 *	Loops acknowledging and dispatching interrupt causes until the
 *	masked ISR reads zero, or until an error condition forces a full
 *	reinitialization.  Returns non-zero if the interrupt was ours.
 */
int
sonic_intr(void *arg)
{
	struct sonic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	int handled = 0, wantinit;

	for (wantinit = 0; wantinit == 0;) {
		/* Only consider causes we have enabled in the IMR. */
		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
		if (isr == 0)
			break;
		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */

		handled = 1;

		if (isr & IMR_PRX)
			sonic_rxintr(sc);

		if (isr & (IMR_PTX|IMR_TXER)) {
			/* FIFO underrun requires a chip reinit to recover. */
			if (sonic_txintr(sc) & TCR_FU) {
				printf("%s: transmit FIFO underrun\n",
				    device_xname(sc->sc_dev));
				wantinit = 1;
			}
		}

		if (isr & (IMR_RFO|IMR_RBA|IMR_RBE|IMR_RDE)) {
#define	PRINTERR(bit, str)						\
			if (isr & (bit))				\
				printf("%s: %s\n",device_xname(sc->sc_dev), str)
			PRINTERR(IMR_RFO, "receive FIFO overrun");
			PRINTERR(IMR_RBA, "receive buffer exceeded");
			PRINTERR(IMR_RBE, "receive buffers exhausted");
			PRINTERR(IMR_RDE, "receive descriptors exhausted");
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			(void)sonic_init(ifp);
		/* Try to push out any packets queued while we were busy. */
		sonic_start(ifp);
	}

	return handled;
}
631
/*
 * sonic_txintr:
 *
 *	Helper; handle transmit complete interrupts.
 *
 *	Walks the dirty portion of the transmit ring, reclaiming
 *	completed descriptors, unloading their DMA maps and freeing
 *	the mbufs.  Returns the OR of all completed descriptors'
 *	status words so the caller can detect FIFO underruns (TCR_FU).
 */
uint16_t
sonic_txintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_tda32 *tda32;
	struct sonic_tda16 *tda16;
	uint16_t status, totstat = 0;
	int i;

	/* Descriptors are being freed; the ring is no longer full. */
	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		/*
		 * Sync to read the chip-written status, then put the
		 * descriptor back in PREREAD state for the hardware.
		 */
		if (sc->sc_32bit) {
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda32 = &sc->sc_tda32[i];
			status = sonic32toh(sc, tda32->tda_status);
			SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
		} else {
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda16 = &sc->sc_tda16[i];
			status = sonic16toh(sc, tda16->tda_status);
			SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
		}

		/*
		 * If no meaningful status bits are set, the chip has not
		 * finished this descriptor yet; stop reclaiming here.
		 */
		if ((status & ~(TCR_EXDIS|TCR_CRCI|TCR_POWC|TCR_PINT)) == 0)
			break;

		totstat |= status;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/*
		 * Check for errors and collisions.
		 */
		if (status & TCR_PTX)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
		ifp->if_collisions += TDA_STATUS_NCOL(status);
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	return totstat;
}
700
/*
 * sonic_rxintr:
 *
 *	Helper; handle receive interrupts.
 *
 *	Walks the receive descriptor ring from sc_rxptr, passing each
 *	completed packet up the stack.  Depending on alignment mode and
 *	packet size, the packet is either copied into a fresh mbuf or
 *	the receive buffer itself is handed up and replaced.
 */
void
sonic_rxintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_rda32 *rda32;
	struct sonic_rda16 *rda16;
	struct mbuf *m;
	int i, len;
	uint16_t status, bytecount, ptr0, ptr1, seqno;

	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		/*
		 * Sync the descriptor and check rda_inuse: non-zero means
		 * the chip still owns it, so we've caught up.
		 */
		if (sc->sc_32bit) {
			SONIC_CDRXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda32 = &sc->sc_rda32[i];
			SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
			if (rda32->rda_inuse != 0)
				break;
			status = sonic32toh(sc, rda32->rda_status);
			bytecount = sonic32toh(sc, rda32->rda_bytecount);
			ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0);
			ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1);
			seqno = sonic32toh(sc, rda32->rda_seqno);
		} else {
			SONIC_CDRXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda16 = &sc->sc_rda16[i];
			SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
			if (rda16->rda_inuse != 0)
				break;
			status = sonic16toh(sc, rda16->rda_status);
			bytecount = sonic16toh(sc, rda16->rda_bytecount);
			ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0);
			ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1);
			seqno = sonic16toh(sc, rda16->rda_seqno);
		}

		/*
		 * Make absolutely sure this is the only packet
		 * in this receive buffer.  Our entire Rx buffer
		 * management scheme depends on this, and if the
		 * SONIC didn't follow our rule, it means we've
		 * misconfigured it.
		 */
		KASSERT(status & RCR_LPKT);

		/*
		 * Make sure the packet arrived OK.  If an error occurred,
		 * update stats and reset the descriptor.  The buffer will
		 * be reused the next time the descriptor comes up in the
		 * ring.
		 */
		if ((status & RCR_PRX) == 0) {
			if (status & RCR_FAER)
				printf("%s: Rx frame alignment error\n",
				    device_xname(sc->sc_dev));
			else if (status & RCR_CRCR)
				printf("%s: Rx CRC error\n",
				    device_xname(sc->sc_dev));
			ifp->if_ierrors++;
			SONIC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * The SONIC includes the CRC with every packet.
		 */
		len = bytecount - ETHER_CRC_LEN;

		/*
		 * Ok, if the chip is in 32-bit mode, then receive
		 * buffers must be aligned to 32-bit boundaries,
		 * which means the payload is misaligned.  In this
		 * case, we must allocate a new mbuf, and copy the
		 * packet into it, scooted forward 2 bytes to ensure
		 * proper alignment.
		 *
		 * Note, in 16-bit mode, we can configure the SONIC
		 * to do what we want, and we have.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		if (sc->sc_32bit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			if (len > (MHLEN - 2)) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto dropit;
			}
			/* Shift by 2 so the IP header lands 4-byte aligned. */
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else
#endif /* ! __NO_STRICT_ALIGNMENT */
		/*
		 * If the packet is small enough to fit in a single
		 * header mbuf, allocate one and copy the data into
		 * it.  This greatly reduces memory consumption when
		 * we receive lots of small packets.
		 */
		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else {
			/*
			 * Hand the receive buffer itself upstream and
			 * attach a fresh one to the descriptor.  If that
			 * fails, recycle the old buffer and drop the packet.
			 */
			m = ds->ds_mbuf;
			if (sonic_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				SONIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	/* Tell the chip the last buffer it may write (one behind us). */
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
}
867
/*
 * sonic_reset:
 *
 *	Perform a soft reset on the SONIC.
 *
 *	The ordering here matters: the chip is quiesced first, then
 *	placed in (and released from) software reset with settle delays
 *	between each step, and all interrupt state is cleared.
 */
void
sonic_reset(struct sonic_softc *sc)
{

	/* stop TX, RX and timer, and ensure RST is clear */
	CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
	delay(1000);

	/* Assert software reset. */
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	delay(1000);

	/* clear all interrupts */
	CSR_WRITE(sc, SONIC_IMR, 0);
	CSR_WRITE(sc, SONIC_ISR, IMR_ALL);

	/* Release software reset. */
	CSR_WRITE(sc, SONIC_CR, 0);
	delay(1000);
}
891
/*
 * sonic_init: [ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Stops and resets the chip, programs the data configuration
 *	registers, initializes the Tx/Rx descriptor rings, hands their
 *	DMA addresses to the chip, loads the receive filter/CAM, and
 *	finally enables reception.  Returns 0 on success or an errno
 *	if receive buffers could not be allocated.
 */
int
sonic_init(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i, error = 0;
	uint16_t reg;

	/*
	 * Cancel any pending I/O.
	 */
	sonic_stop(ifp, 0);

	/*
	 * Reset the SONIC to a known state.
	 */
	sonic_reset(sc);

	/*
	 * Bring the SONIC into reset state, and program the DCR.
	 *
	 * Note: We don't bother optimizing the transmit and receive
	 * thresholds, here.  TFT/RFT values should be set in MD attachments.
	 */
	reg = sc->sc_dcr;
	if (sc->sc_32bit)
		reg |= DCR_DW;
	/* DCR/DCR2 may only be written while the chip is in reset. */
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	CSR_WRITE(sc, SONIC_DCR, reg);
	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
	CSR_WRITE(sc, SONIC_CR, 0);

	/*
	 * Initialize the transmit descriptors.
	 */
	if (sc->sc_32bit) {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	} else {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	/* First transmit will use descriptor 0 (NEXTTX of NTXDESC-1). */
	sc->sc_txlast = SONIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map Rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sonic_rxdrain(sc);
				goto out;
			}
		} else
			SONIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Give the transmit ring to the SONIC. */
	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);

	/* Give the receive descriptor ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);

	/* Give the receive buffer ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra32)) & 0xffff);
	else
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra16)) & 0xffff);
	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));

	/*
	 * Set the End-Of-Buffer counter such that only one packet
	 * will be placed into each buffer we provide.  Note we are
	 * following the recommendation of section 3.4.4 of the manual
	 * here, and have "lengthened" the receive buffers accordingly.
	 */
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
	else
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));

	/* Reset the receive sequence counter. */
	CSR_WRITE(sc, SONIC_RSC, 0);

	/* Clear the tally registers. */
	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
	CSR_WRITE(sc, SONIC_FAET, 0xffff);
	CSR_WRITE(sc, SONIC_MPT, 0xffff);

	/* Set the receive filter. */
	sonic_set_filter(sc);

	/*
	 * Set the interrupt mask register.
	 */
	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
	    IMR_TXER | IMR_PTX | IMR_PRX;
	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);

	/*
	 * Start the receive process in motion.  Note, we don't
	 * start the transmit process until we actually try to
	 * transmit packets.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}
1040
1041 /*
1042 * sonic_rxdrain:
1043 *
1044 * Drain the receive queue.
1045 */
1046 void
1047 sonic_rxdrain(struct sonic_softc *sc)
1048 {
1049 struct sonic_descsoft *ds;
1050 int i;
1051
1052 for (i = 0; i < SONIC_NRXDESC; i++) {
1053 ds = &sc->sc_rxsoft[i];
1054 if (ds->ds_mbuf != NULL) {
1055 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1056 m_freem(ds->ds_mbuf);
1057 ds->ds_mbuf = NULL;
1058 }
1059 }
1060 }
1061
/*
 * sonic_stop: [ifnet interface function]
 *
 *	Stop transmission on the interface.
 *
 *	Masks interrupts, halts the transmitter/receiver/timer
 *	(polling briefly for the chip to acknowledge), frees queued
 *	transmit buffers, and marks the interface down.  If `disable'
 *	is set, receive buffers are drained as well.
 */
void
sonic_stop(struct ifnet *ifp, int disable)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE(sc, SONIC_IMR, 0);

	/*
	 * Stop the transmitter, receiver, and timer.  Poll up to
	 * ~2ms for the busy bits to clear before complaining.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_HTX|CR_RXDIS|CR_STP);
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) == 0)
			break;
		delay(2);
	}
	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) != 0)
		printf("%s: SONIC failed to stop\n", device_xname(sc->sc_dev));

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sonic_rxdrain(sc);
}
1112
/*
 * sonic_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Allocates a new cluster mbuf, loads it into the descriptor's
 *	DMA map (replacing any previous buffer), and reinitializes the
 *	descriptor.  Returns 0 on success or ENOBUFS if an mbuf or
 *	cluster could not be allocated.  Note: a DMA load failure is
 *	treated as fatal (panic) since the old map was already unloaded.
 */
int
sonic_add_rxbuf(struct sonic_softc *sc, int idx)
{
	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/* Detach the previous buffer, if any, before reloading the map. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	/* Map the entire cluster; the chip writes one packet per buffer. */
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("sonic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SONIC_INIT_RXDESC(sc, idx);

	return 0;
}
1156
1157 static void
1158 sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
1159 {
1160
1161 if (sc->sc_32bit) {
1162 struct sonic_cda32 *cda = &sc->sc_cda32[entry];
1163
1164 cda->cda_entry = htosonic32(sc, entry);
1165 cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
1166 cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
1167 cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
1168 } else {
1169 struct sonic_cda16 *cda = &sc->sc_cda16[entry];
1170
1171 cda->cda_entry = htosonic16(sc, entry);
1172 cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
1173 cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
1174 cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
1175 }
1176 }
1177
/*
 * sonic_set_filter:
 *
 *	Set the SONIC receive filter.
 *
 *	Programs the CAM with the station address plus as many exact
 *	multicast addresses as fit; falls back to receive-all-multicast
 *	(RCR_AMC) when the CAM overflows, a multicast range is requested,
 *	or the interface is promiscuous.  Finishes by loading the CAM
 *	into the chip and writing the receive control register.
 */
void
sonic_set_filter(struct sonic_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, entry = 0;
	uint16_t camvalid = 0;
	uint16_t rcr = 0;

	if (ifp->if_flags & IFF_BROADCAST)
		rcr |= RCR_BRD;

	if (ifp->if_flags & IFF_PROMISC) {
		rcr |= RCR_PRO;
		goto allmulti;
	}

	/* Put our station address in the first CAM slot. */
	sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
	camvalid |= (1U << entry);
	entry++;

	/* Add the multicast addresses to the CAM. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * The only way to do this on the SONIC is to enable
			 * reception of all multicast packets.
			 */
			goto allmulti;
		}

		if (entry == SONIC_NCAMENT) {
			/*
			 * Out of CAM slots.  Have to enable reception
			 * of all multicast addresses.
			 */
			goto allmulti;
		}

		sonic_set_camentry(sc, entry, enm->enm_addrlo);
		camvalid |= (1U << entry);
		entry++;

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	/* Use only the first CAM slot (station address). */
	camvalid = 0x0001;
	entry = 1;
	rcr |= RCR_AMC;

 setit:
	/*
	 * set mask for the CAM Enable register; it is stored just past
	 * the last CAM entry actually loaded (or in the dedicated slot
	 * when all entries are in use).
	 */
	if (sc->sc_32bit) {
		if (entry == SONIC_NCAMENT)
			sc->sc_cdaenable32 = htosonic32(sc, camvalid);
		else
			sc->sc_cda32[entry].cda_entry =
			    htosonic32(sc, camvalid);
	} else {
		if (entry == SONIC_NCAMENT)
			sc->sc_cdaenable16 = htosonic16(sc, camvalid);
		else
			sc->sc_cda16[entry].cda_entry =
			    htosonic16(sc, camvalid);
	}

	/* Load the CAM.  Poll up to ~20ms for the command to complete. */
	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
	CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
	CSR_WRITE(sc, SONIC_CDC, entry);
	CSR_WRITE(sc, SONIC_CR, CR_LCAM);
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
			break;
		delay(2);
	}
	if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
		printf("%s: CAM load failed\n", device_xname(sc->sc_dev));
	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);

	/* Set the receive control register. */
	CSR_WRITE(sc, SONIC_RCR, rcr);
}
1276