/*	$NetBSD: dp83932.c,v 1.36.6.3 2017/02/05 13:40:27 skrll Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the National Semiconductor DP83932
 * Systems-Oriented Network Interface Controller (SONIC).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.36.6.3 2017/02/05 13:40:27 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/ic/dp83932reg.h>
#include <dev/ic/dp83932var.h>

static void	sonic_start(struct ifnet *);
static void	sonic_watchdog(struct ifnet *);
static int	sonic_ioctl(struct ifnet *, u_long, void *);
static int	sonic_init(struct ifnet *);
static void	sonic_stop(struct ifnet *, int);

static bool	sonic_shutdown(device_t, int);

static void	sonic_reset(struct sonic_softc *);
static void	sonic_rxdrain(struct sonic_softc *);
static int	sonic_add_rxbuf(struct sonic_softc *, int);
static void	sonic_set_filter(struct sonic_softc *);

static uint16_t	sonic_txintr(struct sonic_softc *);
static void	sonic_rxintr(struct sonic_softc *);

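/*
 * When sonic_copy_small is non-zero, received packets small enough to
 * fit in a header mbuf are copied into a fresh mbuf rather than passing
 * the receive cluster up the stack.
 */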
int	sonic_copy_small = 0;

/* Pad short Tx frames out to the minimum Ethernet frame size, less CRC. */
#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * sonic_attach:
 *
 *	Attach a SONIC interface to the system.
 */
void
sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	size_t cdatasize;
	uint8_t *nullbuf;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if (sc->sc_32bit)
		cdatasize = sizeof(struct sonic_control_data32);
	else
		cdatasize = sizeof(struct sonic_control_data16);

	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
	     BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
	     &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create pad buffer DMA map, error = %d\n", error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load pad buffer DMA map, error = %d\n", error);
		goto fail_6;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Reset the chip to a known state.
	 */
	sonic_reset(sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sonic_ioctl;
	ifp->if_start = sonic_start;
	ifp->if_watchdog = sonic_watchdog;
	ifp->if_init = sonic_init;
	ifp->if_stop = sonic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sonic_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sonic_shutdown:
 *
 *	Make sure the interface is stopped at reboot.
 */
bool
sonic_shutdown(device_t self, int howto)
{
	struct sonic_softc *sc = device_private(self);

	sonic_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * sonic_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, totlen, olseg;
	int seg = 0;	/* XXX: gcc */

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  We also force a copy if the frame
		 * is short enough to need padding but has already used
		 * every fragment slot, since the pad requires one more.
		 * In these cases, we'll copy and try again.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
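			/*
			 * If the frame is shorter than the minimum, add one
			 * more fragment pointing at the zero-filled pad
			 * buffer that was set up at attach time.
			 */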
			if (totlen < ETHER_PAD_LEN) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc, sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/* Link it up. */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			tda16 = &sc->sc_tda16[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
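			/* Pad short frames with the null buffer, as above. */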
			if (totlen < ETHER_PAD_LEN) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc, sc->sc_nulldma & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda16->tda_status = 0;
			tda16->tda_pktconfig = 0;
			tda16->tda_pktsize = htosonic16(sc, totlen);
			tda16->tda_fragcnt = htosonic16(sc, seg);

			/* Link it up. */
			tda16->tda_frags[seg].frag_ptr0 =
			    htosonic16(sc, SONIC_CDTXADDR16(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC16(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txpending == (SONIC_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = SONIC_NEXTTX(olasttx);

		/*
		 * Stop the SONIC on the last packet we've set up,
		 * and clear end-of-list on the descriptor previous
		 * to our new chain.
		 *
		 * NOTE: our `seg' variable should still be valid!
		 */
		if (sc->sc_32bit) {
			olseg =
			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic32(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic32(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			olseg =
			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic16(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic16(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Start the transmitter. */
		CSR_WRITE(sc, SONIC_CR, CR_TXP);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sonic_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
sonic_watchdog(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;

	(void)sonic_init(ifp);
}

/*
 * sonic_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			(void)sonic_init(ifp);
		error = 0;
	}

	splx(s);
	return error;
}

/*
 * sonic_intr:
 *
 *	Interrupt service routine.
 */
int
sonic_intr(void *arg)
{
	struct sonic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	int handled = 0, wantinit;

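	/*
	 * Read and acknowledge pending interrupt conditions, looping
	 * until none of the enabled conditions remain asserted or an
	 * error forces a reinitialization.
	 */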
	for (wantinit = 0; wantinit == 0;) {
		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
		if (isr == 0)
			break;
		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */

		handled = 1;

		if (isr & IMR_PRX)
			sonic_rxintr(sc);

		if (isr & (IMR_PTX|IMR_TXER)) {
			if (sonic_txintr(sc) & TCR_FU) {
				printf("%s: transmit FIFO underrun\n",
				    device_xname(sc->sc_dev));
				wantinit = 1;
			}
		}

		if (isr & (IMR_RFO|IMR_RBA|IMR_RBE|IMR_RDE)) {
#define	PRINTERR(bit, str)						\
			if (isr & (bit))				\
				printf("%s: %s\n", device_xname(sc->sc_dev), str)
			PRINTERR(IMR_RFO, "receive FIFO overrun");
			PRINTERR(IMR_RBA, "receive buffer exceeded");
			PRINTERR(IMR_RBE, "receive buffers exhausted");
			PRINTERR(IMR_RDE, "receive descriptors exhausted");
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			(void)sonic_init(ifp);
		sonic_start(ifp);
	}

	return handled;
}

/*
 * sonic_txintr:
 *
 *	Helper; handle transmit complete interrupts.
 */
uint16_t
sonic_txintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_tda32 *tda32;
	struct sonic_tda16 *tda16;
	uint16_t status, totstat = 0;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		if (sc->sc_32bit) {
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda32 = &sc->sc_tda32[i];
			status = sonic32toh(sc, tda32->tda_status);
			SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
		} else {
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda16 = &sc->sc_tda16[i];
			status = sonic16toh(sc, tda16->tda_status);
			SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
		}

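		/*
		 * If none of the status bits (other than the programmable
		 * control bits) is set, the SONIC has not completed this
		 * descriptor yet; stop here.
		 */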
		if ((status & ~(TCR_EXDIS|TCR_CRCI|TCR_POWC|TCR_PINT)) == 0)
			break;

		totstat |= status;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/*
		 * Check for errors and collisions.
		 */
		if (status & TCR_PTX)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
		ifp->if_collisions += TDA_STATUS_NCOL(status);
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	return totstat;
}

/*
 * sonic_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
sonic_rxintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_rda32 *rda32;
	struct sonic_rda16 *rda16;
	struct mbuf *m;
	int i, len;
	uint16_t status, bytecount /*, ptr0, ptr1, seqno */;

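	/*
	 * Walk the receive descriptor ring, stopping at the first
	 * descriptor still marked in-use (i.e. not yet filled in by
	 * the SONIC).
	 */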
	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		if (sc->sc_32bit) {
			SONIC_CDRXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda32 = &sc->sc_rda32[i];
			SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
			if (rda32->rda_inuse != 0)
				break;
			status = sonic32toh(sc, rda32->rda_status);
			bytecount = sonic32toh(sc, rda32->rda_bytecount);
			/* ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0); */
			/* ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1); */
			/* seqno = sonic32toh(sc, rda32->rda_seqno); */
		} else {
			SONIC_CDRXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda16 = &sc->sc_rda16[i];
			SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
			if (rda16->rda_inuse != 0)
				break;
			status = sonic16toh(sc, rda16->rda_status);
			bytecount = sonic16toh(sc, rda16->rda_bytecount);
			/* ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0); */
			/* ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1); */
			/* seqno = sonic16toh(sc, rda16->rda_seqno); */
		}

		/*
		 * Make absolutely sure this is the only packet
		 * in this receive buffer.  Our entire Rx buffer
		 * management scheme depends on this, and if the
		 * SONIC didn't follow our rule, it means we've
		 * misconfigured it.
		 */
		KASSERT(status & RCR_LPKT);

		/*
		 * Make sure the packet arrived OK.  If an error occurred,
		 * update stats and reset the descriptor.  The buffer will
		 * be reused the next time the descriptor comes up in the
		 * ring.
		 */
		if ((status & RCR_PRX) == 0) {
			if (status & RCR_FAER)
				printf("%s: Rx frame alignment error\n",
				    device_xname(sc->sc_dev));
			else if (status & RCR_CRCR)
				printf("%s: Rx CRC error\n",
				    device_xname(sc->sc_dev));
			ifp->if_ierrors++;
			SONIC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * The SONIC includes the CRC with every packet.
		 */
		len = bytecount - ETHER_CRC_LEN;

		/*
		 * Ok, if the chip is in 32-bit mode, then receive
		 * buffers must be aligned to 32-bit boundaries,
		 * which means the payload is misaligned.  In this
		 * case, we must allocate a new mbuf, and copy the
		 * packet into it, scooted forward 2 bytes to ensure
		 * proper alignment.
		 *
		 * Note, in 16-bit mode, we can configure the SONIC
		 * to do what we want, and we have.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		if (sc->sc_32bit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			if (len > (MHLEN - 2)) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto dropit;
			}
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else
#endif /* ! __NO_STRICT_ALIGNMENT */
		/*
		 * If the packet is small enough to fit in a single
		 * header mbuf, allocate one and copy the data into
		 * it.  This greatly reduces memory consumption when
		 * we receive lots of small packets.
		 */
		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (sonic_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				SONIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
}

/*
 * sonic_reset:
 *
 *	Perform a soft reset on the SONIC.
 */
void
sonic_reset(struct sonic_softc *sc)
{

	/* stop TX, RX and timer, and ensure RST is clear */
	CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
	delay(1000);

	CSR_WRITE(sc, SONIC_CR, CR_RST);
	delay(1000);

	/* clear all interrupts */
	CSR_WRITE(sc, SONIC_IMR, 0);
	CSR_WRITE(sc, SONIC_ISR, IMR_ALL);

	CSR_WRITE(sc, SONIC_CR, 0);
	delay(1000);
}

/*
 * sonic_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
sonic_init(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i, error = 0;
	uint16_t reg;

	/*
	 * Cancel any pending I/O.
	 */
	sonic_stop(ifp, 0);

	/*
	 * Reset the SONIC to a known state.
	 */
	sonic_reset(sc);

	/*
	 * Bring the SONIC into reset state, and program the DCR.
	 *
	 * Note: We don't bother optimizing the transmit and receive
	 * thresholds, here.  TFT/RFT values should be set in MD attachments.
	 */
	reg = sc->sc_dcr;
	if (sc->sc_32bit)
		reg |= DCR_DW;
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	CSR_WRITE(sc, SONIC_DCR, reg);
	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
	CSR_WRITE(sc, SONIC_CR, 0);

	/*
	 * Initialize the transmit descriptors.
	 */
	if (sc->sc_32bit) {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	} else {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
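	/*
	 * The transmit ring starts out empty; sc_txlast points at the
	 * final slot so that the first packet queued lands in descriptor 0.
	 */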
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = SONIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map Rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sonic_rxdrain(sc);
				goto out;
			}
		} else
			SONIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Give the transmit ring to the SONIC. */
	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);

	/* Give the receive descriptor ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);

	/* Give the receive buffer ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra32)) & 0xffff);
	else
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra16)) & 0xffff);
	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));

	/*
	 * Set the End-Of-Buffer counter such that only one packet
	 * will be placed into each buffer we provide.  Note we are
	 * following the recommendation of section 3.4.4 of the manual
	 * here, and have "lengthened" the receive buffers accordingly.
	 */
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
	else
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));

	/* Reset the receive sequence counter. */
	CSR_WRITE(sc, SONIC_RSC, 0);

	/* Clear the tally registers. */
	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
	CSR_WRITE(sc, SONIC_FAET, 0xffff);
	CSR_WRITE(sc, SONIC_MPT, 0xffff);

	/* Set the receive filter. */
	sonic_set_filter(sc);

	/*
	 * Set the interrupt mask register.
	 */
	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
	    IMR_TXER | IMR_PTX | IMR_PRX;
	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);

	/*
	 * Start the receive process in motion.  Note, we don't
	 * start the transmit process until we actually try to
	 * transmit packets.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}

/*
 * sonic_rxdrain:
 *
 *	Drain the receive queue.
 */
void
sonic_rxdrain(struct sonic_softc *sc)
{
	struct sonic_descsoft *ds;
	int i;

	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sonic_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
sonic_stop(struct ifnet *ifp, int disable)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE(sc, SONIC_IMR, 0);

	/*
	 * Stop the transmitter, receiver, and timer.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_HTX|CR_RXDIS|CR_STP);
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) == 0)
			break;
		delay(2);
	}
	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) != 0)
		printf("%s: SONIC failed to stop\n", device_xname(sc->sc_dev));

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sonic_rxdrain(sc);
}

/*
 * sonic_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
sonic_add_rxbuf(struct sonic_softc *sc, int idx)
{
	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("sonic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SONIC_INIT_RXDESC(sc, idx);

	return 0;
}

static void
sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
{

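	/*
	 * A CAM entry holds the entry number and the 48-bit Ethernet
	 * address split into three 16-bit words, low-order byte first.
	 */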
	if (sc->sc_32bit) {
		struct sonic_cda32 *cda = &sc->sc_cda32[entry];

		cda->cda_entry = htosonic32(sc, entry);
		cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
		cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
		cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
	} else {
		struct sonic_cda16 *cda = &sc->sc_cda16[entry];

		cda->cda_entry = htosonic16(sc, entry);
		cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
		cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
		cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
	}
}

/*
 * sonic_set_filter:
 *
 *	Set the SONIC receive filter.
 */
void
sonic_set_filter(struct sonic_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, entry = 0;
	uint16_t camvalid = 0;
	uint16_t rcr = 0;

	if (ifp->if_flags & IFF_BROADCAST)
		rcr |= RCR_BRD;

	if (ifp->if_flags & IFF_PROMISC) {
		rcr |= RCR_PRO;
		goto allmulti;
	}

	/* Put our station address in the first CAM slot. */
	sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
	camvalid |= (1U << entry);
	entry++;

	/* Add the multicast addresses to the CAM. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * The only way to do this on the SONIC is to enable
			 * reception of all multicast packets.
			 */
			goto allmulti;
		}

		if (entry == SONIC_NCAMENT) {
			/*
			 * Out of CAM slots.  Have to enable reception
			 * of all multicast addresses.
			 */
			goto allmulti;
		}

		sonic_set_camentry(sc, entry, enm->enm_addrlo);
		camvalid |= (1U << entry);
		entry++;

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	/* Use only the first CAM slot (station address). */
	camvalid = 0x0001;
	entry = 1;
	rcr |= RCR_AMC;

 setit:
	/* set mask for the CAM Enable register */
	if (sc->sc_32bit) {
		if (entry == SONIC_NCAMENT)
			sc->sc_cdaenable32 = htosonic32(sc, camvalid);
		else
			sc->sc_cda32[entry].cda_entry =
			    htosonic32(sc, camvalid);
	} else {
		if (entry == SONIC_NCAMENT)
			sc->sc_cdaenable16 = htosonic16(sc, camvalid);
		else
			sc->sc_cda16[entry].cda_entry =
			    htosonic16(sc, camvalid);
	}

	/* Load the CAM. */
	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
	CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
	CSR_WRITE(sc, SONIC_CDC, entry);
	CSR_WRITE(sc, SONIC_CR, CR_LCAM);
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
			break;
		delay(2);
	}
	if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
		printf("%s: CAM load failed\n", device_xname(sc->sc_dev));
	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);

	/* Set the receive control register. */
	CSR_WRITE(sc, SONIC_RCR, rcr);
}