1 /* $NetBSD: dp83932.c,v 1.20.2.1 2008/03/24 07:15:16 keiichi Exp $ */
2
3 /*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Device driver for the National Semiconductor DP83932
41 * Systems-Oriented Network Interface Controller (SONIC).
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.20.2.1 2008/03/24 07:15:16 keiichi Exp $");
46
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58
59 #include <uvm/uvm_extern.h>
60
61 #include <net/if.h>
62 #include <net/if_dl.h>
63 #include <net/if_ether.h>
64
65 #if NBPFILTER > 0
66 #include <net/bpf.h>
67 #endif
68
69 #include <sys/bus.h>
70 #include <sys/intr.h>
71
72 #include <dev/ic/dp83932reg.h>
73 #include <dev/ic/dp83932var.h>
74
75 void sonic_start(struct ifnet *);
76 void sonic_watchdog(struct ifnet *);
77 int sonic_ioctl(struct ifnet *, u_long, void *);
78 int sonic_init(struct ifnet *);
79 void sonic_stop(struct ifnet *, int);
80
81 void sonic_shutdown(void *);
82
83 void sonic_reset(struct sonic_softc *);
84 void sonic_rxdrain(struct sonic_softc *);
85 int sonic_add_rxbuf(struct sonic_softc *, int);
86 void sonic_set_filter(struct sonic_softc *);
87
88 uint16_t sonic_txintr(struct sonic_softc *);
89 void sonic_rxintr(struct sonic_softc *);
90
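/*
 * When non-zero, received packets small enough to fit in a header mbuf
 * are copied into one rather than handed up in their (much larger)
 * Rx cluster.
 */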
91 int sonic_copy_small = 0;
92
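/*
 * Short Tx packets are padded to the minimum Ethernet frame length,
 * not counting the 4-byte CRC: 64 - 4 = 60 bytes.
 */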
93 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
94
95 /*
96 * sonic_attach:
97 *
98 * Attach a SONIC interface to the system.
99 */
100 void
101 sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
102 {
103 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
104 int i, rseg, error;
105 bus_dma_segment_t seg;
106 size_t cdatasize;
107 char *nullbuf;
108
109 /*
110 * Allocate the control data structures, plus ETHER_PAD_LEN bytes of
111 * zeroes used to pad short Tx packets, and create and load the DMA map.
112 */
113 if (sc->sc_32bit)
114 cdatasize = sizeof(struct sonic_control_data32);
115 else
116 cdatasize = sizeof(struct sonic_control_data16);
117
118 if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
119 PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
120 BUS_DMA_NOWAIT)) != 0) {
121 printf("%s: unable to allocate control data, error = %d\n",
122 sc->sc_dev.dv_xname, error);
123 goto fail_0;
124 }
125
126 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
127 cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
128 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
129 printf("%s: unable to map control data, error = %d\n",
130 sc->sc_dev.dv_xname, error);
131 goto fail_1;
132 }
133 nullbuf = (char *)sc->sc_cdata16 + cdatasize;
134 memset(nullbuf, 0, ETHER_PAD_LEN);
135
136 if ((error = bus_dmamap_create(sc->sc_dmat,
137 cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
138 &sc->sc_cddmamap)) != 0) {
139 printf("%s: unable to create control data DMA map, "
140 "error = %d\n", sc->sc_dev.dv_xname, error);
141 goto fail_2;
142 }
143
144 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
145 sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
146 printf("%s: unable to load control data DMA map, error = %d\n",
147 sc->sc_dev.dv_xname, error);
148 goto fail_3;
149 }
150
151 /*
152 * Create the transmit buffer DMA maps.
153 */
154 for (i = 0; i < SONIC_NTXDESC; i++) {
155 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
156 SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
157 &sc->sc_txsoft[i].ds_dmamap)) != 0) {
158 printf("%s: unable to create tx DMA map %d, "
159 "error = %d\n", sc->sc_dev.dv_xname, i, error);
160 goto fail_4;
161 }
162 }
163
164 /*
165 * Create the receive buffer DMA maps.
166 */
167 for (i = 0; i < SONIC_NRXDESC; i++) {
168 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
169 MCLBYTES, 0, BUS_DMA_NOWAIT,
170 &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
171 printf("%s: unable to create rx DMA map %d, "
172 "error = %d\n", sc->sc_dev.dv_xname, i, error);
173 goto fail_5;
174 }
175 sc->sc_rxsoft[i].ds_mbuf = NULL;
176 }
177
178 /*
179 * Create and map the pad buffer.
180 */
181 if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
182 ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
183 printf("%s: unable to create pad buffer DMA map, "
184 "error = %d\n", sc->sc_dev.dv_xname, error);
185 goto fail_5;
186 }
187
188 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
189 nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
190 printf("%s: unable to load pad buffer DMA map, "
191 "error = %d\n", sc->sc_dev.dv_xname, error);
192 goto fail_6;
193 }
194 bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
195 BUS_DMASYNC_PREWRITE);
196
197 /*
198 * Reset the chip to a known state.
199 */
200 sonic_reset(sc);
201
202 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
203 ether_sprintf(enaddr));
204
205 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
206 ifp->if_softc = sc;
207 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
208 ifp->if_ioctl = sonic_ioctl;
209 ifp->if_start = sonic_start;
210 ifp->if_watchdog = sonic_watchdog;
211 ifp->if_init = sonic_init;
212 ifp->if_stop = sonic_stop;
213 IFQ_SET_READY(&ifp->if_snd);
214
215 /*
216 * We can support 802.1Q VLAN-sized frames.
217 */
218 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
219
220 /*
221 * Attach the interface.
222 */
223 if_attach(ifp);
224 ether_ifattach(ifp, enaddr);
225
226 /*
227 * Make sure the interface is shutdown during reboot.
228 */
229 sc->sc_sdhook = shutdownhook_establish(sonic_shutdown, sc);
230 if (sc->sc_sdhook == NULL)
231 printf("%s: WARNING: unable to establish shutdown hook\n",
232 sc->sc_dev.dv_xname);
233 return;
234
235 /*
236 * Free any resources we've allocated during the failed attach
237 * attempt. Do this in reverse order and fall through.
238 */
239 fail_6:
240 bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
241 fail_5:
242 for (i = 0; i < SONIC_NRXDESC; i++) {
243 if (sc->sc_rxsoft[i].ds_dmamap != NULL)
244 bus_dmamap_destroy(sc->sc_dmat,
245 sc->sc_rxsoft[i].ds_dmamap);
246 }
247 fail_4:
248 for (i = 0; i < SONIC_NTXDESC; i++) {
249 if (sc->sc_txsoft[i].ds_dmamap != NULL)
250 bus_dmamap_destroy(sc->sc_dmat,
251 sc->sc_txsoft[i].ds_dmamap);
252 }
253 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
254 fail_3:
255 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
256 fail_2:
257 bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_cdata16, cdatasize);
258 fail_1:
259 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
260 fail_0:
261 return;
262 }
263
264 /*
265 * sonic_shutdown:
266 *
267 * Make sure the interface is stopped at reboot.
268 */
269 void
270 sonic_shutdown(void *arg)
271 {
272 struct sonic_softc *sc = arg;
273
274 sonic_stop(&sc->sc_ethercom.ec_if, 1);
275 }
276
277 /*
278 * sonic_start: [ifnet interface function]
279 *
280 * Start packet transmission on the interface.
281 */
282 void
283 sonic_start(struct ifnet *ifp)
284 {
285 struct sonic_softc *sc = ifp->if_softc;
286 struct mbuf *m0, *m;
287 struct sonic_tda16 *tda16;
288 struct sonic_tda32 *tda32;
289 struct sonic_descsoft *ds;
290 bus_dmamap_t dmamap;
291 int error, olasttx, nexttx, opending, totlen, olseg;
292 int seg = 0; /* XXX: gcc */
293
294 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
295 return;
296
297 /*
298 * Remember the previous txpending and the current "last txdesc
299 * used" index.
300 */
301 opending = sc->sc_txpending;
302 olasttx = sc->sc_txlast;
303
304 /*
305 * Loop through the send queue, setting up transmit descriptors
306 * until we drain the queue, or use up all available transmit
307 * descriptors. Leave one at the end for sanity's sake.
308 */
309 while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
310 /*
311 * Grab a packet off the queue.
312 */
313 IFQ_POLL(&ifp->if_snd, m0);
314 if (m0 == NULL)
315 break;
316 m = NULL;
317
318 /*
319 * Get the next available transmit descriptor.
320 */
321 nexttx = SONIC_NEXTTX(sc->sc_txlast);
322 ds = &sc->sc_txsoft[nexttx];
323 dmamap = ds->ds_dmamap;
324
325 /*
326 * Load the DMA map. If this fails, the packet either
327 * didn't fit in the allotted number of frags, or we were
328 * short on resources. In this case, we'll copy and try
329 * again.
330 */
331 if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
332 BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
333 (m0->m_pkthdr.len < ETHER_PAD_LEN &&
334 dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
335 if (error == 0)
336 bus_dmamap_unload(sc->sc_dmat, dmamap);
337 MGETHDR(m, M_DONTWAIT, MT_DATA);
338 if (m == NULL) {
339 printf("%s: unable to allocate Tx mbuf\n",
340 sc->sc_dev.dv_xname);
341 break;
342 }
343 if (m0->m_pkthdr.len > MHLEN) {
344 MCLGET(m, M_DONTWAIT);
345 if ((m->m_flags & M_EXT) == 0) {
346 printf("%s: unable to allocate Tx "
347 "cluster\n", sc->sc_dev.dv_xname);
348 m_freem(m);
349 break;
350 }
351 }
352 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
353 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
354 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
355 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
356 if (error) {
357 printf("%s: unable to load Tx buffer, "
358 "error = %d\n", sc->sc_dev.dv_xname, error);
359 m_freem(m);
360 break;
361 }
362 }
363 IFQ_DEQUEUE(&ifp->if_snd, m0);
364 if (m != NULL) {
365 m_freem(m0);
366 m0 = m;
367 }
368
369 /*
370 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
371 */
372
373 /* Sync the DMA map. */
374 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
375 BUS_DMASYNC_PREWRITE);
376
377 /*
378 * Store a pointer to the packet so we can free it later.
379 */
380 ds->ds_mbuf = m0;
381
382 /*
383 * Initialize the transmit descriptor.
384 */
385 totlen = 0;
386 if (sc->sc_32bit) {
387 tda32 = &sc->sc_tda32[nexttx];
388 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
389 tda32->tda_frags[seg].frag_ptr1 =
390 htosonic32(sc,
391 (dmamap->dm_segs[seg].ds_addr >> 16) &
392 0xffff);
393 tda32->tda_frags[seg].frag_ptr0 =
394 htosonic32(sc,
395 dmamap->dm_segs[seg].ds_addr & 0xffff);
396 tda32->tda_frags[seg].frag_size =
397 htosonic32(sc, dmamap->dm_segs[seg].ds_len);
398 totlen += dmamap->dm_segs[seg].ds_len;
399 }
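			/*
			 * If the packet is shorter than the minimum frame
			 * length, chain on one more fragment pointing at
			 * the all-zero pad buffer.
			 */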
400 if (totlen < ETHER_PAD_LEN) {
401 tda32->tda_frags[seg].frag_ptr1 =
402 htosonic32(sc,
403 (sc->sc_nulldma >> 16) & 0xffff);
404 tda32->tda_frags[seg].frag_ptr0 =
405 htosonic32(sc, sc->sc_nulldma & 0xffff);
406 tda32->tda_frags[seg].frag_size =
407 htosonic32(sc, ETHER_PAD_LEN - totlen);
408 totlen = ETHER_PAD_LEN;
409 seg++;
410 }
411
412 tda32->tda_status = 0;
413 tda32->tda_pktconfig = 0;
414 tda32->tda_pktsize = htosonic32(sc, totlen);
415 tda32->tda_fragcnt = htosonic32(sc, seg);
416
417 /* Link it up. */
418 tda32->tda_frags[seg].frag_ptr0 =
419 htosonic32(sc, SONIC_CDTXADDR32(sc,
420 SONIC_NEXTTX(nexttx)) & 0xffff);
421
422 /* Sync the Tx descriptor. */
423 SONIC_CDTXSYNC32(sc, nexttx,
424 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
425 } else {
426 tda16 = &sc->sc_tda16[nexttx];
427 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
428 tda16->tda_frags[seg].frag_ptr1 =
429 htosonic16(sc,
430 (dmamap->dm_segs[seg].ds_addr >> 16) &
431 0xffff);
432 tda16->tda_frags[seg].frag_ptr0 =
433 htosonic16(sc,
434 dmamap->dm_segs[seg].ds_addr & 0xffff);
435 tda16->tda_frags[seg].frag_size =
436 htosonic16(sc, dmamap->dm_segs[seg].ds_len);
437 totlen += dmamap->dm_segs[seg].ds_len;
438 }
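			/* As above: pad short packets with the all-zero pad buffer. */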
439 if (totlen < ETHER_PAD_LEN) {
440 tda16->tda_frags[seg].frag_ptr1 =
441 htosonic16(sc,
442 (sc->sc_nulldma >> 16) & 0xffff);
443 tda16->tda_frags[seg].frag_ptr0 =
444 htosonic16(sc, sc->sc_nulldma & 0xffff);
445 tda16->tda_frags[seg].frag_size =
446 htosonic16(sc, ETHER_PAD_LEN - totlen);
447 totlen = ETHER_PAD_LEN;
448 seg++;
449 }
450
451 tda16->tda_status = 0;
452 tda16->tda_pktconfig = 0;
453 tda16->tda_pktsize = htosonic16(sc, totlen);
454 tda16->tda_fragcnt = htosonic16(sc, seg);
455
456 /* Link it up. */
457 tda16->tda_frags[seg].frag_ptr0 =
458 htosonic16(sc, SONIC_CDTXADDR16(sc,
459 SONIC_NEXTTX(nexttx)) & 0xffff);
460
461 /* Sync the Tx descriptor. */
462 SONIC_CDTXSYNC16(sc, nexttx,
463 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
464 }
465
466 /* Advance the Tx pointer. */
467 sc->sc_txpending++;
468 sc->sc_txlast = nexttx;
469
470 #if NBPFILTER > 0
471 /*
472 * Pass the packet to any BPF listeners.
473 */
474 if (ifp->if_bpf)
475 bpf_mtap(ifp->if_bpf, m0);
476 #endif
477 }
478
479 if (sc->sc_txpending == (SONIC_NTXDESC - 1)) {
480 /* No more slots left; notify upper layer. */
481 ifp->if_flags |= IFF_OACTIVE;
482 }
483
484 if (sc->sc_txpending != opending) {
485 /*
486 * We enqueued packets. If the transmitter was idle,
487 * reset the txdirty pointer.
488 */
489 if (opending == 0)
490 sc->sc_txdirty = SONIC_NEXTTX(olasttx);
491
492 /*
493 * Stop the SONIC on the last packet we've set up,
494 * and clear end-of-list on the descriptor previous
495 * to our new chain.
496 *
497 * NOTE: our `seg' variable should still be valid!
498 */
499 if (sc->sc_32bit) {
500 olseg =
501 sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
502 sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
503 htosonic32(sc, TDA_LINK_EOL);
504 SONIC_CDTXSYNC32(sc, sc->sc_txlast,
505 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
506 sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
507 htosonic32(sc, ~TDA_LINK_EOL);
508 SONIC_CDTXSYNC32(sc, olasttx,
509 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
510 } else {
511 olseg =
512 sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
513 sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
514 htosonic16(sc, TDA_LINK_EOL);
515 SONIC_CDTXSYNC16(sc, sc->sc_txlast,
516 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
517 sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
518 htosonic16(sc, ~TDA_LINK_EOL);
519 SONIC_CDTXSYNC16(sc, olasttx,
520 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
521 }
522
523 /* Start the transmitter. */
524 CSR_WRITE(sc, SONIC_CR, CR_TXP);
525
526 /* Set a watchdog timer in case the chip flakes out. */
527 ifp->if_timer = 5;
528 }
529 }
530
531 /*
532 * sonic_watchdog: [ifnet interface function]
533 *
534 * Watchdog timer handler.
535 */
536 void
537 sonic_watchdog(struct ifnet *ifp)
538 {
539 struct sonic_softc *sc = ifp->if_softc;
540
541 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
542 ifp->if_oerrors++;
543
544 (void) sonic_init(ifp);
545 }
546
547 /*
548 * sonic_ioctl: [ifnet interface function]
549 *
550 * Handle control requests from the operator.
551 */
552 int
553 sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
554 {
555 int s, error;
556
557 s = splnet();
558
559 error = ether_ioctl(ifp, cmd, data);
560 if (error == ENETRESET) {
561 /*
562 * Multicast list has changed; set the hardware
563 * filter accordingly.
564 */
565 if (ifp->if_flags & IFF_RUNNING)
566 (void) sonic_init(ifp);
567 error = 0;
568 }
569
570 splx(s);
571 return (error);
572 }
573
574 /*
575 * sonic_intr:
576 *
577 * Interrupt service routine.
578 */
579 int
580 sonic_intr(void *arg)
581 {
582 struct sonic_softc *sc = arg;
583 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
584 uint16_t isr;
585 int handled = 0, wantinit;
586
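	/*
	 * Keep servicing events until the (masked) ISR reads back zero.
	 * Serious error conditions set wantinit, which causes the chip
	 * to be reinitialized below.
	 */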
587 for (wantinit = 0; wantinit == 0;) {
588 isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
589 if (isr == 0)
590 break;
591 CSR_WRITE(sc, SONIC_ISR, isr); /* ACK */
592
593 handled = 1;
594
595 if (isr & IMR_PRX)
596 sonic_rxintr(sc);
597
598 if (isr & (IMR_PTX|IMR_TXER)) {
599 if (sonic_txintr(sc) & TCR_FU) {
600 printf("%s: transmit FIFO underrun\n",
601 sc->sc_dev.dv_xname);
602 wantinit = 1;
603 }
604 }
605
606 if (isr & (IMR_RFO|IMR_RBA|IMR_RBE|IMR_RDE)) {
607 #define PRINTERR(bit, str) \
608 if (isr & (bit)) \
609 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
610 PRINTERR(IMR_RFO, "receive FIFO overrun");
611 PRINTERR(IMR_RBA, "receive buffer exceeded");
612 PRINTERR(IMR_RBE, "receive buffers exhausted");
613 PRINTERR(IMR_RDE, "receive descriptors exhausted");
614 wantinit = 1;
615 }
616 }
617
618 if (handled) {
619 if (wantinit)
620 (void) sonic_init(ifp);
621 sonic_start(ifp);
622 }
623
624 return (handled);
625 }
626
627 /*
628 * sonic_txintr:
629 *
630 * Helper; handle transmit complete interrupts.
631 */
632 uint16_t
633 sonic_txintr(struct sonic_softc *sc)
634 {
635 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
636 struct sonic_descsoft *ds;
637 struct sonic_tda32 *tda32;
638 struct sonic_tda16 *tda16;
639 uint16_t status, totstat = 0;
640 int i;
641
642 ifp->if_flags &= ~IFF_OACTIVE;
643
644 for (i = sc->sc_txdirty; sc->sc_txpending != 0;
645 i = SONIC_NEXTTX(i), sc->sc_txpending--) {
646 ds = &sc->sc_txsoft[i];
647
648 if (sc->sc_32bit) {
649 SONIC_CDTXSYNC32(sc, i,
650 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
651 tda32 = &sc->sc_tda32[i];
652 status = sonic32toh(sc, tda32->tda_status);
653 SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
654 } else {
655 SONIC_CDTXSYNC16(sc, i,
656 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
657 tda16 = &sc->sc_tda16[i];
658 status = sonic16toh(sc, tda16->tda_status);
659 SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
660 }
661
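		/*
		 * The masked-off bits are Tx control bits that may appear
		 * in the status word; if no other bits are set, the chip
		 * has not finished with this descriptor yet.
		 */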
662 if ((status & ~(TCR_EXDIS|TCR_CRCI|TCR_POWC|TCR_PINT)) == 0)
663 break;
664
665 totstat |= status;
666
667 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
668 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
669 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
670 m_freem(ds->ds_mbuf);
671 ds->ds_mbuf = NULL;
672
673 /*
674 * Check for errors and collisions.
675 */
676 if (status & TCR_PTX)
677 ifp->if_opackets++;
678 else
679 ifp->if_oerrors++;
680 ifp->if_collisions += TDA_STATUS_NCOL(status);
681 }
682
683 /* Update the dirty transmit buffer pointer. */
684 sc->sc_txdirty = i;
685
686 /*
687 * Cancel the watchdog timer if there are no pending
688 * transmissions.
689 */
690 if (sc->sc_txpending == 0)
691 ifp->if_timer = 0;
692
693 return (totstat);
694 }
695
696 /*
697 * sonic_rxintr:
698 *
699 * Helper; handle receive interrupts.
700 */
701 void
702 sonic_rxintr(struct sonic_softc *sc)
703 {
704 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
705 struct sonic_descsoft *ds;
706 struct sonic_rda32 *rda32;
707 struct sonic_rda16 *rda16;
708 struct mbuf *m;
709 int i, len;
710 uint16_t status, bytecount, ptr0, ptr1, seqno;
711
712 for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
713 ds = &sc->sc_rxsoft[i];
714
715 if (sc->sc_32bit) {
716 SONIC_CDRXSYNC32(sc, i,
717 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
718 rda32 = &sc->sc_rda32[i];
719 SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
720 if (rda32->rda_inuse != 0)
721 break;
722 status = sonic32toh(sc, rda32->rda_status);
723 bytecount = sonic32toh(sc, rda32->rda_bytecount);
724 ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0);
725 ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1);
726 seqno = sonic32toh(sc, rda32->rda_seqno);
727 } else {
728 SONIC_CDRXSYNC16(sc, i,
729 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
730 rda16 = &sc->sc_rda16[i];
731 SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
732 if (rda16->rda_inuse != 0)
733 break;
734 status = sonic16toh(sc, rda16->rda_status);
735 bytecount = sonic16toh(sc, rda16->rda_bytecount);
736 ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0);
737 ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1);
738 seqno = sonic16toh(sc, rda16->rda_seqno);
739 }
740
741 /*
742 * Make absolutely sure this is the only packet
743 * in this receive buffer. Our entire Rx buffer
744 * management scheme depends on this, and if the
745 * SONIC didn't follow our rule, it means we've
746 * misconfigured it.
747 */
748 KASSERT(status & RCR_LPKT);
749
750 /*
751 * Make sure the packet arrived OK. If an error occurred,
752 * update stats and reset the descriptor. The buffer will
753 * be reused the next time the descriptor comes up in the
754 * ring.
755 */
756 if ((status & RCR_PRX) == 0) {
757 if (status & RCR_FAER)
758 printf("%s: Rx frame alignment error\n",
759 sc->sc_dev.dv_xname);
760 else if (status & RCR_CRCR)
761 printf("%s: Rx CRC error\n",
762 sc->sc_dev.dv_xname);
763 ifp->if_ierrors++;
764 SONIC_INIT_RXDESC(sc, i);
765 continue;
766 }
767
768 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
769 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
770
771 /*
772 * The SONIC includes the CRC with every packet.
773 */
774 len = bytecount - ETHER_CRC_LEN;
775
776 /*
777 * Ok, if the chip is in 32-bit mode, then receive
778 * buffers must be aligned to 32-bit boundaries,
779 * which means the payload is misaligned. In this
780 * case, we must allocate a new mbuf, and copy the
781 * packet into it, scooted forward 2 bytes to ensure
782 * proper alignment.
783 *
784 * Note, in 16-bit mode, we can configure the SONIC
785 * to do what we want, and we have.
786 */
787 #ifndef __NO_STRICT_ALIGNMENT
788 if (sc->sc_32bit) {
789 MGETHDR(m, M_DONTWAIT, MT_DATA);
790 if (m == NULL)
791 goto dropit;
792 if (len > (MHLEN - 2)) {
793 MCLGET(m, M_DONTWAIT);
794 if ((m->m_flags & M_EXT) == 0)
795 goto dropit;
796 }
797 m->m_data += 2;
798 /*
799 * Note that we use a cluster for incoming frames,
800 * so the buffer is virtually contiguous.
801 */
802 memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
803 len);
804 SONIC_INIT_RXDESC(sc, i);
805 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
806 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
807 } else
808 #endif /* ! __NO_STRICT_ALIGNMENT */
809 /*
810 * If the packet is small enough to fit in a single
811 * header mbuf, allocate one and copy the data into
812 * it. This greatly reduces memory consumption when
813 * we receive lots of small packets.
814 */
815 if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
816 MGETHDR(m, M_DONTWAIT, MT_DATA);
817 if (m == NULL)
818 goto dropit;
819 m->m_data += 2;
820 /*
821 * Note that we use a cluster for incoming frames,
822 * so the buffer is virtually contiguous.
823 */
824 memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
825 len);
826 SONIC_INIT_RXDESC(sc, i);
827 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
828 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
829 } else {
830 m = ds->ds_mbuf;
831 if (sonic_add_rxbuf(sc, i) != 0) {
832 dropit:
833 ifp->if_ierrors++;
834 SONIC_INIT_RXDESC(sc, i);
835 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
836 ds->ds_dmamap->dm_mapsize,
837 BUS_DMASYNC_PREREAD);
838 continue;
839 }
840 }
841
842 ifp->if_ipackets++;
843 m->m_pkthdr.rcvif = ifp;
844 m->m_pkthdr.len = m->m_len = len;
845
846 #if NBPFILTER > 0
847 /*
848 * Pass this up to any BPF listeners.
849 */
850 if (ifp->if_bpf)
851 bpf_mtap(ifp->if_bpf, m);
852 #endif /* NBPFILTER > 0 */
853
854 /* Pass it on. */
855 (*ifp->if_input)(ifp, m);
856 }
857
858 /* Update the receive pointer. */
859 sc->sc_rxptr = i;
860 CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
861 }
862
863 /*
864 * sonic_reset:
865 *
866 * Perform a soft reset on the SONIC.
867 */
868 void
869 sonic_reset(struct sonic_softc *sc)
870 {
871
872 /* stop TX, RX and timer, and ensure RST is clear */
873 CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
874 delay(1000);
875
876 CSR_WRITE(sc, SONIC_CR, CR_RST);
877 delay(1000);
878
879 /* clear all interrupts */
880 CSR_WRITE(sc, SONIC_IMR, 0);
881 CSR_WRITE(sc, SONIC_ISR, IMR_ALL);
882
883 CSR_WRITE(sc, SONIC_CR, 0);
884 delay(1000);
885 }
886
887 /*
888 * sonic_init: [ifnet interface function]
889 *
890 * Initialize the interface. Must be called at splnet().
891 */
892 int
893 sonic_init(struct ifnet *ifp)
894 {
895 struct sonic_softc *sc = ifp->if_softc;
896 struct sonic_descsoft *ds;
897 int i, error = 0;
898 uint16_t reg;
899
900 /*
901 * Cancel any pending I/O.
902 */
903 sonic_stop(ifp, 0);
904
905 /*
906 * Reset the SONIC to a known state.
907 */
908 sonic_reset(sc);
909
910 /*
911 * Bring the SONIC into reset state, and program the DCR.
912 *
913 * Note: We don't bother optimizing the transmit and receive
914 * thresholds here. TFT/RFT values should be set in MD attachments.
915 */
916 reg = sc->sc_dcr;
917 if (sc->sc_32bit)
918 reg |= DCR_DW;
919 CSR_WRITE(sc, SONIC_CR, CR_RST);
920 CSR_WRITE(sc, SONIC_DCR, reg);
921 CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
922 CSR_WRITE(sc, SONIC_CR, 0);
923
924 /*
925 * Initialize the transmit descriptors.
926 */
927 if (sc->sc_32bit) {
928 for (i = 0; i < SONIC_NTXDESC; i++) {
929 memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
930 SONIC_CDTXSYNC32(sc, i,
931 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
932 }
933 } else {
934 for (i = 0; i < SONIC_NTXDESC; i++) {
935 memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
936 SONIC_CDTXSYNC16(sc, i,
937 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
938 }
939 }
940 sc->sc_txpending = 0;
941 sc->sc_txdirty = 0;
942 sc->sc_txlast = SONIC_NTXDESC - 1;
943
944 /*
945 * Initialize the receive descriptor ring.
946 */
947 for (i = 0; i < SONIC_NRXDESC; i++) {
948 ds = &sc->sc_rxsoft[i];
949 if (ds->ds_mbuf == NULL) {
950 if ((error = sonic_add_rxbuf(sc, i)) != 0) {
951 printf("%s: unable to allocate or map Rx "
952 "buffer %d, error = %d\n",
953 sc->sc_dev.dv_xname, i, error);
954 /*
955 * XXX Should attempt to run with fewer receive
956 * XXX buffers instead of just failing.
957 */
958 sonic_rxdrain(sc);
959 goto out;
960 }
961 } else
962 SONIC_INIT_RXDESC(sc, i);
963 }
964 sc->sc_rxptr = 0;
965
966 /* Give the transmit ring to the SONIC. */
967 CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
968 CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);
969
970 /* Give the receive descriptor ring to the SONIC. */
971 CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
972 CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);
973
974 /* Give the receive buffer ring to the SONIC. */
975 CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
976 CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
977 if (sc->sc_32bit)
978 CSR_WRITE(sc, SONIC_REAR,
979 (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
980 sizeof(struct sonic_rra32)) & 0xffff);
981 else
982 CSR_WRITE(sc, SONIC_REAR,
983 (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
984 sizeof(struct sonic_rra16)) & 0xffff);
985 CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
986 CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));
987
988 /*
989 * Set the End-Of-Buffer counter such that only one packet
990 * will be placed into each buffer we provide. Note we are
991 * following the recommendation of section 3.4.4 of the manual
992 * here, and have "lengthened" the receive buffers accordingly.
993 */
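	/* Note EOBC is counted in 16-bit words, hence the division by 2. */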
994 if (sc->sc_32bit)
995 CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
996 else
997 CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));
998
999 /* Reset the receive sequence counter. */
1000 CSR_WRITE(sc, SONIC_RSC, 0);
1001
1002 /* Clear the tally registers. */
1003 CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
1004 CSR_WRITE(sc, SONIC_FAET, 0xffff);
1005 CSR_WRITE(sc, SONIC_MPT, 0xffff);
1006
1007 /* Set the receive filter. */
1008 sonic_set_filter(sc);
1009
1010 /*
1011 * Set the interrupt mask register.
1012 */
1013 sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
1014 IMR_TXER | IMR_PTX | IMR_PRX;
1015 CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);
1016
1017 /*
1018 * Start the receive process in motion. Note, we don't
1019 * start the transmit process until we actually try to
1020 * transmit packets.
1021 */
1022 CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);
1023
1024 /*
1025 * ...all done!
1026 */
1027 ifp->if_flags |= IFF_RUNNING;
1028 ifp->if_flags &= ~IFF_OACTIVE;
1029
1030 out:
1031 if (error)
1032 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1033 return (error);
1034 }
1035
1036 /*
1037 * sonic_rxdrain:
1038 *
1039 * Drain the receive queue.
1040 */
1041 void
1042 sonic_rxdrain(struct sonic_softc *sc)
1043 {
1044 struct sonic_descsoft *ds;
1045 int i;
1046
1047 for (i = 0; i < SONIC_NRXDESC; i++) {
1048 ds = &sc->sc_rxsoft[i];
1049 if (ds->ds_mbuf != NULL) {
1050 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1051 m_freem(ds->ds_mbuf);
1052 ds->ds_mbuf = NULL;
1053 }
1054 }
1055 }
1056
1057 /*
1058 * sonic_stop: [ifnet interface function]
1059 *
1060 * Stop transmission on the interface.
1061 */
1062 void
1063 sonic_stop(struct ifnet *ifp, int disable)
1064 {
1065 struct sonic_softc *sc = ifp->if_softc;
1066 struct sonic_descsoft *ds;
1067 int i;
1068
1069 /*
1070 * Disable interrupts.
1071 */
1072 CSR_WRITE(sc, SONIC_IMR, 0);
1073
1074 /*
1075 * Stop the transmitter, receiver, and timer.
1076 */
1077 CSR_WRITE(sc, SONIC_CR, CR_HTX|CR_RXDIS|CR_STP);
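	/* Wait up to ~2ms (1000 * 2us) for the chip to go idle. */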
1078 for (i = 0; i < 1000; i++) {
1079 if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) == 0)
1080 break;
1081 delay(2);
1082 }
1083 if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) != 0)
1084 printf("%s: SONIC failed to stop\n", sc->sc_dev.dv_xname);
1085
1086 /*
1087 * Release any queued transmit buffers.
1088 */
1089 for (i = 0; i < SONIC_NTXDESC; i++) {
1090 ds = &sc->sc_txsoft[i];
1091 if (ds->ds_mbuf != NULL) {
1092 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1093 m_freem(ds->ds_mbuf);
1094 ds->ds_mbuf = NULL;
1095 }
1096 }
1097
1098 /*
1099 * Mark the interface down and cancel the watchdog timer.
1100 */
1101 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1102 ifp->if_timer = 0;
1103
1104 if (disable)
1105 sonic_rxdrain(sc);
1106 }
1107
1108 /*
1109 * sonic_add_rxbuf:
1110 *
1111 * Add a receive buffer to the indicated descriptor.
1112 */
1113 int
1114 sonic_add_rxbuf(struct sonic_softc *sc, int idx)
1115 {
1116 struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
1117 struct mbuf *m;
1118 int error;
1119
1120 MGETHDR(m, M_DONTWAIT, MT_DATA);
1121 if (m == NULL)
1122 return (ENOBUFS);
1123
1124 MCLGET(m, M_DONTWAIT);
1125 if ((m->m_flags & M_EXT) == 0) {
1126 m_freem(m);
1127 return (ENOBUFS);
1128 }
1129
1130 if (ds->ds_mbuf != NULL)
1131 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1132
1133 ds->ds_mbuf = m;
1134
1135 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1136 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1137 BUS_DMA_READ|BUS_DMA_NOWAIT);
1138 if (error) {
1139 printf("%s: can't load rx DMA map %d, error = %d\n",
1140 sc->sc_dev.dv_xname, idx, error);
1141 panic("sonic_add_rxbuf"); /* XXX */
1142 }
1143
1144 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1145 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1146
1147 SONIC_INIT_RXDESC(sc, idx);
1148
1149 return (0);
1150 }
1151
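/*
 * sonic_set_camentry:
 *
 *	Set the given CAM entry to the given Ethernet address.
 */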
1152 static void
1153 sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
1154 {
1155
1156 if (sc->sc_32bit) {
1157 struct sonic_cda32 *cda = &sc->sc_cda32[entry];
1158
1159 cda->cda_entry = htosonic32(sc, entry);
1160 cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
1161 cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
1162 cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
1163 } else {
1164 struct sonic_cda16 *cda = &sc->sc_cda16[entry];
1165
1166 cda->cda_entry = htosonic16(sc, entry);
1167 cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
1168 cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
1169 cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
1170 }
1171 }
1172
1173 /*
1174 * sonic_set_filter:
1175 *
1176 * Set the SONIC receive filter.
1177 */
1178 void
1179 sonic_set_filter(struct sonic_softc *sc)
1180 {
1181 struct ethercom *ec = &sc->sc_ethercom;
1182 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1183 struct ether_multi *enm;
1184 struct ether_multistep step;
1185 int i, entry = 0;
1186 uint16_t camvalid = 0;
1187 uint16_t rcr = 0;
1188
1189 if (ifp->if_flags & IFF_BROADCAST)
1190 rcr |= RCR_BRD;
1191
1192 if (ifp->if_flags & IFF_PROMISC) {
1193 rcr |= RCR_PRO;
1194 goto allmulti;
1195 }
1196
1197 /* Put our station address in the first CAM slot. */
1198 sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
1199 camvalid |= (1U << entry);
1200 entry++;
1201
1202 /* Add the multicast addresses to the CAM. */
1203 ETHER_FIRST_MULTI(step, ec, enm);
1204 while (enm != NULL) {
1205 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1206 /*
1207 * We must listen to a range of multicast addresses.
1208 * The only way to do this on the SONIC is to enable
1209 * reception of all multicast packets.
1210 */
1211 goto allmulti;
1212 }
1213
1214 if (entry == 16) {
1215 /*
1216 * Out of CAM slots. Have to enable reception
1217 * of all multicast addresses.
1218 */
1219 goto allmulti;
1220 }
1221
1222 sonic_set_camentry(sc, entry, enm->enm_addrlo);
1223 camvalid |= (1U << entry);
1224 entry++;
1225
1226 ETHER_NEXT_MULTI(step, enm);
1227 }
1228
1229 ifp->if_flags &= ~IFF_ALLMULTI;
1230 goto setit;
1231
1232 allmulti:
1233 /* Use only the first CAM slot (station address). */
1234 camvalid = 0x0001;
1235 entry = 1;
1236 rcr |= RCR_AMC;
1237
1238 setit:
1239 /* Load the CAM. */
1240 SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
1241 CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
1242 CSR_WRITE(sc, SONIC_CDC, entry);
1243 CSR_WRITE(sc, SONIC_CR, CR_LCAM);
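	/* Wait up to ~20ms (10000 * 2us) for the CAM load to complete. */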
1244 for (i = 0; i < 10000; i++) {
1245 if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
1246 break;
1247 delay(2);
1248 }
1249 if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
1250 printf("%s: CAM load failed\n", sc->sc_dev.dv_xname);
1251 SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);
1252
1253 /* Set the CAM enable register. */
1254 CSR_WRITE(sc, SONIC_CER, camvalid);
1255
1256 /* Set the receive control register. */
1257 CSR_WRITE(sc, SONIC_RCR, rcr);
1258 }
1259