sgec.c revision 1.41 1 /* $NetBSD: sgec.c,v 1.41 2015/08/30 04:02:06 dholland Exp $ */
2 /*
3 * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed at Ludd, University of
16 * Lule}, Sweden and its contributors.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
/*
 * Driver for the SGEC (Second Generation Ethernet Controller), sitting
 * on for example the VAX 4000/300 (KA670).
 *
 * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
 *
 * Even though the chip is capable of using virtual addresses (reading
 * the System Page Table directly) this driver doesn't do so, and there
 * is no benefit in doing it either in NetBSD of today.
 *
 * Things still to be done:
 *	Collect statistics.
 *	Use imperfect filtering when there are many multicast addresses.
 */
46
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.41 2015/08/30 04:02:06 dholland Exp $");
49
50 #include "opt_inet.h"
51
52 #include <sys/param.h>
53 #include <sys/mbuf.h>
54 #include <sys/socket.h>
55 #include <sys/device.h>
56 #include <sys/systm.h>
57 #include <sys/sockio.h>
58
59 #include <net/if.h>
60 #include <net/if_ether.h>
61 #include <net/if_dl.h>
62
63 #include <netinet/in.h>
64 #include <netinet/if_inarp.h>
65
66 #include <net/bpf.h>
67 #include <net/bpfdesc.h>
68
69 #include <sys/bus.h>
70
71 #include <dev/ic/sgecreg.h>
72 #include <dev/ic/sgecvar.h>
73
/* Forward declarations of local interface entry points and helpers. */
static void zeinit(struct ze_softc *);
static void zestart(struct ifnet *);
static int zeioctl(struct ifnet *, u_long, void *);
static int ze_add_rxbuf(struct ze_softc *, int);
static void ze_setup(struct ze_softc *);
static void zetimeout(struct ifnet *);
static bool zereset(struct ze_softc *);

/*
 * CSR access helpers.  Both expect a softc pointer named "sc" to be
 * in scope at the point of use.
 */
#define ZE_WCSR(csr, val) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define ZE_RCSR(csr) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)
86
87 /*
88 * Interface exists: make available by filling in network interface
89 * record. System will initialize the interface when it is ready
90 * to accept packets.
91 */
92 void
93 sgec_attach(struct ze_softc *sc)
94 {
95 struct ifnet *ifp = &sc->sc_if;
96 struct ze_tdes *tp;
97 struct ze_rdes *rp;
98 bus_dma_segment_t seg;
99 int i, rseg, error;
100
101 /*
102 * Allocate DMA safe memory for descriptors and setup memory.
103 */
104 error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ze_cdata),
105 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
106 if (error) {
107 aprint_error(": unable to allocate control data, error = %d\n",
108 error);
109 goto fail_0;
110 }
111
112 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct ze_cdata),
113 (void **)&sc->sc_zedata, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
114 if (error) {
115 aprint_error(
116 ": unable to map control data, error = %d\n", error);
117 goto fail_1;
118 }
119
120 error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ze_cdata), 1,
121 sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT, &sc->sc_cmap);
122 if (error) {
123 aprint_error(
124 ": unable to create control data DMA map, error = %d\n",
125 error);
126 goto fail_2;
127 }
128
129 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap, sc->sc_zedata,
130 sizeof(struct ze_cdata), NULL, BUS_DMA_NOWAIT);
131 if (error) {
132 aprint_error(
133 ": unable to load control data DMA map, error = %d\n",
134 error);
135 goto fail_3;
136 }
137
138 /*
139 * Zero the newly allocated memory.
140 */
141 memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));
142
143 /*
144 * Create the transmit descriptor DMA maps.
145 */
146 for (i = 0; error == 0 && i < TXDESCS; i++) {
147 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
148 TXDESCS - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
149 &sc->sc_xmtmap[i]);
150 }
151 if (error) {
152 aprint_error(": unable to create tx DMA map %d, error = %d\n",
153 i, error);
154 goto fail_4;
155 }
156
157 /*
158 * Create receive buffer DMA maps.
159 */
160 for (i = 0; error == 0 && i < RXDESCS; i++) {
161 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
162 MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]);
163 }
164 if (error) {
165 aprint_error(": unable to create rx DMA map %d, error = %d\n",
166 i, error);
167 goto fail_5;
168 }
169
170 /*
171 * Pre-allocate the receive buffers.
172 */
173 for (i = 0; error == 0 && i < RXDESCS; i++) {
174 error = ze_add_rxbuf(sc, i);
175 }
176
177 if (error) {
178 aprint_error(
179 ": unable to allocate or map rx buffer %d, error = %d\n",
180 i, error);
181 goto fail_6;
182 }
183
184 /* For vmstat -i
185 */
186 evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
187 device_xname(sc->sc_dev), "intr");
188 evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR,
189 &sc->sc_intrcnt, device_xname(sc->sc_dev), "rx intr");
190 evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR,
191 &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx intr");
192 evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR,
193 &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx drain");
194 evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR,
195 &sc->sc_intrcnt, device_xname(sc->sc_dev), "nobuf intr");
196 evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR,
197 &sc->sc_intrcnt, device_xname(sc->sc_dev), "no intr");
198
199 /*
200 * Create ring loops of the buffer chains.
201 * This is only done once.
202 */
203 sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;
204
205 rp = sc->sc_zedata->zc_recv;
206 rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
207 rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
208 rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;
209
210 tp = sc->sc_zedata->zc_xmit;
211 tp[TXDESCS].ze_tdr = ZE_TDR_OW;
212 tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
213 tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;
214
215 if (zereset(sc))
216 return;
217
218 strcpy(ifp->if_xname, device_xname(sc->sc_dev));
219 ifp->if_softc = sc;
220 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
221 ifp->if_start = zestart;
222 ifp->if_ioctl = zeioctl;
223 ifp->if_watchdog = zetimeout;
224 IFQ_SET_READY(&ifp->if_snd);
225
226 /*
227 * Attach the interface.
228 */
229 if_attach(ifp);
230 ether_ifattach(ifp, sc->sc_enaddr);
231
232 aprint_normal("\n");
233 aprint_normal_dev(sc->sc_dev, "hardware address %s\n",
234 ether_sprintf(sc->sc_enaddr));
235 return;
236
237 /*
238 * Free any resources we've allocated during the failed attach
239 * attempt. Do this in reverse order and fall through.
240 */
241 fail_6:
242 for (i = 0; i < RXDESCS; i++) {
243 if (sc->sc_rxmbuf[i] != NULL) {
244 bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
245 m_freem(sc->sc_rxmbuf[i]);
246 }
247 }
248 fail_5:
249 for (i = 0; i < TXDESCS; i++) {
250 if (sc->sc_xmtmap[i] != NULL)
251 bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
252 }
253 fail_4:
254 for (i = 0; i < RXDESCS; i++) {
255 if (sc->sc_rcvmap[i] != NULL)
256 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
257 }
258 bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
259 fail_3:
260 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
261 fail_2:
262 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata,
263 sizeof(struct ze_cdata));
264 fail_1:
265 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
266 fail_0:
267 return;
268 }
269
270 /*
271 * Initialization of interface.
272 */
273 void
274 zeinit(struct ze_softc *sc)
275 {
276 struct ifnet *ifp = &sc->sc_if;
277 struct ze_cdata *zc = sc->sc_zedata;
278 int i;
279
280 /*
281 * Reset the interface.
282 */
283 if (zereset(sc))
284 return;
285
286 sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = sc->sc_txcnt = 0;
287 /*
288 * Release and init transmit descriptors.
289 */
290 for (i = 0; i < TXDESCS; i++) {
291 if (sc->sc_xmtmap[i]->dm_nsegs > 0)
292 bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
293 if (sc->sc_txmbuf[i]) {
294 m_freem(sc->sc_txmbuf[i]);
295 sc->sc_txmbuf[i] = 0;
296 }
297 zc->zc_xmit[i].ze_tdr = 0; /* Clear valid bit */
298 }
299
300
301 /*
302 * Init receive descriptors.
303 */
304 for (i = 0; i < RXDESCS; i++)
305 zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
306 sc->sc_nextrx = 0;
307
308 ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
309 ZE_NICSR6_SR|ZE_NICSR6_DC);
310
311 ifp->if_flags |= IFF_RUNNING;
312 ifp->if_flags &= ~IFF_OACTIVE;
313
314 /*
315 * Send a setup frame.
316 * This will start the transmit machinery as well.
317 */
318 ze_setup(sc);
319
320 }
321
/*
 * Start output on interface.  Installed as ifp->if_start.
 *
 * Packets are DMA'd straight out of their mbuf chains (no copy), one
 * transmit descriptor per DMA segment.  Stops when the send queue is
 * empty or when too few descriptors are free, in which case
 * IFF_OACTIVE is set and the interrupt handler restarts us later.
 */
void
zestart(struct ifnet *ifp)
{
	struct ze_softc *sc = ifp->if_softc;
	struct ze_cdata *zc = sc->sc_zedata;
	paddr_t buffer;
	struct mbuf *m;
	int nexttx, starttx;
	int len, i, totlen, error;
	int old_inq = sc->sc_inq;	/* to detect whether we queued anything */
	uint16_t orword, tdr = 0;
	bus_dmamap_t map;

	while (sc->sc_inq < (TXDESCS - 1)) {

		/* A pending setup frame takes priority over data. */
		if (sc->sc_setup) {
			ze_setup(sc);
			continue;
		}
		nexttx = sc->sc_nexttx;
		/* Peek only; dequeue after the mapping has succeeded. */
		IFQ_POLL(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		map = sc->sc_xmtmap[nexttx];
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "zestart: load_mbuf failed: %d", error);
			goto out;
		}

		/* A chain this long can never fit in the ring. */
		if (map->dm_nsegs >= TXDESCS)
			panic("zestart"); /* XXX */

		/* Not enough free descriptors right now; retry later. */
		if ((map->dm_nsegs + sc->sc_inq) >= (TXDESCS - 1)) {
			bus_dmamap_unload(sc->sc_dmat, map);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		orword = ZE_TDES1_FS;	/* first-segment flag */
		starttx = nexttx;
		for (i = 0; i < map->dm_nsegs; i++) {
			buffer = map->dm_segs[i].ds_addr;
			len = map->dm_segs[i].ds_len;

			KASSERT(len > 0);

			totlen += len;
			/* Word alignment calc */
			if (totlen == m->m_pkthdr.len) {
				/*
				 * Last segment of the packet: request a
				 * tx-complete interrupt roughly every 3/4
				 * of a ring's worth of queued segments,
				 * and remember the mbuf so the interrupt
				 * handler can free it on completion.
				 */
				sc->sc_txcnt += map->dm_nsegs;
				if (sc->sc_txcnt >= TXDESCS * 3 / 4) {
					orword |= ZE_TDES1_IC;
					sc->sc_txcnt = 0;
				}
				orword |= ZE_TDES1_LS;
				sc->sc_txmbuf[nexttx] = m;
			}
			zc->zc_xmit[nexttx].ze_bufsize = len;
			zc->zc_xmit[nexttx].ze_bufaddr = (char *)buffer;
			zc->zc_xmit[nexttx].ze_tdes1 = orword;
			zc->zc_xmit[nexttx].ze_tdr = tdr;

			if (++nexttx == TXDESCS)
				nexttx = 0;
			/*
			 * All descriptors after the first are handed to
			 * the chip immediately; the first one is released
			 * last (below) so the chip never sees a partially
			 * built chain.
			 */
			orword = 0;
			tdr = ZE_TDR_OW;
		}

		sc->sc_inq += map->dm_nsegs;

		IFQ_DEQUEUE(&ifp->if_snd, m);
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("zestart: len fault");
#endif
		/*
		 * Turn ownership of the packet over to the device.
		 */
		zc->zc_xmit[starttx].ze_tdr = ZE_TDR_OW;

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);
		sc->sc_nexttx = nexttx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (old_inq < sc->sc_inq)
		ifp->if_timer = 5;	/* If transmit logic dies */
}
431
/*
 * Interrupt handler.  Returns 0 if the interrupt was not from this
 * device, 1 otherwise.  Handles receive completion, "no receive
 * buffer" events, and reclaims completed transmit descriptors.
 */
int
sgec_intr(struct ze_softc *sc)
{
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, len;

	csr = ZE_RCSR(ZE_CSR5);
	if ((csr & ZE_NICSR5_IS) == 0) {	/* interrupt wasn't ours */
		sc->sc_nointrcnt.ev_count++;
		return 0;
	}
	ZE_WCSR(ZE_CSR5, csr);	/* acknowledge by writing the bits back */

	if (csr & ZE_NICSR5_RU)		/* receiver ran out of buffers */
		sc->sc_nobufintrcnt.ev_count++;

	if (csr & ZE_NICSR5_RI) {
		sc->sc_rxintrcnt.ev_count++;
		/* Drain every descriptor the chip has handed back. */
		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
		    ZE_FRAMELEN_OW) == 0) {

			ifp->if_ipackets++;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
			/* Replace the buffer before passing the mbuf up. */
			ze_add_rxbuf(sc, sc->sc_nextrx);
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			/* Runt frame: count an input error and drop it. */
			if (len < ETHER_MIN_LEN) {
				ifp->if_ierrors++;
				m_freem(m);
			} else {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = m->m_len =
				    len - ETHER_CRC_LEN;
				bpf_mtap(ifp, m);
				(*ifp->if_input)(ifp, m);
			}
		}
	}

	if (csr & ZE_NICSR5_TI)
		sc->sc_txintrcnt.ev_count++;
	/*
	 * Reclaim finished transmit descriptors between sc_lastack
	 * and sc_nexttx.
	 */
	if (sc->sc_lastack != sc->sc_nexttx) {
		int lastack;
		for (lastack = sc->sc_lastack; lastack != sc->sc_nexttx; ) {
			bus_dmamap_t map;
			int nlastack;

			/* Chip still owns this descriptor; stop here. */
			if ((zc->zc_xmit[lastack].ze_tdr & ZE_TDR_OW) != 0)
				break;

			/* Setup frames have no mbuf or DMA map to free. */
			if ((zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_DT) ==
			    ZE_TDES1_DT_SETUP) {
				if (++lastack == TXDESCS)
					lastack = 0;
				sc->sc_inq--;
				continue;
			}

			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_FS);
			map = sc->sc_xmtmap[lastack];
			KASSERT(map->dm_nsegs > 0);
			/*
			 * Only reclaim whole packets: check that the last
			 * segment of this chain is done too.
			 */
			nlastack = (lastack + map->dm_nsegs - 1) % TXDESCS;
			if (zc->zc_xmit[nlastack].ze_tdr & ZE_TDR_OW)
				break;
			lastack = nlastack;
			if (sc->sc_txcnt > map->dm_nsegs)
				sc->sc_txcnt -= map->dm_nsegs;
			else
				sc->sc_txcnt = 0;
			sc->sc_inq -= map->dm_nsegs;
			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_LS);
			ifp->if_opackets++;
			bus_dmamap_unload(sc->sc_dmat, map);
			KASSERT(sc->sc_txmbuf[lastack]);
			bpf_mtap(ifp, sc->sc_txmbuf[lastack]);
			m_freem(sc->sc_txmbuf[lastack]);
			sc->sc_txmbuf[lastack] = 0;
			if (++lastack == TXDESCS)
				lastack = 0;
		}
		if (lastack != sc->sc_lastack) {
			sc->sc_txdraincnt.ev_count++;
			sc->sc_lastack = lastack;
			if (sc->sc_inq == 0)
				ifp->if_timer = 0;	/* cancel watchdog */
			ifp->if_flags &= ~IFF_OACTIVE;
			zestart(ifp); /* Put in more in queue */
		}
	}
	return 1;
}
526
527 /*
528 * Process an ioctl request.
529 */
530 int
531 zeioctl(struct ifnet *ifp, u_long cmd, void *data)
532 {
533 struct ze_softc *sc = ifp->if_softc;
534 struct ifaddr *ifa = data;
535 int s = splnet(), error = 0;
536
537 switch (cmd) {
538
539 case SIOCINITIFADDR:
540 ifp->if_flags |= IFF_UP;
541 switch(ifa->ifa_addr->sa_family) {
542 #ifdef INET
543 case AF_INET:
544 zeinit(sc);
545 arp_ifinit(ifp, ifa);
546 break;
547 #endif
548 }
549 break;
550
551 case SIOCSIFFLAGS:
552 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
553 break;
554 /* XXX re-use ether_ioctl() */
555 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
556 case IFF_RUNNING:
557 /*
558 * If interface is marked down and it is running,
559 * stop it. (by disabling receive mechanism).
560 */
561 ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
562 ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
563 ifp->if_flags &= ~IFF_RUNNING;
564 break;
565 case IFF_UP:
566 /*
567 * If interface it marked up and it is stopped, then
568 * start it.
569 */
570 zeinit(sc);
571 break;
572 case IFF_UP|IFF_RUNNING:
573 /*
574 * Send a new setup packet to match any new changes.
575 * (Like IFF_PROMISC etc)
576 */
577 ze_setup(sc);
578 break;
579 case 0:
580 break;
581 }
582 break;
583
584 case SIOCADDMULTI:
585 case SIOCDELMULTI:
586 /*
587 * Update our multicast list.
588 */
589 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
590 /*
591 * Multicast list has changed; set the hardware filter
592 * accordingly.
593 */
594 if (ifp->if_flags & IFF_RUNNING)
595 ze_setup(sc);
596 error = 0;
597 }
598 break;
599
600 default:
601 error = ether_ioctl(ifp, cmd, data);
602
603 }
604 splx(s);
605 return (error);
606 }
607
608 /*
609 * Add a receive buffer to the indicated descriptor.
610 */
611 int
612 ze_add_rxbuf(struct ze_softc *sc, int i)
613 {
614 struct mbuf *m;
615 struct ze_rdes *rp;
616 int error;
617
618 MGETHDR(m, M_DONTWAIT, MT_DATA);
619 if (m == NULL)
620 return (ENOBUFS);
621
622 MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
623 MCLGET(m, M_DONTWAIT);
624 if ((m->m_flags & M_EXT) == 0) {
625 m_freem(m);
626 return (ENOBUFS);
627 }
628
629 if (sc->sc_rxmbuf[i] != NULL)
630 bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
631
632 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
633 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
634 BUS_DMA_READ|BUS_DMA_NOWAIT);
635 if (error)
636 panic("%s: can't load rx DMA map %d, error = %d",
637 device_xname(sc->sc_dev), i, error);
638 sc->sc_rxmbuf[i] = m;
639
640 bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
641 sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
642
643 /*
644 * We know that the mbuf cluster is page aligned. Also, be sure
645 * that the IP header will be longword aligned.
646 */
647 m->m_data += 2;
648 rp = &sc->sc_zedata->zc_recv[i];
649 rp->ze_bufsize = (m->m_ext.ext_size - 2);
650 rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
651 rp->ze_framelen = ZE_FRAMELEN_OW;
652
653 return (0);
654 }
655
/*
 * Create a setup packet and put in queue for sending.
 *
 * The setup frame programs the chip's perfect address filter; this
 * routine also reprograms the CSR6 address-filter mode bits for the
 * promiscuous/allmulti cases.  If no transmit descriptor is free the
 * request is remembered (sc_setup) and retried from zestart().
 */
void
ze_setup(struct ze_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	const u_int8_t *enaddr = CLLADDR(ifp->if_sadl);
	int j, idx, reg;

	/* Ring full right now; defer until descriptors free up. */
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info: fill with 0xff
	 * (broadcast) and put our own address first.
	 */
	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup)); /* Broadcast */
	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);

	/*
	 * Multicast handling. The SGEC can handle up to 16 direct
	 * ethernet addresses.
	 * NOTE(review): the code treats each filter slot as 8 bytes
	 * (multicast entries start at offset 16 and stop at 128);
	 * confirm the exact slot layout against the SGEC manual.
	 */
	j = 16;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* Address ranges can't be matched exactly: go allmulti. */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
		j += 8;
		ETHER_NEXT_MULTI(step, enm);
		/* Filter full but more addresses remain: go allmulti. */
		if ((enm != NULL)&& (j == 128)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
	}

	/*
	 * ALLMULTI implies PROMISC in this driver.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;

	/*
	 * Fiddle with the receive logic: stop the receiver, set the
	 * new address-filter mode, then restart it.
	 */
	reg = ZE_RCSR(ZE_CSR6);
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR);	/* Stop rx */
	reg &= ~ZE_NICSR6_AF;
	if (ifp->if_flags & IFF_PROMISC)
		reg |= ZE_NICSR6_AF_PROM;
	else if (ifp->if_flags & IFF_ALLMULTI)
		reg |= ZE_NICSR6_AF_ALLM;
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg);
	/*
	 * Only send a setup packet if needed (perfect filtering mode).
	 */
	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
		idx = sc->sc_nexttx;
		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
		zc->zc_xmit[idx].ze_bufsize = 128;
		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

		/* Poke the transmitter if it is idle. */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);

		sc->sc_inq++;
		if (++sc->sc_nexttx == TXDESCS)
			sc->sc_nexttx = 0;
	}
}
740
741 /*
742 * Check for dead transmit logic.
743 */
744 void
745 zetimeout(struct ifnet *ifp)
746 {
747 struct ze_softc *sc = ifp->if_softc;
748
749 if (sc->sc_inq == 0)
750 return;
751
752 aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
753 /*
754 * Do a reset of interface, to get it going again.
755 * Will it work by just restart the transmit logic?
756 */
757 zeinit(sc);
758 }
759
/*
 * Reset chip:
 *	Set/reset the reset flag.
 *	Write interrupt vector.
 *	Write ring buffer addresses.
 *	Write SBR.
 *
 * Returns true on failure, false on success.
 */
bool
zereset(struct ze_softc *sc)
{
	int reg, i;

	/* Request a chip reset and give the self-test time to run. */
	ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
	DELAY(50000);
	/*
	 * NOTE(review): this reads CSR6 but masks with a CSR5 bit name
	 * (ZE_NICSR5_SF, self-test fail).  Possibly an intentional bit
	 * overlap -- verify against the SGEC register definitions.
	 */
	if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
		aprint_error_dev(sc->sc_dev, "selftest failed\n");
		return true;
	}

	/*
	 * Get the vector that were set at match time, and remember it.
	 * WHICH VECTOR TO USE? Take one unused. XXX
	 * Funny way to set vector described in the programmers manual.
	 */
	reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
	i = 10;
	/* Retry the CSR0 write until the chip accepts it (max 10 tries). */
	do {
		if (i-- == 0) {
			aprint_error_dev(sc->sc_dev,
			    "failing SGEC CSR0 init\n");
			return true;
		}
		ZE_WCSR(ZE_CSR0, reg);
	} while (ZE_RCSR(ZE_CSR0) != reg);

	/* Hand the chip the bus addresses of both descriptor rings. */
	ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
	ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
	return false;
}
799