/*	$NetBSD: if_qe.c,v 1.40 2000/03/30 12:45:37 augustss Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that are still to do:
 *	Have a timeout check for hung transmit logic.
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define	RXDESCS	30	/* # of receive descriptors */
#define	TXDESCS	60	/* # transmit descs */
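
/*
 * Note that each descriptor ring below is allocated with one extra slot;
 * qeattach() turns that last slot into a QE_CHAIN descriptor pointing
 * back at element 0, so the device sees an endless ring.
 */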

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};

struct qe_softc {
	struct device	sc_dev;		/* Configuration common part */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata	*sc_qedata;	/* Descriptor struct */
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above */
	bus_dmamap_t	sc_cmap;	/* Map for control structures */
	struct mbuf	*sc_txmbuf[TXDESCS];
	struct mbuf	*sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;	/* Next free transmit descriptor */
	int		sc_inq;		/* # of descriptors handed to chip */
	int		sc_lastack;	/* Last transmit descriptor acked */
	int		sc_nextrx;	/* Next receive descriptor to check */
	int		sc_setup;	/* Setup packet in queue */
};

static	int	qematch __P((struct device *, struct cfdata *, void *));
static	void	qeattach __P((struct device *, struct device *, void *));
static	void	qeinit __P((struct qe_softc *));
static	void	qestart __P((struct ifnet *));
static	void	qeintr __P((void *));
static	int	qeioctl __P((struct ifnet *, u_long, caddr_t));
static	int	qe_add_rxbuf __P((struct qe_softc *, int));
static	void	qe_setup __P((struct qe_softc *));
static	void	qetimeout __P((struct ifnet *));

struct cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)
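
/*
 * The device is given buffer addresses as two 16-bit halves of what is
 * presumably a 22-bit Qbus address: LOWORD is the low 16 bits and HIWORD
 * the remaining high bits (hence the 0x3f mask). For example, the map
 * address 0x3fe40 is written as addr_lo = 0xfe40, addr_hi = 0x3.
 */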

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	bus_dmamap_t cmap;
	struct qe_softc ssc;
	struct qe_softc *sc = &ssc;
	struct uba_attach_args *ua = aux;
	struct uba_softc *ubasc = (struct uba_softc *)parent;

#define	PROBESIZE	(sizeof(struct qe_ring) * 4 + 128)
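	/*
	 * Probe area layout (all inside ring[]): the first four qe_ring
	 * slots are used as descriptors, and the 128 bytes starting at
	 * &rp[4] serve as the fake setup packet buffer.
	 */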
	struct qe_ring ring[15]; /* For diag purposes only */
	struct qe_ring *rp;
	int error;

	bzero(sc, sizeof(struct qe_softc));
	bzero(ring, PROBESIZE);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	ubasc->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, PROBESIZE, 1, PROBESIZE, 0,
	    BUS_DMA_NOWAIT, &cmap))) {
		printf("qematch: bus_dmamap_create failed = %d\n", error);
		return 0;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, cmap, ring, PROBESIZE, 0,
	    BUS_DMA_NOWAIT))) {
		printf("qematch: bus_dmamap_load failed = %d\n", error);
		bus_dmamap_destroy(sc->sc_dmat, cmap);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)cmap->dm_segs[0].ds_addr;
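	/*
	 * Note that rp holds the Unibus-side DMA address of ring[], not
	 * a CPU pointer; it is only used to compute the addresses fed to
	 * the device, never dereferenced.
	 */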
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = 128;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = 128;
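
	/*
	 * ring[0] is the transmit (setup) descriptor and ring[2] the
	 * receive descriptor; ring[1] and ring[3] are left zeroed, so
	 * they lack QE_VALID and presumably stop the device after the
	 * first descriptor in each list.
	 */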

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	bus_dmamap_unload(sc->sc_dmat, cmap);
	bus_dmamap_destroy(sc->sc_dmat, cmap);
	return 1;
}

/*
 * Interface exists: make it available by filling in the network
 * interface record. The system will initialize the interface when
 * it is ready to accept packets.
 */
void
qeattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct uba_attach_args *ua = aux;
	struct uba_softc *ubasc = (struct uba_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, rseg, error;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct qe_cdata), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct qe_cdata), (caddr_t *)&sc->sc_qedata,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct qe_cdata), 1,
	    sizeof(struct qe_cdata), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cmap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap,
	    sc->sc_qedata, sizeof(struct qe_cdata), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	bzero(sc->sc_qedata, sizeof(struct qe_cdata));
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we can avoid this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;
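
	/*
	 * The extra descriptor at the end of each list has QE_CHAIN set
	 * and carries the Unibus address of descriptor 0; when the device
	 * reaches it, it presumably jumps back to the start of the list
	 * instead of doing a data transfer, which turns the list into
	 * a ring.
	 */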

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = ubasc->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and tell which type this card is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

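	/*
	 * Identity check: the low bit of the vector register is
	 * apparently writable on a DELQA but reads back as zero on a
	 * DEQNA, so set it and see whether it sticks.
	 */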
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr, sc);

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_qedata,
	    sizeof(struct qe_cdata));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Initialization of interface.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, error;
	short orword;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splimp();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IF_DEQUEUE(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always DMA directly from the mbufs, which is why
		 * the transmit ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("qestart");

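		/*
		 * If there are not enough free descriptors left for
		 * every fragment of this packet, put it back at the
		 * head of the send queue, mark the interface busy and
		 * wait for the transmit interrupt to make room.
		 */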
		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			IF_PREPEND(&sc->sc_if.if_snd, m);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, 0);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
			len = m0->m_len;
			if (len == 0)
				continue;

			totlen += len;
			/* Word alignment calc */
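			/*
			 * The device transfers 16-bit words; qe_buf_len
			 * below is a negative word count, and odd byte
			 * boundaries must be flagged: QE_ODDBEGIN and
			 * QE_ODDEND apparently tell the chip to skip the
			 * extra byte at the start and end of an oddly
			 * aligned buffer, with len padded so that len/2
			 * words cover it all.
			 */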
			orword = 0;
			if (totlen == m->m_pkthdr.len) {
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct ether_header *eh;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
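			/*
			 * The receive byte count is split between the
			 * two status words; the hardware counter
			 * apparently excludes the first 60 bytes of the
			 * frame, hence the + 60.
			 */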
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
				    ETHER_ADDR_LEN) != 0 &&
				    ((eh->ether_dhost[0] & 1) == 0)) {
					m_freem(m);
					continue;
				}
			}
#endif
			/*
			 * ALLMULTI means PROMISC in this driver.
			 */
			if ((ifp->if_flags & IFF_ALLMULTI) &&
			    ((eh->ether_dhost[0] & 1) == 0) &&
			    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
			    ETHER_ADDR_LEN)) {
				m_freem(m);
				continue;
			}
			(*ifp->if_input)(ifp, m);
		}

	if (csr & QE_XMIT_INT) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put more packets in the queue */
	}
	/*
	 * How can the receive list get invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running,
			 * stop it (by disabling the receive mechanism).
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc.)
			 */
			qe_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ec) :
		    ether_delmulti(ifr, &sc->sc_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;

	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(sc, i)
	struct qe_softc *sc;
	int i;
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put in queue for sending.
 */
void
qe_setup(sc)
	struct qe_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splimp();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
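	/*
	 * Setup frame layout (assumed from the code below): the frame
	 * is two 64-byte halves; in each half, byte i of address column
	 * j lives at offset i * 8 + j, with columns 1-7 usable. The
	 * memset above makes every unused column the broadcast address;
	 * column 1 of the first half is our own address, and columns
	 * 3-7 of the first half plus 1-7 of the second half (k = 64)
	 * give the 12 multicast slots.
	 */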
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		qc->qc_xmit[idx].qe_buf_len = -65;
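	/*
	 * The mode is apparently encoded in the setup frame length:
	 * -64 words describes the normal 128-byte setup frame, while
	 * the odd count -65 seems to flip the device into promiscuous
	 * reception.
	 */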

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Do a reset of the interface, to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}