/*	$NetBSD: if_qe.c,v 1.47 2001/04/12 20:04:24 thorpej Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things still left to do:
 *	Have a timeout check for hung transmit logic.
 *	Handle ubaresets.  Does not work at all right now.
 *	Fix ALLMULTI reception.  But someone must tell me how...
 *	Collect statistics.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */
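
/*
 * qestart() consumes one transmit descriptor for every non-empty mbuf
 * in an outgoing chain and DMAs straight out of the mbufs; that is
 * why the transmit ring is noticeably larger than the receive ring.
 */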

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};
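
/*
 * Both rings are allocated with one extra slot: qeattach() turns the
 * last descriptor of each ring into a chain descriptor that points
 * back at entry zero, so the device follows an endless ring while the
 * driver indexes a flat array.
 */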

struct	qe_softc {
	struct device	sc_dev;		/* Configuration common part	*/
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom	sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata	*sc_qedata;	/* Descriptor struct		*/
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above	*/
	bus_dmamap_t	sc_cmap;	/* Map for control structures	*/
	struct mbuf	*sc_txmbuf[TXDESCS];
	struct mbuf	*sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	int		sc_intvec;	/* Interrupt vector		*/
	int		sc_nexttx;	/* Next free tx descriptor	*/
	int		sc_inq;		/* # of descriptors in tx queue	*/
	int		sc_lastack;	/* Last tx descriptor acked	*/
	int		sc_nextrx;	/* Next rx descriptor to check	*/
	int		sc_setup;	/* Setup packet in queue	*/
};

static	int	qematch(struct device *, struct cfdata *, void *);
static	void	qeattach(struct device *, struct device *, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, caddr_t);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

struct	cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)
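
/*
 * QE_WCSR/QE_RCSR assume a `struct qe_softc *sc' in the local scope.
 * LOWORD/HIWORD split a 22-bit Qbus DMA address into its low 16 bits
 * and its upper six bits; the upper bits land in the low part of a
 * descriptor's qe_addr_hi word, where they are OR'ed together with
 * flag bits such as QE_VALID, QE_CHAIN and QE_EOMSG.
 */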

/*
 * Check for a present DEQNA.  Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(struct device *parent, struct cfdata *cf, void *aux)
{
	bus_dmamap_t cmap;
	struct	qe_softc ssc;
	struct	qe_softc *sc = &ssc;
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;

#define	PROBESIZE	(sizeof(struct qe_ring) * 4 + 128)
	struct	qe_ring ring[15]; /* For diag purposes only */
	struct	qe_ring *rp;
	int	error;

	bzero(sc, sizeof(struct qe_softc));
	bzero(ring, PROBESIZE);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	ubasc->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);

	/*
	 * Map the ring area.  Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, PROBESIZE, 1, PROBESIZE, 0,
	    BUS_DMA_NOWAIT, &cmap))) {
		printf("qematch: bus_dmamap_create failed = %d\n", error);
		return 0;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, cmap, ring, PROBESIZE, 0,
	    BUS_DMA_NOWAIT))) {
		printf("qematch: bus_dmamap_load failed = %d\n", error);
		bus_dmamap_destroy(sc->sc_dmat, cmap);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area.  Send a fake setup packet.
	 */
	rp = (void *)cmap->dm_segs[0].ds_addr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = 128;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = 128;

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	bus_dmamap_unload(sc->sc_dmat, cmap);
	bus_dmamap_destroy(sc->sc_dmat, cmap);
	return 1;
}

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(struct device *parent, struct device *self, void *aux)
{
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;
	struct	qe_softc *sc = (struct qe_softc *)self;
	struct	ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct	qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, rseg, error;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct qe_cdata), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct qe_cdata), (caddr_t *)&sc->sc_qedata,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct qe_cdata), 1,
	    sizeof(struct qe_cdata), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cmap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap,
	    sc->sc_qedata, sizeof(struct qe_cdata), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	bzero(sc->sc_qedata, sizeof(struct qe_cdata));
	/*
	 * Create the transmit descriptor DMA maps.  We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we can avoid this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = ubasc->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and report which type of card
	 * this is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    sc->sc_dev.dv_xname, "intr");

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_qedata,
	    sizeof(struct qe_cdata));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
fail_0:
	return;
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0;	/* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, error;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always do DMA directly from the mbufs; that is why
		 * the transmit ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, 0);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
			len = m0->m_len;
			if (len == 0)
				continue;

			totlen += len;
			/*
			 * Word alignment calc: pad short frames out to
			 * ETHER_MIN_LEN, round the length up to a word
			 * boundary, and flag buffers that begin or end
			 * on an odd address so the device can handle
			 * the odd bytes.
			 */
			orword = 0;
			if (totlen == m->m_pkthdr.len) {
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
			/*
			 * The RBL bits hold the received frame length
			 * minus 60, split across the two status words.
			 */
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Queue up more packets */
	}
	/*
	 * How can the receive list get invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running,
			 * stop it by disabling the receive mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped,
			 * start it.
			 */
			qeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes
			 * (like IFF_PROMISC etc).
			 */
			qe_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ec) :
		    ether_delmulti(ifr, &sc->sc_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned.  Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put in queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling.  The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
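	/*
	 * Layout, as implied by the indexing above and below: the
	 * 128-byte setup buffer is treated as two 64-byte halves, each
	 * made up of rows of eight bytes, with byte i of an address
	 * stored in row i.  Column 1 of the first half carries our own
	 * address; columns 3-7 of the first half and 1-7 of the second
	 * carry the (up to 12) multicast addresses, and the 0xff fill
	 * leaves every unused column as the broadcast address.
	 */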
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA turned into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic.  Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Do a reset of the interface, to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}