/*	$NetBSD: if_qe.c,v 1.51 2002/06/08 12:28:37 ragge Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that are still to do:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.51 2002/06/08 12:28:37 ragge Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};

struct qe_softc {
	struct device	sc_dev;		/* Configuration common part */
	struct evcnt	sc_intrcnt;	/* Interrupt counting */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata	*sc_qedata;	/* Descriptor struct */
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above */
	struct mbuf*	sc_txmbuf[TXDESCS];
	struct mbuf*	sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue */
};

static	int qematch(struct device *, struct cfdata *, void *);
static	void qeattach(struct device *, struct device *, void *);
static	void qeinit(struct qe_softc *);
static	void qestart(struct ifnet *);
static	void qeintr(void *);
static	int qeioctl(struct ifnet *, u_long, caddr_t);
static	int qe_add_rxbuf(struct qe_softc *, int);
static	void qe_setup(struct qe_softc *);
static	void qetimeout(struct ifnet *);

struct	cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

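/*
 * The buffer descriptors carry the DMA address as two 16-bit words.
 * The high-word mask below keeps six bits, which corresponds to the
 * 22-bit (Q22) Qbus address space.
 */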
#define LOWORD(x)	((int)(x) & 0xffff)
#define HIWORD(x)	(((int)(x) >> 16) & 0x3f)

/*
 * Check for present DEQNA. Done by sending a fake setup packet
 * and waiting for the interrupt.
 */
int
qematch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct qe_softc ssc;
	struct qe_softc *sc = &ssc;
	struct uba_attach_args *ua = aux;
	struct uba_softc *ubasc = (struct uba_softc *)parent;
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct qe_ring *rp;
	int error;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK);
	bzero(sc, sizeof(struct qe_softc));
	bzero(ring, PROBESIZE);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	ubasc->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (caddr_t)&ring[0];
	if ((error = uballoc((void *)parent, &ui, UBA_CANTWAIT))) {
		free(ring, M_TEMP);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	ubfree((void *)parent, &ui);
	free(ring, M_TEMP);
	return 1;
}

/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(struct device *parent, struct device *self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct uba_softc *ubasc = (struct uba_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */

	sc->sc_ui.ui_size = sizeof(struct qe_cdata);
	if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) {
		printf(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	bzero(sc->sc_qedata, sizeof(struct qe_cdata));
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we can avoid this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = ubasc->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and tell which type this card is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    sc->sc_dev.dv_xname, "intr");

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;


	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0;	/* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}


	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);

}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, error;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, 0);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
			len = m0->m_len;
			if (len == 0)
				continue;

			totlen += len;
			/* Word alignment calc */
			orword = 0;
			if (totlen == m->m_pkthdr.len) {
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
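			/*
			 * The descriptor length field holds the negative
			 * of the buffer size in words (len was made even
			 * above), as everywhere else in this driver.
			 */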
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
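			/*
			 * The received byte count is split across the two
			 * status words and is apparently biased by 60 (the
			 * minimum frame size); reassemble it here.
			 */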
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put more into the queue */
	}
	/*
	 * How can the receive list get invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If the interface is marked down and it is running,
			 * stop it (by disabling the receive mechanism).
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			qeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc)
			 */
			qe_setup(sc);
		}
		break;

658
659 case SIOCADDMULTI:
660 case SIOCDELMULTI:
661 /*
662 * Update our multicast list.
663 */
664 error = (cmd == SIOCADDMULTI) ?
665 ether_addmulti(ifr, &sc->sc_ec):
666 ether_delmulti(ifr, &sc->sc_ec);
667
668 if (error == ENETRESET) {
669 /*
670 * Multicast list has changed; set the hardware filter
671 * accordingly.
672 */
673 qe_setup(sc);
674 error = 0;
675 }
676 break;
677
678 default:
679 error = EINVAL;
680
681 }
682 splx(s);
683 return (error);
684 }
685
/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup));	/* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i];		/* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
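	/*
	 * Layout, as used by the code below: each address is stored as a
	 * column in the setup frame, i.e. byte i of an address goes at
	 * offset i * 8 + column, with seven columns per 64-byte half of
	 * the frame.
	 */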
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
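	/*
	 * A setup-frame length of 65 (rather than 64) seems to be what
	 * switches the chip into promiscuous reception.
	 */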
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Do a reset of the interface, to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}