/*	$NetBSD: if_qe.c,v 1.54 2002/09/30 22:42:11 thorpej Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that are still to do:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.54 2002/09/30 22:42:11 thorpej Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */

/*
 * Structure containing the elements that must be in DMA-safe memory.
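 * The extra (+1) entry at the end of each descriptor ring is used as a
 * chain descriptor that points back to entry 0, turning the list into a
 * ring (set up once in qeattach()).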
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};

struct qe_softc {
	struct device	sc_dev;		/* Configuration common part */
	struct evcnt	sc_intrcnt;	/* Interrupt counting */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata	*sc_qedata;	/* Descriptor struct */
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above */
	struct mbuf	*sc_txmbuf[TXDESCS];
	struct mbuf	*sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;	/* Next free transmit descriptor */
	int		sc_inq;		/* # of descriptors handed to the chip */
	int		sc_lastack;	/* Next transmit descriptor to reclaim */
	int		sc_nextrx;	/* Next receive descriptor to check */
	int		sc_setup;	/* Setup packet in queue */
};

static	int	qematch(struct device *, struct cfdata *, void *);
static	void	qeattach(struct device *, struct device *, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, caddr_t);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)
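
/*
 * Descriptors carry Qbus (22-bit) addresses split into a 16-bit low word
 * and a 6-bit high part stored in the address-high/flag word, hence the
 * 0x3f mask above.
 */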

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct qe_softc ssc;
	struct qe_softc *sc = &ssc;
	struct uba_attach_args *ua = aux;
	struct uba_softc *ubasc = (struct uba_softc *)parent;
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct qe_ring *rp;
	int error;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK);
	bzero(sc, sizeof(struct qe_softc));
	bzero(ring, PROBESIZE);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	ubasc->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is loopbacked
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (caddr_t)&ring[0];
	if ((error = uballoc((void *)parent, &ui, UBA_CANTWAIT))) {
		free(ring, M_TEMP);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
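	 * Note that buffer lengths are handed to the chip as negative word
	 * (16-bit) counts: -64 words is the 128-byte setup frame and
	 * -(1500/2) covers a maximum-size Ethernet payload.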
	 */
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	ubfree((void *)parent, &ui);
	free(ring, M_TEMP);
	return 1;
}

/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(struct device *parent, struct device *self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct uba_softc *ubasc = (struct uba_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */

	sc->sc_ui.ui_size = sizeof(struct qe_cdata);
	if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) {
		printf(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	bzero(sc->sc_qedata, sizeof(struct qe_cdata));
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we can avoid this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = ubasc->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out ethernet address and tell which type this card is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

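	/*
	 * Writing the low bit of the vector register and reading it back
	 * tells the two boards apart: a DELQA keeps the bit set, a DEQNA
	 * does not, which is how the printout below decides.
	 */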
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    sc->sc_dev.dv_xname, "intr");

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0;	/* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, error;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, 0);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
			len = m0->m_len;
			if (len == 0)
				continue;

			totlen += len;
			/* Word alignment calc */
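			/*
			 * The chip does word (16-bit) DMA: runt frames are
			 * padded to ETHER_MIN_LEN by stretching the last
			 * segment, odd start/end addresses are flagged with
			 * QE_ODDBEGIN/QE_ODDEND, and the length is bumped so
			 * the word count covers any odd leading or trailing
			 * byte.
			 */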
			orword = 0;
			if (totlen == m->m_pkthdr.len) {
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

 out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
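			/*
			 * The receive byte count is split between the two
			 * status words; the value the chip reports is 60
			 * less than the frame length, hence the correction.
			 */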
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put more packets in the queue */
	}
	/*
	 * How can the receive list get invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running,
			 * stop it by disabling the receive mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc)
			 */
			qe_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ec) :
		    ether_delmulti(ifr, &sc->sc_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;

	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put in queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup));	/* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i];	/* Own address */
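
	/*
	 * As built here, the 128-byte setup frame is two 64-byte halves,
	 * each holding up to seven station addresses stored column-wise:
	 * byte i of slot j lives at offset i * 8 + j (plus 64 for the
	 * second half). Our own address sits in slot 1 of the first half;
	 * the 0xff fill above leaves every unused slot as the broadcast
	 * address. The multicast slots are filled in below.
	 */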

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are more
	 * than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
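	/*
	 * Promiscuous reception is requested by sending the setup frame
	 * one word longer than normal (-65 instead of -64).
	 */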
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Do a reset of the interface, to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}