/*	$NetBSD: if_qe.c,v 1.70 2010/01/19 22:07:43 pooka Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that still need to be done:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.70 2010/01/19 22:07:43 pooka Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # of transmit descriptors */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};
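
/*
 * Both rings have one extra slot; qeattach() turns it into a chain
 * descriptor pointing back to the start of the ring, so the device
 * just follows the ring around forever.
 */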

struct qe_softc {
	device_t	sc_dev;		/* Configuration common part */
	struct uba_softc *sc_uh;	/* our parent */
	struct evcnt	sc_intrcnt;	/* Interrupt counting */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata *sc_qedata;	/* Descriptor struct */
	struct qe_cdata *sc_pqedata;	/* Unibus address of above */
	struct mbuf	*sc_txmbuf[TXDESCS];
	struct mbuf	*sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer */
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;	/* Next free transmit descriptor */
	int		sc_inq;		/* Transmit descriptors in flight */
	int		sc_lastack;	/* Next tx descriptor to acknowledge */
	int		sc_nextrx;	/* Next receive descriptor to check */
	int		sc_setup;	/* Setup packet in queue */
};

static	int qematch(device_t, cfdata_t, void *);
static	void qeattach(device_t, device_t, void *);
static	void qeinit(struct qe_softc *);
static	void qestart(struct ifnet *);
static	void qeintr(void *);
static	int qeioctl(struct ifnet *, u_long, void *);
static	int qe_add_rxbuf(struct qe_softc *, int);
static	void qe_setup(struct qe_softc *);
static	void qetimeout(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

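/*
 * Descriptor addresses are 22-bit Qbus addresses: LOWORD gives the low
 * 16 bits and HIWORD the upper six (hence the 0x3f mask), which share
 * a word with the descriptor flag bits.
 */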
#define LOWORD(x)	((int)(x) & 0xffff)
#define HIWORD(x)	(((int)(x) >> 16) & 0x3f)

#define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
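/*
 * Frames shorter than the ethernet minimum are padded out of a zeroed,
 * separately DMA-mapped buffer (nullbuf); the chip appends the CRC
 * itself, hence the ETHER_CRC_LEN adjustment.
 */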

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
	struct qe_softc ssc;
	struct qe_softc *sc = &ssc;
	struct uba_attach_args *ua = aux;
	struct uba_softc *uh = device_private(parent);
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct qe_ring *rp;
	int error;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK|M_ZERO);
	memset(sc, 0, sizeof(*sc));
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	uh->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (void *)&ring[0];
	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
		free(ring, M_TEMP);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
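	/*
	 * Buffer lengths are handed to the device as negative word
	 * counts, which is why qe_buf_len is always -(bytes/2).
	 */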
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	ubfree(uh, &ui);
	free(ring, M_TEMP);
	return 1;
}

/*
 * Interface exists: make it available by filling in the network
 * interface record. The system will initialize the interface when
 * it is ready to accept packets.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */

	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char *)sc->sc_qedata) + sizeof(struct qe_cdata);

	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we do not have to allocate them each time we send
	 * a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			aprint_error(
			    ": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			aprint_error(
			    ": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}

	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			aprint_error(
			    ": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error(
		    ": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    ": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = sc->sc_uh->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and report which type of card
	 * this is.
	 */
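	/*
	 * The address PROM is presented one byte at a time in the low
	 * byte of the first six CSR words.
	 */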
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

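	/*
	 * The low bit of the vector register reads back as set only on
	 * a DELQA; that is how the two card types are told apart.
	 */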
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	aprint_normal(": %s, hardware address %s\n",
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;

	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen, error;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			goto out;
		/*
		 * Count the number of mbufs in the chain. We always
		 * DMA directly from the mbufs, which is why the
		 * transmit ring is so big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m);
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop over it and fill in the descriptors.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				/* XXX: a load error here is ignored */
				error = bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
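			/*
			 * The DEQNA cannot start or end a DMA transfer
			 * on an odd byte address; odd boundaries are
			 * flagged with QE_ODDBEGIN/QE_ODDEND and the
			 * length rounded up to a full word.
			 */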
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
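			/*
			 * The chip reports the frame length biased by
			 * 60 bytes; correct for that here.
			 */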
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if (ifp->if_bpf)
				bpf_ops->bpf_mtap(ifp->if_bpf, m);
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = NULL;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Queue up more packets */
	}

	/*
	 * How can the receive list become invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If the interface is marked down and it is
			 * running, stop it by disabling the receive
			 * mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If the interface is marked up and it is
			 * stopped, then start it.
			 */
			qeinit(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc.)
			 */
			qe_setup(sc);
			break;
		case 0:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
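	/*
	 * The setup buffer holds the filter addresses column-wise:
	 * byte i of an address lives at offset i*8 + column. Column 1
	 * holds our own address; columns 3-7 of the first 64-byte half
	 * (k == 0) and 1-7 of the second half (k == 64) give the 12
	 * multicast slots.
	 */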
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA turned into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Reset the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}