/*	$NetBSD: if_qe.c,v 1.73.2.4 2017/08/28 17:52:26 skrll Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that are still to do:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.73.2.4 2017/08/28 17:52:26 skrll Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};

struct qe_softc {
	device_t	sc_dev;		/* Configuration common part */
	struct uba_softc *sc_uh;	/* our parent */
	struct evcnt	sc_intrcnt;	/* Interrupt counting */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata	*sc_qedata;	/* Descriptor struct */
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above */
	struct mbuf	*sc_txmbuf[TXDESCS];
	struct mbuf	*sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer */
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue */
};

static	int	qematch(device_t, cfdata_t, void *);
static	void	qeattach(device_t, device_t, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, void *);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

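/*
 * Descriptor address fields: a Qbus DMA address is at most 22 bits wide,
 * so it is split into a 16-bit low word and a high part of (at most) six
 * bits, hence the 0x3f mask in HIWORD below.
 */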
#define LOWORD(x)	((int)(x) & 0xffff)
#define HIWORD(x)	(((int)(x) >> 16) & 0x3f)

#define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
	struct qe_softc ssc;
	struct qe_softc *sc = &ssc;
	struct uba_attach_args *ua = aux;
	struct uba_softc *uh = device_private(parent);
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct qe_ring *rp;
	int error, match;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK|M_ZERO);
	memset(sc, 0, sizeof(*sc));
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	uh->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (void *)&ring[0];
	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
		match = 0;
		goto out0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
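	/*
	 * Buffer lengths are given as the negative of the size in 16-bit
	 * words (a DEQNA/DELQA convention used throughout this driver):
	 * -(1500/2) below describes a 1500-byte receive buffer.
	 */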
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	match = 1;

	/*
	 * All done with the bus resources.
	 */
	ubfree(uh, &ui);
out0:	free(ring, M_TEMP);
	return match;
}

/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */

	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char *)sc->sc_qedata) + sizeof(struct qe_cdata);
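	/*
	 * nullbuf is the ETHER_PAD_LEN bytes of zeroes just past the
	 * descriptor area; it is mapped below (sc_nulldmamap) and used by
	 * qestart() to pad runt frames up to the minimum ethernet length.
	 */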
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we can avoid this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			aprint_error(
			    ": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			aprint_error(
			    ": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			aprint_error(
			    ": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error(
		    ": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    ": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

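	/*
	 * The extra descriptor at the end of each array (hence RXDESCS+1
	 * and TXDESCS+1 in struct qe_cdata) is a QE_CHAIN entry whose
	 * address points back at entry 0, turning each list into a ring.
	 */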
	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = sc->sc_uh->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out ethernet address and tell which type this card is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

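	/*
	 * Writing the low bit of the vector register and reading it back is
	 * (apparently) how DELQA and DEQNA are told apart: only the DELQA
	 * latches the bit, which is what the test below relies on.
	 */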
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	aprint_normal(": %s, hardware address %s\n",
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0;	/* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen;
	short orword, csr;

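	/*
	 * The receiver is only enabled from the interrupt handler once the
	 * interface has been initialized (see qeinit()), so a clear
	 * QE_RCV_ENABLE bit means "not running yet": refuse to transmit.
	 */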
	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always DMA directly from the mbufs, which is why the
		 * transmit ring is so big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m);
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/* Word alignment calc */
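			/*
			 * Buffers are transferred as 16-bit words; len is
			 * rounded up to an even byte count below, and
			 * QE_ODDBEGIN/QE_ODDEND mark a buffer that starts
			 * or ends on an odd byte address.
			 */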
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

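			/*
			 * The received byte count is split across the two
			 * status words, and the chip apparently reports it
			 * less 60 bytes, hence the "+ 60" below.
			 */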
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m_set_rcvif(m, ifp);
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if ((status1 & QE_ESETUP) == 0)
				if_percpuq_enqueue(ifp->if_percpuq, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = NULL;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put in more in queue */
	}
	/*
	 * How can the receive list become invalid???
	 * It has been verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running,
			 * stop it (by disabling the receive mechanism).
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc)
			 */
			qe_setup(sc);
			break;
		case 0:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put in queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
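	/*
	 * The 128-byte setup frame is laid out as two 64-byte halves, each
	 * of which apparently holds up to seven station addresses written
	 * byte-by-byte down a column with an 8-byte stride; filling the
	 * whole frame with 0xff below leaves every unused slot set to the
	 * broadcast address.
	 */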
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are more
	 * than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;
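	/*
	 * The setup descriptor's length apparently doubles as a mode field:
	 * -65 instead of the usual -64 is what puts the device into
	 * promiscuous reception.
	 */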

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Do a reset of the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}