/*	$NetBSD: if_qe.c,v 1.40 2000/03/30 12:45:37 augustss Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that still need to be done:
 *	Have a timeout check for hung transmit logic.
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # of transmit descriptors */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};

struct qe_softc {
	struct device	sc_dev;		/* Configuration common part */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata	*sc_qedata;	/* Descriptor struct */
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above */
	bus_dmamap_t	sc_cmap;	/* Map for control structures */
	struct mbuf	*sc_txmbuf[TXDESCS];
	struct mbuf	*sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;	/* Next free transmit descriptor */
	int		sc_inq;		/* # of descriptors in transmit queue */
	int		sc_lastack;	/* Last transmit descriptor acked */
	int		sc_nextrx;	/* Next receive descriptor to check */
	int		sc_setup;	/* Setup packet in queue */
};

static	int qematch __P((struct device *, struct cfdata *, void *));
static	void qeattach __P((struct device *, struct device *, void *));
static	void qeinit __P((struct qe_softc *));
static	void qestart __P((struct ifnet *));
static	void qeintr __P((void *));
static	int qeioctl __P((struct ifnet *, u_long, caddr_t));
static	int qe_add_rxbuf __P((struct qe_softc *, int));
static	void qe_setup __P((struct qe_softc *));
static	void qetimeout __P((struct ifnet *));

struct	cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

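/*
 * A Qbus DMA address is 22 bits wide. Descriptors hold it as a 16-bit
 * low word plus a 6-bit high part; the upper bits of the high word are
 * used for the descriptor flag bits (QE_VALID, QE_CHAIN etc).
 */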
#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	bus_dmamap_t cmap;
	struct qe_softc ssc;
	struct qe_softc *sc = &ssc;
	struct uba_attach_args *ua = aux;
	struct uba_softc *ubasc = (struct uba_softc *)parent;

#define	PROBESIZE	(sizeof(struct qe_ring) * 4 + 128)
	struct qe_ring ring[15]; /* For diag purposes only */
	struct qe_ring *rp;
	int error;

	bzero(sc, sizeof(struct qe_softc));
	bzero(ring, PROBESIZE);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

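	/*
	 * Allocate a fresh interrupt vector for the probe; the uba code
	 * hands vectors out by counting uh_lastiv downward.
	 */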
	ubasc->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, PROBESIZE, 1, PROBESIZE, 0,
	    BUS_DMA_NOWAIT, &cmap))) {
		printf("qematch: bus_dmamap_create failed = %d\n", error);
		return 0;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, cmap, ring, PROBESIZE, 0,
	    BUS_DMA_NOWAIT))) {
		printf("qematch: bus_dmamap_load failed = %d\n", error);
		bus_dmamap_destroy(sc->sc_dmat, cmap);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)cmap->dm_segs[0].ds_addr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = 128;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = 128;

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	bus_dmamap_unload(sc->sc_dmat, cmap);
	bus_dmamap_destroy(sc->sc_dmat, cmap);
	return 1;
}

/*
 * Interface exists: make it available by filling in the network
 * interface record. The system will initialize the interface when it
 * is ready to accept packets.
 */
void
qeattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct uba_attach_args *ua = aux;
	struct uba_softc *ubasc = (struct uba_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, rseg, error;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct qe_cdata), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct qe_cdata), (caddr_t *)&sc->sc_qedata,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct qe_cdata), 1,
	    sizeof(struct qe_cdata), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cmap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap,
	    sc->sc_qedata, sizeof(struct qe_cdata), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	bzero(sc->sc_qedata, sizeof(struct qe_cdata));
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors up front,
	 * so that we do not have to do it each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

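	/*
	 * The extra descriptor at the end of each list has QE_CHAIN set
	 * and points back to the first entry, so the chip sees the
	 * descriptor arrays as rings.
	 */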
	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = ubasc->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and tell which type of card this is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

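	/*
	 * Writing the low bit of the vector register is what tells a
	 * DELQA apart from a DEQNA: the bit apparently sticks (reads
	 * back as 1) only on a DELQA.
	 */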
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr, sc);

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_qedata,
	    sizeof(struct qe_cdata));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
fail_0:
	return;
}

/*
 * Initialization of interface.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0;	/* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, error;
	short orword;

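	/*
	 * The receive enable bit doubles as a "running" indicator here;
	 * if it is off the interface has been stopped.
	 */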
	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splimp();
	while (sc->sc_inq < (TXDESCS - 1)) {
		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IF_DEQUEUE(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain. We always DMA
		 * directly from the mbufs, which is why the transmit
		 * ring is made so big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			IF_PREPEND(&sc->sc_if.if_snd, m);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop over the chain and set up a descriptor for
		 * each segment.
		 */
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, 0);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
			len = m0->m_len;
			if (len == 0)
				continue;

			totlen += len;
			/*
			 * Word alignment: the chip transfers 16-bit
			 * words, so odd start and end addresses must
			 * be flagged.
			 */
			orword = 0;
			if (totlen == m->m_pkthdr.len) {
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
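			/*
			 * Buffer lengths are handed to the chip as
			 * negative word counts.
			 */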
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct ether_header *eh;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

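	/*
	 * The interrupt-request bits are presumably write-one-to-clear,
	 * so writing them back acknowledges the interrupt; this write
	 * also enables the receiver, which qeinit left disabled.
	 */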
	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
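			/*
			 * The receive byte count is split across the
			 * two status words and is biased by 60; put it
			 * back together here.
			 */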
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
				    ETHER_ADDR_LEN) != 0 &&
				    ((eh->ether_dhost[0] & 1) == 0)) {
					m_freem(m);
					continue;
				}
			}
#endif
			/*
			 * ALLMULTI means PROMISC in this driver.
			 */
			if ((ifp->if_flags & IFF_ALLMULTI) &&
			    ((eh->ether_dhost[0] & 1) == 0) &&
			    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
			    ETHER_ADDR_LEN)) {
				m_freem(m);
				continue;
			}
			(*ifp->if_input)(ifp, m);
		}

	if (csr & QE_XMIT_INT) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put more in the queue */
	}
	/*
	 * How can the receive list become invalid?
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If the interface is marked down and it is
			 * running, stop it by disabling the receive
			 * mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If the interface is marked up and it is
			 * stopped, start it.
			 */
			qeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new
			 * changes (like IFF_PROMISC etc).
			 */
			qe_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ec) :
		    ether_delmulti(ifr, &sc->sc_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;

	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(sc, i)
	struct qe_softc *sc;
	int i;
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also make sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
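	/*
	 * Again a negative word count; the two bytes used for alignment
	 * are not part of the buffer.
	 */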
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(sc)
	struct qe_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splimp();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */
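	/*
	 * The 128-byte setup buffer is laid out as two 64-byte halves,
	 * each holding eight byte-columns; the chip takes one ethernet
	 * address per column, one byte per 8-byte row. Column 1 above is
	 * the station address, the multicast addresses go in the other
	 * columns below, and unused columns keep the 0xff fill, which is
	 * the broadcast address.
	 */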

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA turned into ALLMULTI mode?
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
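	/*
	 * A -64 word (128 byte) setup packet gives normal filtering;
	 * making the length one word longer seems to be how the chip is
	 * asked for promiscuous reception.
	 */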
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Reset the interface to get it going again.
	 * Would restarting just the transmit logic be enough?
	 */
	qeinit(sc);
}