/*	$NetBSD: if_qe.c,v 1.71 2010/04/05 07:21:47 joerg Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things still to do:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.71 2010/04/05 07:21:47 joerg Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # of transmit descriptors */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};

struct qe_softc {
	device_t	sc_dev;		/* Configuration common part */
	struct uba_softc *sc_uh;	/* our parent */
	struct evcnt	sc_intrcnt;	/* Interrupt counting */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata	*sc_qedata;	/* Descriptor struct */
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above */
	struct mbuf	*sc_txmbuf[TXDESCS];
	struct mbuf	*sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer */
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue */
};

static	int	qematch(device_t, cfdata_t, void *);
static	void	qeattach(device_t, device_t, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, void *);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

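/*
 * Split a 22-bit Qbus DMA address into the 16-bit low word and the
 * 6-bit high word that the descriptors and CSRs expect.
 */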
#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)

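/*
 * Frames shorter than this get padded out; ETHER_MIN_LEN includes the
 * 4-byte CRC, which the chip appends by itself.
 */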
#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
	struct qe_softc ssc;
	struct qe_softc *sc = &ssc;
	struct uba_attach_args *ua = aux;
	struct uba_softc *uh = device_private(parent);
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct qe_ring *rp;
	int error;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK|M_ZERO);
	memset(sc, 0, sizeof(*sc));
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

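	/*
	 * Grab a fresh interrupt vector from the top of the Qbus vector
	 * space and point the (still reset) card at it.
	 */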
	uh->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (void *)&ring[0];
	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
		free(ring, M_TEMP);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
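	/*
	 * Buffer lengths (qe_buf_len) are stored as negative counts of
	 * 16-bit words; that is the form the hardware expects.
	 */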
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	ubfree(uh, &ui);
	free(ring, M_TEMP);
	return 1;
}

/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */

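	/*
	 * The extra ETHER_PAD_LEN bytes past the descriptors hold a
	 * block of zeroes that is used to pad short transmit frames
	 * up to the ethernet minimum length.
	 */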
	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char *)sc->sc_qedata) + sizeof(struct qe_cdata);
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we can avoid this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			aprint_error(
			    ": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			aprint_error(
			    ": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			aprint_error(
			    ": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error(
		    ": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    ": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

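	/*
	 * The extra descriptor at the end of each list carries QE_CHAIN
	 * and points back to the first one, turning the lists into rings.
	 */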
	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = sc->sc_uh->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and tell which type this card is.
	 * The station address appears in the low byte of the first six
	 * CSR words.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	aprint_normal(": %s, hardware address %s\n",
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1);	/* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
		qc->qc_xmit[i].qe_addr_hi = 0;	/* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen, error;
	short orword, csr;

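	/*
	 * If the receiver is not enabled the interface has not been
	 * initialized yet; do not touch the transmit list.
	 */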
	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always DMA directly from the mbufs, which is why
		 * the transmit ring is so big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m);
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				error = bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				/* Pad short frames from the zero buffer. */
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/*
			 * Word alignment: the chip transfers 16-bit words.
			 * Flag buffers that begin or end on an odd byte
			 * address, and extend the length so the word count
			 * covers any odd leading or trailing byte.
			 */
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

 out:	if (sc->sc_inq)
		ifp->if_timer = 5;	/* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
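			/*
			 * The reported receive byte count excludes the
			 * first 60 bytes of the frame; add them back in.
			 */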
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			bpf_mtap(ifp, m);
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

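	/*
	 * Reclaim transmit descriptors that the chip has finished with.
	 */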
	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = NULL;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp);	/* Queue up more packets */
	}
	/*
	 * How can the receive list become invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If the interface is marked down and it is running,
			 * stop it (by disabling the receive mechanism).
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			qeinit(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc.)
			 */
			qe_setup(sc);
			break;
		case 0:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
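	/*
	 * Layout of the 128-byte setup frame: byte i of the address in
	 * column j is stored at offset i * 8 + j, and the second half
	 * of the frame (offset 64) holds another set of columns.
	 */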
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
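	/*
	 * A setup frame one word longer than normal (65 rather than 64)
	 * is what switches the chip to promiscuous reception.
	 */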
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Do a reset of the interface, to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}