/*	$NetBSD: if_qe.c,v 1.82 2024/03/25 05:37:45 mrg Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that are still to do:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.82 2024/03/25 05:37:45 mrg Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <sys/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	uint8_t		qc_setup[128];		/* Setup packet layout */
};

struct qe_softc {
	device_t	sc_dev;		/* Configuration common part */
	struct uba_softc *sc_uh;	/* our parent */
	struct evcnt	sc_intrcnt;	/* Interrupt counting */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata	*sc_qedata;	/* Descriptor struct */
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above */
	struct mbuf	*sc_txmbuf[TXDESCS];
	struct mbuf	*sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer */
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue */
};

static	int	qematch(device_t, cfdata_t, void *);
static	void	qeattach(device_t, device_t, void *);
static	int	qeinit(struct ifnet *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, void *);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

#define QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

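/*
 * Helpers to split a DMA address for the descriptor fields: the low 16
 * bits and the remaining high bits (6 of them, matching the 22-bit
 * Q-bus address space) go into separate descriptor words.
 */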
#define LOWORD(x)	((int)(x) & 0xffff)
#define HIWORD(x)	(((int)(x) >> 16) & 0x3f)

#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
	struct qe_softc ssc;
	struct qe_softc *sc = &ssc;
	struct uba_attach_args *ua = aux;
	struct uba_softc *uh = device_private(parent);
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct qe_ring *rp;
	int error, match;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK | M_ZERO);
	memset(sc, 0, sizeof(*sc));
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

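	/*
	 * Reserve the next free interrupt vector on the bus (vectors
	 * are spaced four bytes apart, hence the -= 4) and hand it to
	 * the card while it is held in reset.
	 */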
	uh->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (void *)&ring[0];
	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
		match = 0;
		goto out0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)ui.ui_baddr;
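	/*
	 * Note: buffer lengths in the descriptors are stored as negative
	 * word (not byte) counts, which is how the DEQNA expects them;
	 * the same convention is used throughout this driver.
	 */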
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	match = 1;

	/*
	 * All done with the bus resources.
	 */
	ubfree(uh, &ui);
out0:	free(ring, M_TEMP);
	return match;
}

/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_if;
	struct qe_ring *rp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */

	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char *)sc->sc_qedata) + sizeof(struct qe_cdata);
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors up front,
	 * so that we can avoid doing it each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			aprint_error(
			    ": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			aprint_error(
			    ": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			aprint_error(
			    ": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error(
		    ": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    ": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = sc->sc_uh->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and report which type this card is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	aprint_normal(": %s, hardware address %s\n",
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_init = qeinit;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
int
qeinit(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0;	/* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);

	return 0;
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always do DMA directly from the mbufs, which is why
		 * the transmit ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
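		/*
		 * Runts are padded out to the minimum frame length by
		 * tacking on one extra segment that points at the
		 * zero-filled pad buffer loaded into sc_nulldmamap at
		 * attach time (hence the extra descriptor counted above).
		 */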
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m, BPF_D_OUT);
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/*
			 * Word alignment calc: the device transfers
			 * 16-bit words, so odd start/end addresses are
			 * flagged and the length padded to cover the
			 * partial words at either end.
			 */
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

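	/*
	 * Acknowledge the interrupt and re-enable the receiver in one
	 * CSR write; I take the interrupt bits to be write-one-to-clear
	 * here, which is the usual convention for this kind of device.
	 */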
	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

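			/*
			 * The received byte count is split between the
			 * two status words; the chip apparently reports
			 * it less 60, hence the correction below.
			 */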
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m_set_rcvif(m, ifp);
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if ((status1 & QE_ESETUP) == 0)
				if_percpuq_enqueue(ifp->if_percpuq, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT | QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = NULL;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp);	/* Put more in the queue */
	}
	/*
	 * How can the receive list become invalid???
	 * It has been verified to happen anyway, so restart it here.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(ifp);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If the interface is marked down and it is
			 * running, stop it by disabling the receive
			 * mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If the interface is marked up and it is
			 * stopped, then start it.
			 */
			qeinit(ifp);
			break;
		case IFF_UP | IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc.)
			 */
			qe_setup(sc);
			break;
		case 0:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	splx(s);
	return error;
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2) / 2;

	return 0;
}

/*
 * Create a setup packet and put in queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
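	/*
	 * Layout note (from reading the code, not a spec): the 128-byte
	 * setup buffer is treated as a table of addresses stored one
	 * byte per row with a stride of 8, so each target address
	 * occupies a column; filling it with 0xff first enables
	 * reception of broadcasts.
	 */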
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA turned into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
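	/*
	 * The setup frame length doubles as a mode flag: -65 instead of
	 * -64 is what the driver uses to ask for promiscuous reception.
	 * (My understanding is that the DEQNA encodes receive modes in
	 * the low bits of the setup packet length.)
	 */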
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Reset the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(ifp);
}