/*	$NetBSD: if_qe.c,v 1.83 2024/07/05 04:31:52 rin Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that are still to do:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.83 2024/07/05 04:31:52 rin Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <sys/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */

/*
 * Structure containing the elements that must be in DMA-safe memory.
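 * The device fetches descriptors and the setup packet directly over
 * the Qbus, so this structure must stay mapped through the Qbus map
 * for the lifetime of the interface (see ubmemalloc() in qeattach()).
 * Each ring is allocated with one extra descriptor, used as a chain
 * entry pointing back to the start so the array behaves as a ring.
 * Qbus addresses are 22 bits wide, which is why HIWORD() below keeps
 * only the six bits above the low word.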
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	uint8_t		qc_setup[128];		/* Setup packet layout */
};

struct qe_softc {
	device_t	sc_dev;		/* Configuration common part */
	struct uba_softc *sc_uh;	/* our parent */
	struct evcnt	sc_intrcnt;	/* Interrupt counting */
	struct ethercom	sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t	sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata	*sc_qedata;	/* Descriptor struct */
	struct qe_cdata	*sc_pqedata;	/* Unibus address of above */
	struct mbuf	*sc_txmbuf[TXDESCS];
	struct mbuf	*sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer */
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector */
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue */
};

static	int	qematch(device_t, cfdata_t, void *);
static	void	qeattach(device_t, device_t, void *);
static	int	qeinit(struct ifnet *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, void *);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)

#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
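 * The probe borrows the next free interrupt vector (uh_lastiv), maps
 * a scratch ring area onto the bus with uballoc(), and starts both a
 * setup transmit and a receive so the looped-back junk gives the
 * DEQNA a reason to interrupt through the new vector. Note that the
 * match is optimistic: once uballoc() succeeds, 1 is returned after
 * a fixed delay rather than by checking for the interrupt here.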
 */
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
	struct qe_softc ssc;
	struct qe_softc *sc = &ssc;
	struct uba_attach_args *ua = aux;
	struct uba_softc *uh = device_private(parent);
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct qe_ring *rp;
	int error, match;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK | M_ZERO);
	memset(sc, 0, sizeof(*sc));
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	uh->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (void *)&ring[0];
	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
		match = 0;
		goto out0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	match = 1;

	/*
	 * All done with the bus resources.
	 */
	ubfree(uh, &ui);
out0:	free(ring, M_TEMP);
	return match;
}

/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
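 * Attach allocates DMA-safe memory for the descriptor rings and the
 * setup packet, creates one DMA map per descriptor slot, hooks up the
 * interrupt handler, and reads the station address out of the first
 * six word registers (one address byte in the low byte of each).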
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_if;
	struct qe_ring *rp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char *)sc->sc_qedata) + sizeof(struct qe_cdata);

	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors up front,
	 * so that we can avoid doing this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			aprint_error(
			    ": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			aprint_error(
			    ": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			aprint_error(
			    ": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error(
		    ": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    ": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = sc->sc_uh->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and report which type this card is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	aprint_normal(": %s, hardware address %s\n",
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_init = qeinit;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
int
qeinit(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
		qc->qc_xmit[i].qe_addr_hi = 0;	/* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);

	return 0;
}

/*
 * Start output on interface.
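 * Each transmit descriptor takes a Qbus address split with LOWORD()/
 * HIWORD() and a length stored as a negative word (16-bit) count.
 * QE_ODDBEGIN/QE_ODDEND appear to flag buffers that start or end on
 * an odd byte so the device can handle the unaligned edges; frames
 * shorter than ETHER_PAD_LEN are padded from the preloaded null
 * buffer instead of by touching the mbuf chain.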
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			goto out;
		/*
		 * Count the number of mbufs in the chain. We always DMA
		 * directly from the mbufs, which is why the transmit
		 * ring is so big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m, BPF_D_OUT);
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop over it and set up a descriptor for each segment.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/* Word alignment calc */
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m_set_rcvif(m, ifp);
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if ((status1 & QE_ESETUP) == 0)
				if_percpuq_enqueue(ifp->if_percpuq, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT | QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			m_freem(sc->sc_txmbuf[idx]);
			sc->sc_txmbuf[idx] = NULL;
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put more in the queue */
	}
	/*
	 * How can the receive list become invalid???
	 * Verified that it happens anyway.
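	 * The recovery is to hand the device the address of the next
	 * expected receive descriptor whenever QE_RL_INVALID is set
	 * while that descriptor is still owned by the host.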
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(ifp);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running,
			 * stop it by disabling the receive mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(ifp);
			break;
		case IFF_UP | IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc.)
			 */
			qe_setup(sc);
			break;
		case 0:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	splx(s);
	return error;
}

/*
 * Add a receive buffer to the indicated descriptor.
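 * A fresh cluster mbuf is loaded into the slot's DMA map and its bus
 * address, offset by 2 bytes so that the IP header behind the 14-byte
 * ethernet header lands on a longword boundary, is written into the
 * receive descriptor.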
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2) / 2;

	return 0;
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
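	 * The 128-byte setup packet is read by the device as two
	 * 64-byte halves, each holding up to seven addresses stored as
	 * byte columns with a stride of 8: qc_setup[i * 8 + column
	 * (+ 64)] is byte i of an address. Column 1 of the first half
	 * carries our own address, the 0xff fill leaves the unused
	 * columns as the broadcast address, and columns 3-7 of the
	 * first half plus columns 1-7 of the second make up the 12
	 * multicast slots filled in below.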
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA turned into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Reset the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(ifp);
}