/*	$NetBSD: sgec.c,v 1.36 2008/11/07 00:20:03 dyoung Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Lule}, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the SGEC (Second Generation Ethernet Controller), found
 * on, for example, the VAX 4000/300 (KA670).
 *
 * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
 *
 * Even though the chip is capable of using virtual addresses (reading
 * the System Page Table directly), this driver doesn't do so, and there
 * is no benefit in doing so in NetBSD today either.
 *
 * Things still to do:
 *	Collect statistics.
 *	Use imperfect filtering when there are many multicast addresses.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.36 2008/11/07 00:20:03 dyoung Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <sys/bus.h>

#include <dev/ic/sgecreg.h>
#include <dev/ic/sgecvar.h>

static	void	zeinit(struct ze_softc *);
static	void	zestart(struct ifnet *);
static	int	zeioctl(struct ifnet *, u_long, void *);
static	int	ze_add_rxbuf(struct ze_softc *, int);
static	void	ze_setup(struct ze_softc *);
static	void	zetimeout(struct ifnet *);
static	bool	zereset(struct ze_softc *);

#define	ZE_WCSR(csr, val) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define	ZE_RCSR(csr) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)

/*
 * Interface exists: make it available by filling in the network interface
 * record. The system will initialize the interface when it is ready
 * to accept packets.
 */
void
sgec_attach(struct ze_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ze_tdes *tp;
	struct ze_rdes *rp;
	bus_dma_segment_t seg;
	int i, rseg, error;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ze_cdata),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct ze_cdata),
	    (void **)&sc->sc_zedata, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error) {
		aprint_error(
		    ": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ze_cdata), 1,
	    sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT, &sc->sc_cmap);
	if (error) {
		aprint_error(
		    ": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap, sc->sc_zedata,
	    sizeof(struct ze_cdata), NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error(
		    ": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));

	/*
	 * Create the transmit descriptor DMA maps.
	 */
	for (i = 0; error == 0 && i < TXDESCS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    TXDESCS - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]);
	}
	if (error) {
		aprint_error(": unable to create tx DMA map %d, error = %d\n",
		    i, error);
		goto fail_4;
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; error == 0 && i < RXDESCS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]);
	}
	if (error) {
		aprint_error(": unable to create rx DMA map %d, error = %d\n",
		    i, error);
		goto fail_5;
	}

	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; error == 0 && i < RXDESCS; i++) {
		error = ze_add_rxbuf(sc, i);
	}

	if (error) {
		aprint_error(
		    ": unable to allocate or map rx buffer %d, error = %d\n",
		    i, error);
		goto fail_6;
	}

	/* For vmstat -i. */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(sc->sc_dev), "intr");
	evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "rx intr");
	evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx intr");
	evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx drain");
	evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "nobuf intr");
	evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "no intr");

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

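	/*
	 * The extra descriptor at the end of each ring has the
	 * ZE_RDES1_CA/ZE_TDES1_CA (chain address) bit set and its buffer
	 * address points back at the first descriptor, so the chip wraps
	 * around the ring on its own.
	 */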
	rp = sc->sc_zedata->zc_recv;
	rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
	rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
	rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;

	tp = sc->sc_zedata->zc_xmit;
	tp[TXDESCS].ze_tdr = ZE_TDR_OW;
	tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
	tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;

	if (zereset(sc))
		return;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = zestart;
	ifp->if_ioctl = zeioctl;
	ifp->if_watchdog = zetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	aprint_normal("\n");
	aprint_normal_dev(sc->sc_dev, "hardware address %s\n",
	    ether_sprintf(sc->sc_enaddr));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata,
	    sizeof(struct ze_cdata));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Initialization of interface.
 */
void
zeinit(struct ze_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ze_cdata *zc = sc->sc_zedata;
	int i;

	/*
	 * Reset the interface.
	 */
	if (zereset(sc))
		return;

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = sc->sc_txcnt = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i]->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
		if (sc->sc_txmbuf[i]) {
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		zc->zc_xmit[i].ze_tdr = 0;	/* Clear valid bit */
	}


	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
	sc->sc_nextrx = 0;

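	/*
	 * Start the chip: enable interrupts (IE) and kick the transmit
	 * (ST) and receive (SR) engines into their running states.
	 */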
	ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
	    ZE_NICSR6_SR|ZE_NICSR6_DC);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	ze_setup(sc);

}

/*
 * Start output on interface.
 */
void
zestart(struct ifnet *ifp)
{
	struct ze_softc *sc = ifp->if_softc;
	struct ze_cdata *zc = sc->sc_zedata;
	paddr_t buffer;
	struct mbuf *m;
	int nexttx, starttx;
	int len, i, totlen, error;
	int old_inq = sc->sc_inq;
	uint16_t orword, tdr = 0;
	bus_dmamap_t map;

	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			ze_setup(sc);
			continue;
		}
		nexttx = sc->sc_nexttx;
		IFQ_POLL(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		map = sc->sc_xmtmap[nexttx];
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "zestart: load_mbuf failed: %d", error);
			goto out;
		}

		if (map->dm_nsegs >= TXDESCS)
			panic("zestart");	/* XXX */

		if ((map->dm_nsegs + sc->sc_inq) >= (TXDESCS - 1)) {
			bus_dmamap_unload(sc->sc_dmat, map);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		orword = ZE_TDES1_FS;
		starttx = nexttx;
		for (i = 0; i < map->dm_nsegs; i++) {
			buffer = map->dm_segs[i].ds_addr;
			len = map->dm_segs[i].ds_len;

			KASSERT(len > 0);

			totlen += len;
			/* Last segment of the packet? */
			if (totlen == m->m_pkthdr.len) {
				sc->sc_txcnt += map->dm_nsegs;
				if (sc->sc_txcnt >= TXDESCS * 3 / 4) {
					orword |= ZE_TDES1_IC;
					sc->sc_txcnt = 0;
				}
				orword |= ZE_TDES1_LS;
				sc->sc_txmbuf[nexttx] = m;
			}
			zc->zc_xmit[nexttx].ze_bufsize = len;
			zc->zc_xmit[nexttx].ze_bufaddr = (char *)buffer;
			zc->zc_xmit[nexttx].ze_tdes1 = orword;
			zc->zc_xmit[nexttx].ze_tdr = tdr;

			if (++nexttx == TXDESCS)
				nexttx = 0;
			orword = 0;
			tdr = ZE_TDR_OW;
		}

		sc->sc_inq += map->dm_nsegs;

		IFQ_DEQUEUE(&ifp->if_snd, m);
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("zestart: len fault");
#endif
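		/*
		 * The descriptors for the second and later segments were
		 * handed to the chip (ZE_TDR_OW) as they were filled in
		 * above; the first descriptor is released only now, once
		 * the whole chain is valid, so the chip never sees a
		 * partially built frame.
		 */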
		/*
		 * Turn ownership of the packet over to the device.
		 */
		zc->zc_xmit[starttx].ze_tdr = ZE_TDR_OW;

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);
		sc->sc_nexttx = nexttx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (old_inq < sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
}

int
sgec_intr(struct ze_softc *sc)
{
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, len;

	csr = ZE_RCSR(ZE_CSR5);
	if ((csr & ZE_NICSR5_IS) == 0) {	/* Not our interrupt */
		sc->sc_nointrcnt.ev_count++;
		return 0;
	}
	ZE_WCSR(ZE_CSR5, csr);

	if (csr & ZE_NICSR5_RU)
		sc->sc_nobufintrcnt.ev_count++;

	if (csr & ZE_NICSR5_RI) {
		sc->sc_rxintrcnt.ev_count++;
		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
		    ZE_FRAMELEN_OW) == 0) {

			ifp->if_ipackets++;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
			ze_add_rxbuf(sc, sc->sc_nextrx);
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if (len < ETHER_MIN_LEN) {
				ifp->if_ierrors++;
				m_freem(m);
			} else {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = m->m_len =
				    len - ETHER_CRC_LEN;
#if NBPFILTER > 0
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, m);
#endif
				(*ifp->if_input)(ifp, m);
			}
		}
	}

	if (csr & ZE_NICSR5_TI)
		sc->sc_txintrcnt.ev_count++;
	if (sc->sc_lastack != sc->sc_nexttx) {
		int lastack;
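		/*
		 * Walk forward from the last acknowledged descriptor,
		 * reclaiming DMA maps and mbufs for frames the chip has
		 * finished with, until we reach a descriptor it still
		 * owns. Setup frames occupy a single descriptor and have
		 * no mbuf or DMA map to release.
		 */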
		for (lastack = sc->sc_lastack; lastack != sc->sc_nexttx; ) {
			bus_dmamap_t map;
			int nlastack;

			if ((zc->zc_xmit[lastack].ze_tdr & ZE_TDR_OW) != 0)
				break;

			if ((zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_DT) ==
			    ZE_TDES1_DT_SETUP) {
				if (++lastack == TXDESCS)
					lastack = 0;
				sc->sc_inq--;
				continue;
			}

			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_FS);
			map = sc->sc_xmtmap[lastack];
			KASSERT(map->dm_nsegs > 0);
			nlastack = (lastack + map->dm_nsegs - 1) % TXDESCS;
			if (zc->zc_xmit[nlastack].ze_tdr & ZE_TDR_OW)
				break;
			lastack = nlastack;
			if (sc->sc_txcnt > map->dm_nsegs)
				sc->sc_txcnt -= map->dm_nsegs;
			else
				sc->sc_txcnt = 0;
			sc->sc_inq -= map->dm_nsegs;
			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_LS);
			ifp->if_opackets++;
			bus_dmamap_unload(sc->sc_dmat, map);
			KASSERT(sc->sc_txmbuf[lastack]);
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, sc->sc_txmbuf[lastack]);
#endif
			m_freem(sc->sc_txmbuf[lastack]);
			sc->sc_txmbuf[lastack] = 0;
			if (++lastack == TXDESCS)
				lastack = 0;
		}
		if (lastack != sc->sc_lastack) {
			sc->sc_txdraincnt.ev_count++;
			sc->sc_lastack = lastack;
			if (sc->sc_inq == 0)
				ifp->if_timer = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			zestart(ifp);	/* Queue up more packets */
		}
	}
	return 1;
}

/*
 * Process an ioctl request.
 */
int
zeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ze_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			zeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If the interface is marked down and it is running,
			 * stop it (by disabling the receive mechanism).
			 */
			ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
			    ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			zeinit(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc.)
			 */
			ze_setup(sc);
			break;
		case 0:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				ze_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);

	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ze_add_rxbuf(struct ze_softc *sc, int i)
{
	struct mbuf *m;
	struct ze_rdes *rp;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
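	/* 2 + ETHER_HDR_LEN (14) = 16, so the IP header ends up on a longword boundary. */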
	m->m_data += 2;
	rp = &sc->sc_zedata->zc_recv[i];
	rp->ze_bufsize = (m->m_ext.ext_size - 2);
	rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp->ze_framelen = ZE_FRAMELEN_OW;

	return (0);
}

/*
 * Create a setup packet and put in queue for sending.
 */
void
ze_setup(struct ze_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	const u_int8_t *enaddr = CLLADDR(ifp->if_sadl);
	int j, idx, reg;

	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
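	/*
	 * The 128-byte setup buffer holds the chip's address filter as
	 * sixteen 8-byte slots, each carrying one 6-byte station address.
	 * Pre-filling the whole buffer with 0xff makes every unused slot
	 * match the broadcast address; our own address goes in the first
	 * slot and multicast addresses are filled in from offset 16 below.
	 */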
	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup));	/* Broadcast */
	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);

	/*
	 * Multicast handling. The SGEC can handle up to 16 direct
	 * ethernet addresses.
	 */
	j = 16;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
		j += 8;
		ETHER_NEXT_MULTI(step, enm);
		if ((enm != NULL) && (j == 128)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
	}

	/*
	 * ALLMULTI implies PROMISC in this driver.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;

	/*
	 * Fiddle with the receive logic.
	 */
	reg = ZE_RCSR(ZE_CSR6);
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR);	/* Stop rx */
	reg &= ~ZE_NICSR6_AF;
	if (ifp->if_flags & IFF_PROMISC)
		reg |= ZE_NICSR6_AF_PROM;
	else if (ifp->if_flags & IFF_ALLMULTI)
		reg |= ZE_NICSR6_AF_ALLM;
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg);
	/*
	 * Only send a setup packet if needed.
	 */
	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
		idx = sc->sc_nexttx;
		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
		zc->zc_xmit[idx].ze_bufsize = 128;
		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);

		sc->sc_inq++;
		if (++sc->sc_nexttx == TXDESCS)
			sc->sc_nexttx = 0;
	}
}

/*
 * Check for dead transmit logic.
 */
void
zetimeout(struct ifnet *ifp)
{
	struct ze_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Do a reset of the interface to get it going again.
	 * Would it be enough to just restart the transmit logic?
	 */
	zeinit(sc);
}

/*
 * Reset chip:
 * Set/reset the reset flag.
 * Write interrupt vector.
 * Write ring buffer addresses.
 * Write SBR.
 */
bool
zereset(struct ze_softc *sc)
{
	int reg, i;

	ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
	DELAY(50000);
	if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
		aprint_error_dev(sc->sc_dev, "selftest failed\n");
		return true;
	}

	/*
	 * Get the vector that was set at match time, and remember it.
	 * WHICH VECTOR TO USE? Take one unused. XXX
	 * Funny way to set the vector, as described in the programmer's
	 * manual.
	 */
	reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
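	/*
	 * The chip may not accept the CSR0 write on the first attempt,
	 * so rewrite it until the register reads back what we wrote.
	 */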
	i = 10;
	do {
		if (i-- == 0) {
			aprint_error_dev(sc->sc_dev,
			    "failing SGEC CSR0 init\n");
			return true;
		}
		ZE_WCSR(ZE_CSR0, reg);
	} while (ZE_RCSR(ZE_CSR0) != reg);

	ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
	ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
	return false;
}