if_vr.c revision 1.61.2.1 1 /* $NetBSD: if_vr.c,v 1.61.2.1 2004/08/03 10:49:09 skrll Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1997, 1998
42 * Bill Paul <wpaul (at) ctr.columbia.edu>. All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. All advertising materials mentioning features or use of this software
53 * must display the following acknowledgement:
54 * This product includes software developed by Bill Paul.
55 * 4. Neither the name of the author nor the names of any co-contributors
56 * may be used to endorse or promote products derived from this software
57 * without specific prior written permission.
58 *
59 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
63 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
64 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
65 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
66 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
67 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
68 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
69 * THE POSSIBILITY OF SUCH DAMAGE.
70 *
71 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
72 */
73
74 /*
75 * VIA Rhine fast ethernet PCI NIC driver
76 *
77 * Supports various network adapters based on the VIA Rhine
78 * and Rhine II PCI controllers, including the D-Link DFE530TX.
79 * Datasheets are available at http://www.via.com.tw.
80 *
81 * Written by Bill Paul <wpaul (at) ctr.columbia.edu>
82 * Electrical Engineering Department
83 * Columbia University, New York City
84 */
85
86 /*
87 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
89 * uses an MII bus and an external physical layer interface. The
90 * receiver has a one entry perfect filter and a 64-bit hash table
91 * multicast filter. Transmit and receive descriptors are similar
92 * to the tulip.
93 *
94 * The Rhine has a serious flaw in its transmit DMA mechanism:
95 * transmit buffers must be longword aligned. Unfortunately,
96 * the kernel doesn't guarantee that mbufs will be filled in starting
97 * at longword boundaries, so we have to do a buffer copy before
98 * transmission.
99 *
100 * Apparently, the receive DMA mechanism also has the same flaw. This
101 * means that on systems with struct alignment requirements, incoming
102 * frames must be copied to a new buffer which shifts the data forward
103 * 2 bytes so that the payload is aligned on a 4-byte boundary.
104 */
105
106 #include <sys/cdefs.h>
107 __KERNEL_RCSID(0, "$NetBSD: if_vr.c,v 1.61.2.1 2004/08/03 10:49:09 skrll Exp $");
108
109 #include "rnd.h"
110
111 #include <sys/param.h>
112 #include <sys/systm.h>
113 #include <sys/callout.h>
114 #include <sys/sockio.h>
115 #include <sys/mbuf.h>
116 #include <sys/malloc.h>
117 #include <sys/kernel.h>
118 #include <sys/socket.h>
119 #include <sys/device.h>
120
121 #if NRND > 0
122 #include <sys/rnd.h>
123 #endif
124
125 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
126
127 #include <net/if.h>
128 #include <net/if_arp.h>
129 #include <net/if_dl.h>
130 #include <net/if_media.h>
131 #include <net/if_ether.h>
132
133 #include "bpfilter.h"
134 #if NBPFILTER > 0
135 #include <net/bpf.h>
136 #endif
137
138 #include <machine/bus.h>
139 #include <machine/intr.h>
140 #include <machine/endian.h>
141
142 #include <dev/mii/mii.h>
143 #include <dev/mii/miivar.h>
144 #include <dev/mii/mii_bitbang.h>
145
146 #include <dev/pci/pcireg.h>
147 #include <dev/pci/pcivar.h>
148 #include <dev/pci/pcidevs.h>
149
150 #include <dev/pci/if_vrreg.h>
151
152 #define VR_USEIOSPACE
153
/*
 * Various supported device vendors/types and their names.
 * Presumably consulted by the PCI match/attach code (not visible in
 * this chunk); the all-zero entry terminates the table.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;		/* PCI vendor ID */
	pci_product_id_t	vr_did;		/* PCI product ID */
	const char		*vr_name;	/* human-readable description */
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
		"VIA VT3043 (Rhine) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102,
		"VIA VT6102 (Rhine II) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105,
		"VIA VT6105 (Rhine III) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
		"VIA VT86C100A (Rhine-II) 10/100" },
	{ 0, 0, NULL }
};
172
/*
 * Transmit descriptor list size.  A power of two, so VR_NEXTTX() can
 * wrap with a mask instead of a modulus.
 */
#define VR_NTXDESC		64
#define VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.  Also a power of two (see above).
 */
#define VR_NRXDESC		64
#define VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)

/*
 * Control data structures that are DMA'd to the Rhine chip. We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];	/* TX descriptor ring */
	struct vr_desc		vr_rxdescs[VR_NRXDESC];	/* RX descriptor ring */
};

/* Byte offsets of the rings within the control-data clump. */
#define VR_CDOFF(x)	offsetof(struct vr_control_data, x)
#define VR_CDTXOFF(x)	VR_CDOFF(vr_txdescs[(x)])
#define VR_CDRXOFF(x)	VR_CDOFF(vr_rxdescs[(x)])

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf *ds_mbuf;		/* head of mbuf chain */
	bus_dmamap_t ds_dmamap;		/* our DMA map */
};
211
/*
 * Per-device software state for one Rhine instance.
 */
struct vr_softc {
	struct device		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	void			*vr_ats;	/* shutdown hook */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	struct ethercom		vr_ec;		/* Ethernet common info */
	u_int8_t		vr_enaddr[ETHER_ADDR_LEN];	/* station address */
	struct mii_data		vr_mii;		/* MII/media info */

	u_int8_t		vr_revid;	/* Rhine chip revision */

	struct callout		vr_tick_ch;	/* tick callout */

	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
	/* Bus address of the control-data clump (single segment). */
#define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data	*vr_control_data;

	int	vr_txpending;		/* number of TX requests pending */
	int	vr_txdirty;		/* first dirty TX descriptor */
	int	vr_txlast;		/* last used TX descriptor */

	int	vr_rxptr;		/* next ready RX descriptor */

#if NRND > 0
	rndsource_element_t	rnd_source;	/* random source */
#endif
};
252
/* Bus (DMA) addresses of the TX/RX descriptors. */
#define VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

/* Kernel-virtual pointers to the TX/RX descriptors. */
#define VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

/* Software (mbuf/DMA-map) state for the TX/RX descriptors. */
#define VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

/* bus_dmamap_sync() a single TX descriptor. */
#define VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

/* bus_dmamap_sync() a single RX descriptor. */
#define VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))

/*
 * (Re)initialize RX descriptor (i): chain it to the next slot, set the
 * buffer address/length from its DMA map, and hand ownership back to
 * the chip.  Note we rely on MCLBYTES being a power of two below.
 */
#define VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/* CONSTCOND */ 0)

/*
 * register space access macros
 */
#define CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define CSR_READ_4(sc, reg)		\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define CSR_READ_2(sc, reg)		\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define CSR_READ_1(sc, reg)		\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

/* Iterations to poll for reset completion in vr_reset(). */
#define VR_TIMEOUT		1000
305
306 static int vr_add_rxbuf __P((struct vr_softc *, int));
307
308 static void vr_rxeof __P((struct vr_softc *));
309 static void vr_rxeoc __P((struct vr_softc *));
310 static void vr_txeof __P((struct vr_softc *));
311 static int vr_intr __P((void *));
312 static void vr_start __P((struct ifnet *));
313 static int vr_ioctl __P((struct ifnet *, u_long, caddr_t));
314 static int vr_init __P((struct ifnet *));
315 static void vr_stop __P((struct ifnet *, int));
316 static void vr_rxdrain __P((struct vr_softc *));
317 static void vr_watchdog __P((struct ifnet *));
318 static void vr_tick __P((void *));
319
320 static int vr_ifmedia_upd __P((struct ifnet *));
321 static void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
322
323 static int vr_mii_readreg __P((struct device *, int, int));
324 static void vr_mii_writereg __P((struct device *, int, int, int));
325 static void vr_mii_statchg __P((struct device *));
326
327 static void vr_setmulti __P((struct vr_softc *));
328 static void vr_reset __P((struct vr_softc *));
329
330 int vr_copy_small = 0;
331
/*
 * Read-modify-write helpers to set/clear bits in 8-, 16-, and
 * 32-bit CSRs.
 */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) & ~(x))
355
/*
 * MII bit-bang glue: maps the generic mii_bitbang(9) bit roles onto
 * the bits of the Rhine's VR_MIICMD register.
 */
u_int32_t vr_mii_bitbang_read __P((struct device *));
void vr_mii_bitbang_write __P((struct device *, u_int32_t));

const struct mii_bitbang_ops vr_mii_bitbang_ops = {
	vr_mii_bitbang_read,
	vr_mii_bitbang_write,
	{
		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
		VR_MIICMD_CLK,		/* MII_BIT_MDC */
		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
373
374 u_int32_t
375 vr_mii_bitbang_read(self)
376 struct device *self;
377 {
378 struct vr_softc *sc = (void *) self;
379
380 return (CSR_READ_1(sc, VR_MIICMD));
381 }
382
383 void
384 vr_mii_bitbang_write(self, val)
385 struct device *self;
386 u_int32_t val;
387 {
388 struct vr_softc *sc = (void *) self;
389
390 CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
391 }
392
393 /*
394 * Read an PHY register through the MII.
395 */
396 static int
397 vr_mii_readreg(self, phy, reg)
398 struct device *self;
399 int phy, reg;
400 {
401 struct vr_softc *sc = (void *) self;
402
403 CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
404 return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
405 }
406
407 /*
408 * Write to a PHY register through the MII.
409 */
410 static void
411 vr_mii_writereg(self, phy, reg, val)
412 struct device *self;
413 int phy, reg, val;
414 {
415 struct vr_softc *sc = (void *) self;
416
417 CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
418 mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
419 }
420
/*
 * MII status-change callback: propagate the negotiated duplex setting
 * into the chip's command register.
 */
static void
vr_mii_statchg(self)
	struct device *self;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	/* Restart TX/RX only if the interface is actually running. */
	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}
442
/*
 * Multicast hash: the top 6 bits of the big-endian CRC-32 of the
 * Ethernet address select one of the 64 hash-table bits.
 */
#define vr_calchash(addr) \
	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
445
446 /*
447 * Program the 64-bit multicast hash filter.
448 */
449 static void
450 vr_setmulti(sc)
451 struct vr_softc *sc;
452 {
453 struct ifnet *ifp;
454 int h = 0;
455 u_int32_t hashes[2] = { 0, 0 };
456 struct ether_multistep step;
457 struct ether_multi *enm;
458 int mcnt = 0;
459 u_int8_t rxfilt;
460
461 ifp = &sc->vr_ec.ec_if;
462
463 rxfilt = CSR_READ_1(sc, VR_RXCFG);
464
465 if (ifp->if_flags & IFF_PROMISC) {
466 allmulti:
467 ifp->if_flags |= IFF_ALLMULTI;
468 rxfilt |= VR_RXCFG_RX_MULTI;
469 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
470 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
471 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
472 return;
473 }
474
475 /* first, zot all the existing hash bits */
476 CSR_WRITE_4(sc, VR_MAR0, 0);
477 CSR_WRITE_4(sc, VR_MAR1, 0);
478
479 /* now program new ones */
480 ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
481 while (enm != NULL) {
482 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
483 ETHER_ADDR_LEN) != 0)
484 goto allmulti;
485
486 h = vr_calchash(enm->enm_addrlo);
487
488 if (h < 32)
489 hashes[0] |= (1 << h);
490 else
491 hashes[1] |= (1 << (h - 32));
492 ETHER_NEXT_MULTI(step, enm);
493 mcnt++;
494 }
495
496 ifp->if_flags &= ~IFF_ALLMULTI;
497
498 if (mcnt)
499 rxfilt |= VR_RXCFG_RX_MULTI;
500 else
501 rxfilt &= ~VR_RXCFG_RX_MULTI;
502
503 CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
504 CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
505 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
506 }
507
/*
 * Issue a software reset and poll until the chip reports completion.
 * Newer parts (>= VT3065 rev A) support a "force reset" fallback if
 * the normal reset never finishes.
 */
static void
vr_reset(sc)
	struct vr_softc *sc;
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	/* Poll up to VR_TIMEOUT times for the RESET bit to self-clear. */
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A) {
			printf("%s: reset never completed!\n",
			    sc->vr_dev.dv_xname);
		} else {
			/* Use newer force reset command */
			printf("%s: using force reset command.\n",
			    sc->vr_dev.dv_xname);
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}
536
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 *
 * Returns 0 on success or ENOBUFS if no mbuf/cluster was available;
 * in the latter case the descriptor's previous buffer (if any) is
 * left untouched.
 */
static int
vr_add_rxbuf(sc, i)
	struct vr_softc *sc;
	int i;
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	/* Replacing an old buffer: drop its DMA mapping first. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	/* Map the whole cluster for device reads (incoming frames). */
	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->vr_dev.dv_xname, i, error);
		panic("vr_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Hand the descriptor back to the chip. */
	VR_INIT_RXDESC(sc, i);

	return (0);
}
584
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	u_int32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	/*
	 * Walk the ring starting at the next-ready slot until we find
	 * a descriptor the chip still owns.
	 */
	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			/* Low byte of the status word encodes the error. */
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(le32toh(d->vr_status));

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it. This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain. If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (vr_copy_small != 0 && total_len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(ds->ds_mbuf, caddr_t), total_len);
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
 dropit:
				ifp->if_ierrors++;
				VR_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->vr_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned. We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		/* Shift forward 2 bytes to 4-byte-align the payload. */
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		/*
		 * The Rhine chip includes the FCS with every
		 * received packet.
		 */
		m->m_flags |= M_HASFCS;

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}
766
767 void
768 vr_rxeoc(sc)
769 struct vr_softc *sc;
770 {
771
772 vr_rxeof(sc);
773 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
774 CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
775 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
776 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
777 }
778
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	u_int32_t txstat;
	int i;

	/* We just freed descriptors, so the interface is no longer full. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	    i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = le32toh(d->vr_status);
		/* Still owned by the chip: stop, it is not done yet. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Accumulate the per-frame collision count. */
		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}
838
/*
 * Interrupt handler: loop while the ISR shows pending events,
 * dispatching RX, TX, and error processing.  Returns non-zero if the
 * interrupt was ours.
 */
static int
vr_intr(arg)
	void *arg;
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(ifp, 1);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		/* Read and acknowledge the pending status bits. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

#if NRND > 0
		/* Feed interrupt timing into the entropy pool. */
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, status);
#endif

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		/* RX trouble: drain and restart the receiver. */
		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
		     VR_ISR_RX_DROPPED))
			vr_rxeoc(sc);

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_ABRT)
				printf("%s: transmit aborted\n",
				    sc->vr_dev.dv_xname);
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
			/*
			 * Unfortunately many cards get stuck after
			 * aborted transmits, so we reset them.
			 */
			if (status & VR_ISR_TX_ABRT) {
				printf("%s: restarting\n", sc->vr_dev.dv_xname);
				dotx = 0;
				(void) vr_init(ifp);
			}
		}

		if (status & VR_ISR_BUSERR) {
			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
			/* vr_init() calls vr_start() */
			dotx = 0;
			(void) vr_init(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Descriptors freed up: try to queue more packets. */
	if (dotx)
		vr_start(ifp);

	return (handled);
}
929
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map. If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy. Note,
		 * the packet must also be aligned.
		 * If the packet is too small, copy it too, so we're sure
		 * to have enough room for the pad buffer.
		 */
		if ((mtod(m0, uintptr_t) & 3) != 0 ||
		    m0->m_pkthdr.len < VR_MIN_FRAMELEN ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		     BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->vr_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->vr_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			/* Flatten the chain into the fresh, aligned buffer. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			/*
			 * The Rhine doesn't auto-pad, so we have to do this
			 * ourselves.
			 */
			if (m0->m_pkthdr.len < VR_MIN_FRAMELEN) {
				memset(mtod(m, caddr_t) + m0->m_pkthdr.len,
				    0, VR_MIN_FRAMELEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = VR_MIN_FRAMELEN;
			}
			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->vr_dev.dv_xname, error);
				break;
			}
		}

		/* Commit: remove the packet from the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied; free the original and keep the copy. */
			m_freem(m0);
			m0 = m;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.
		 */
		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
		d->vr_ctl = htole32(m0->m_pkthdr.len);
		d->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG | VR_TXCTL_LASTFRAG);

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet. That could cause
		 * a race condition. We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = htole32(VR_TXSTAT_OWN);

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets. If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up. Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1105
1106 /*
1107 * Initialize the interface. Must be called at splnet.
1108 */
1109 static int
1110 vr_init(ifp)
1111 struct ifnet *ifp;
1112 {
1113 struct vr_softc *sc = ifp->if_softc;
1114 struct vr_desc *d;
1115 struct vr_descsoft *ds;
1116 int i, error = 0;
1117
1118 /* Cancel pending I/O. */
1119 vr_stop(ifp, 0);
1120
1121 /* Reset the Rhine to a known state. */
1122 vr_reset(sc);
1123
1124 /* set DMA length in BCR0 and BCR1 */
1125 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
1126 VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);
1127
1128 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
1129 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTH_128BYTES);
1130
1131 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
1132 VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTH_STORENFWD);
1133
1134 /* set DMA threshold length in RXCFG and TXCFG */
1135 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1136 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);
1137
1138 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1139 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1140
1141 /*
1142 * Initialize the transmit desciptor ring. txlast is initialized
1143 * to the end of the list so that it will wrap around to the first
1144 * descriptor when the first packet is transmitted.
1145 */
1146 for (i = 0; i < VR_NTXDESC; i++) {
1147 d = VR_CDTX(sc, i);
1148 memset(d, 0, sizeof(struct vr_desc));
1149 d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
1150 VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1151 }
1152 sc->vr_txpending = 0;
1153 sc->vr_txdirty = 0;
1154 sc->vr_txlast = VR_NTXDESC - 1;
1155
1156 /*
1157 * Initialize the receive descriptor ring.
1158 */
1159 for (i = 0; i < VR_NRXDESC; i++) {
1160 ds = VR_DSRX(sc, i);
1161 if (ds->ds_mbuf == NULL) {
1162 if ((error = vr_add_rxbuf(sc, i)) != 0) {
1163 printf("%s: unable to allocate or map rx "
1164 "buffer %d, error = %d\n",
1165 sc->vr_dev.dv_xname, i, error);
1166 /*
1167 * XXX Should attempt to run with fewer receive
1168 * XXX buffers instead of just failing.
1169 */
1170 vr_rxdrain(sc);
1171 goto out;
1172 }
1173 } else
1174 VR_INIT_RXDESC(sc, i);
1175 }
1176 sc->vr_rxptr = 0;
1177
1178 /* If we want promiscuous mode, set the allframes bit. */
1179 if (ifp->if_flags & IFF_PROMISC)
1180 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1181 else
1182 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1183
1184 /* Set capture broadcast bit to capture broadcast frames. */
1185 if (ifp->if_flags & IFF_BROADCAST)
1186 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1187 else
1188 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1189
1190 /* Program the multicast filter, if necessary. */
1191 vr_setmulti(sc);
1192
1193 /* Give the transmit and receive rings to the Rhine. */
1194 CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
1195 CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));
1196
1197 /* Set current media. */
1198 mii_mediachg(&sc->vr_mii);
1199
1200 /* Enable receiver and transmitter. */
1201 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1202 VR_CMD_TX_ON|VR_CMD_RX_ON|
1203 VR_CMD_RX_GO);
1204
1205 /* Enable interrupts. */
1206 CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1207 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1208
1209 ifp->if_flags |= IFF_RUNNING;
1210 ifp->if_flags &= ~IFF_OACTIVE;
1211
1212 /* Start one second timer. */
1213 callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
1214
1215 /* Attempt to start output on the interface. */
1216 vr_start(ifp);
1217
1218 out:
1219 if (error)
1220 printf("%s: interface not running\n", sc->vr_dev.dv_xname);
1221 return (error);
1222 }
1223
1224 /*
1225 * Set media options.
1226 */
1227 static int
1228 vr_ifmedia_upd(ifp)
1229 struct ifnet *ifp;
1230 {
1231 struct vr_softc *sc = ifp->if_softc;
1232
1233 if (ifp->if_flags & IFF_UP)
1234 mii_mediachg(&sc->vr_mii);
1235 return (0);
1236 }
1237
1238 /*
1239 * Report current media status.
1240 */
1241 static void
1242 vr_ifmedia_sts(ifp, ifmr)
1243 struct ifnet *ifp;
1244 struct ifmediareq *ifmr;
1245 {
1246 struct vr_softc *sc = ifp->if_softc;
1247
1248 mii_pollstat(&sc->vr_mii);
1249 ifmr->ifm_status = sc->vr_mii.mii_media_status;
1250 ifmr->ifm_active = sc->vr_mii.mii_media_active;
1251 }
1252
1253 static int
1254 vr_ioctl(ifp, command, data)
1255 struct ifnet *ifp;
1256 u_long command;
1257 caddr_t data;
1258 {
1259 struct vr_softc *sc = ifp->if_softc;
1260 struct ifreq *ifr = (struct ifreq *)data;
1261 int s, error = 0;
1262
1263 s = splnet();
1264
1265 switch (command) {
1266 case SIOCGIFMEDIA:
1267 case SIOCSIFMEDIA:
1268 error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
1269 break;
1270
1271 default:
1272 error = ether_ioctl(ifp, command, data);
1273 if (error == ENETRESET) {
1274 /*
1275 * Multicast list has changed; set the hardware filter
1276 * accordingly.
1277 */
1278 vr_setmulti(sc);
1279 error = 0;
1280 }
1281 break;
1282 }
1283
1284 splx(s);
1285 return (error);
1286 }
1287
1288 static void
1289 vr_watchdog(ifp)
1290 struct ifnet *ifp;
1291 {
1292 struct vr_softc *sc = ifp->if_softc;
1293
1294 printf("%s: device timeout\n", sc->vr_dev.dv_xname);
1295 ifp->if_oerrors++;
1296
1297 (void) vr_init(ifp);
1298 }
1299
1300 /*
1301 * One second timer, used to tick MII.
1302 */
1303 static void
1304 vr_tick(arg)
1305 void *arg;
1306 {
1307 struct vr_softc *sc = arg;
1308 int s;
1309
1310 s = splnet();
1311 mii_tick(&sc->vr_mii);
1312 splx(s);
1313
1314 callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
1315 }
1316
1317 /*
1318 * Drain the receive queue.
1319 */
1320 static void
1321 vr_rxdrain(sc)
1322 struct vr_softc *sc;
1323 {
1324 struct vr_descsoft *ds;
1325 int i;
1326
1327 for (i = 0; i < VR_NRXDESC; i++) {
1328 ds = VR_DSRX(sc, i);
1329 if (ds->ds_mbuf != NULL) {
1330 bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
1331 m_freem(ds->ds_mbuf);
1332 ds->ds_mbuf = NULL;
1333 }
1334 }
1335 }
1336
1337 /*
1338 * Stop the adapter and free any mbufs allocated to the
1339 * transmit lists.
1340 */
1341 static void
1342 vr_stop(ifp, disable)
1343 struct ifnet *ifp;
1344 int disable;
1345 {
1346 struct vr_softc *sc = ifp->if_softc;
1347 struct vr_descsoft *ds;
1348 int i;
1349
1350 /* Cancel one second timer. */
1351 callout_stop(&sc->vr_tick_ch);
1352
1353 /* Down the MII. */
1354 mii_down(&sc->vr_mii);
1355
1356 ifp = &sc->vr_ec.ec_if;
1357 ifp->if_timer = 0;
1358
1359 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1360 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1361 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1362 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1363 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1364
1365 /*
1366 * Release any queued transmit buffers.
1367 */
1368 for (i = 0; i < VR_NTXDESC; i++) {
1369 ds = VR_DSTX(sc, i);
1370 if (ds->ds_mbuf != NULL) {
1371 bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
1372 m_freem(ds->ds_mbuf);
1373 ds->ds_mbuf = NULL;
1374 }
1375 }
1376
1377 if (disable)
1378 vr_rxdrain(sc);
1379
1380 /*
1381 * Mark the interface down and cancel the watchdog timer.
1382 */
1383 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1384 ifp->if_timer = 0;
1385 }
1386
1387 static struct vr_type *vr_lookup __P((struct pci_attach_args *));
1388 static int vr_probe __P((struct device *, struct cfdata *, void *));
1389 static void vr_attach __P((struct device *, struct device *, void *));
1390 static void vr_shutdown __P((void *));
1391
1392 CFATTACH_DECL(vr, sizeof (struct vr_softc),
1393 vr_probe, vr_attach, NULL, NULL);
1394
1395 static struct vr_type *
1396 vr_lookup(pa)
1397 struct pci_attach_args *pa;
1398 {
1399 struct vr_type *vrt;
1400
1401 for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
1402 if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
1403 PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
1404 return (vrt);
1405 }
1406 return (NULL);
1407 }
1408
1409 static int
1410 vr_probe(parent, match, aux)
1411 struct device *parent;
1412 struct cfdata *match;
1413 void *aux;
1414 {
1415 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1416
1417 if (vr_lookup(pa) != NULL)
1418 return (1);
1419
1420 return (0);
1421 }
1422
1423 /*
1424 * Stop all chip I/O so that the kernel's probe routines don't
1425 * get confused by errant DMAs when rebooting.
1426 */
1427 static void
1428 vr_shutdown(arg)
1429 void *arg;
1430 {
1431 struct vr_softc *sc = (struct vr_softc *)arg;
1432
1433 vr_stop(&sc->vr_ec.ec_if, 1);
1434 }
1435
1436 /*
1437 * Attach the interface. Allocate softc structures, do ifmedia
1438 * setup and ethernet/BPF attach.
1439 */
1440 static void
1441 vr_attach(parent, self, aux)
1442 struct device *parent;
1443 struct device *self;
1444 void *aux;
1445 {
1446 struct vr_softc *sc = (struct vr_softc *) self;
1447 struct pci_attach_args *pa = (struct pci_attach_args *) aux;
1448 bus_dma_segment_t seg;
1449 struct vr_type *vrt;
1450 u_int32_t pmreg, reg;
1451 struct ifnet *ifp;
1452 u_char eaddr[ETHER_ADDR_LEN];
1453 int i, rseg, error;
1454
1455 #define PCI_CONF_WRITE(r, v) pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
1456 #define PCI_CONF_READ(r) pci_conf_read(pa->pa_pc, pa->pa_tag, (r))
1457
1458 callout_init(&sc->vr_tick_ch);
1459
1460 vrt = vr_lookup(pa);
1461 if (vrt == NULL) {
1462 printf("\n");
1463 panic("vr_attach: impossible");
1464 }
1465
1466 printf(": %s Ethernet\n", vrt->vr_name);
1467
1468 /*
1469 * Handle power management nonsense.
1470 */
1471
1472 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1473 PCI_CAP_PWRMGMT, &pmreg, 0)) {
1474 reg = PCI_CONF_READ(pmreg + PCI_PMCSR);
1475 if ((reg & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_STATE_D0) {
1476 u_int32_t iobase, membase, irq;
1477
1478 /* Save important PCI config data. */
1479 iobase = PCI_CONF_READ(VR_PCI_LOIO);
1480 membase = PCI_CONF_READ(VR_PCI_LOMEM);
1481 irq = PCI_CONF_READ(PCI_INTERRUPT_REG);
1482
1483 /* Reset the power state. */
1484 printf("%s: chip is in D%d power mode "
1485 "-- setting to D0\n",
1486 sc->vr_dev.dv_xname, reg & PCI_PMCSR_STATE_MASK);
1487 reg = (reg & ~PCI_PMCSR_STATE_MASK) |
1488 PCI_PMCSR_STATE_D0;
1489 PCI_CONF_WRITE(pmreg + PCI_PMCSR, reg);
1490
1491 /* Restore PCI config data. */
1492 PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
1493 PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
1494 PCI_CONF_WRITE(PCI_INTERRUPT_REG, irq);
1495 }
1496 }
1497
1498 /* Make sure bus mastering is enabled. */
1499 reg = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1500 reg |= PCI_COMMAND_MASTER_ENABLE;
1501 PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, reg);
1502
1503 /* Get revision */
1504 sc->vr_revid = PCI_REVISION(pa->pa_class);
1505
1506 /*
1507 * Map control/status registers.
1508 */
1509 {
1510 bus_space_tag_t iot, memt;
1511 bus_space_handle_t ioh, memh;
1512 int ioh_valid, memh_valid;
1513 pci_intr_handle_t intrhandle;
1514 const char *intrstr;
1515
1516 ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
1517 PCI_MAPREG_TYPE_IO, 0,
1518 &iot, &ioh, NULL, NULL) == 0);
1519 memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
1520 PCI_MAPREG_TYPE_MEM |
1521 PCI_MAPREG_MEM_TYPE_32BIT,
1522 0, &memt, &memh, NULL, NULL) == 0);
1523 #if defined(VR_USEIOSPACE)
1524 if (ioh_valid) {
1525 sc->vr_bst = iot;
1526 sc->vr_bsh = ioh;
1527 } else if (memh_valid) {
1528 sc->vr_bst = memt;
1529 sc->vr_bsh = memh;
1530 }
1531 #else
1532 if (memh_valid) {
1533 sc->vr_bst = memt;
1534 sc->vr_bsh = memh;
1535 } else if (ioh_valid) {
1536 sc->vr_bst = iot;
1537 sc->vr_bsh = ioh;
1538 }
1539 #endif
1540 else {
1541 printf(": unable to map device registers\n");
1542 return;
1543 }
1544
1545 /* Allocate interrupt */
1546 if (pci_intr_map(pa, &intrhandle)) {
1547 printf("%s: couldn't map interrupt\n",
1548 sc->vr_dev.dv_xname);
1549 return;
1550 }
1551 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
1552 sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
1553 vr_intr, sc);
1554 if (sc->vr_ih == NULL) {
1555 printf("%s: couldn't establish interrupt",
1556 sc->vr_dev.dv_xname);
1557 if (intrstr != NULL)
1558 printf(" at %s", intrstr);
1559 printf("\n");
1560 }
1561 printf("%s: interrupting at %s\n",
1562 sc->vr_dev.dv_xname, intrstr);
1563 }
1564
1565 /*
1566 * Windows may put the chip in suspend mode when it
1567 * shuts down. Be sure to kick it in the head to wake it
1568 * up again.
1569 */
1570 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
1571
1572 /* Reset the adapter. */
1573 vr_reset(sc);
1574
1575 /*
1576 * Get station address. The way the Rhine chips work,
1577 * you're not allowed to directly access the EEPROM once
1578 * they've been programmed a special way. Consequently,
1579 * we need to read the node address from the PAR0 and PAR1
1580 * registers.
1581 *
1582 * XXXSCW: On the Rhine III, setting VR_EECSR_LOAD forces a reload
1583 * of the *whole* EEPROM, not just the MAC address. This is
1584 * pretty pointless since the chip does this automatically
1585 * at powerup/reset.
1586 * I suspect the same thing applies to the other Rhine
1587 * variants, but in the absence of a data sheet for those
1588 * (and the lack of anyone else noticing the problems this
1589 * causes) I'm going to retain the old behaviour for the
1590 * other parts.
1591 */
1592 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6105 &&
1593 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6102) {
1594 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1595 DELAY(200);
1596 }
1597 for (i = 0; i < ETHER_ADDR_LEN; i++)
1598 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1599
1600 /*
1601 * A Rhine chip was detected. Inform the world.
1602 */
1603 printf("%s: Ethernet address: %s\n",
1604 sc->vr_dev.dv_xname, ether_sprintf(eaddr));
1605
1606 memcpy(sc->vr_enaddr, eaddr, ETHER_ADDR_LEN);
1607
1608 sc->vr_dmat = pa->pa_dmat;
1609
1610 /*
1611 * Allocate the control data structures, and create and load
1612 * the DMA map for it.
1613 */
1614 if ((error = bus_dmamem_alloc(sc->vr_dmat,
1615 sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
1616 0)) != 0) {
1617 printf("%s: unable to allocate control data, error = %d\n",
1618 sc->vr_dev.dv_xname, error);
1619 goto fail_0;
1620 }
1621
1622 if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
1623 sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
1624 BUS_DMA_COHERENT)) != 0) {
1625 printf("%s: unable to map control data, error = %d\n",
1626 sc->vr_dev.dv_xname, error);
1627 goto fail_1;
1628 }
1629
1630 if ((error = bus_dmamap_create(sc->vr_dmat,
1631 sizeof(struct vr_control_data), 1,
1632 sizeof(struct vr_control_data), 0, 0,
1633 &sc->vr_cddmamap)) != 0) {
1634 printf("%s: unable to create control data DMA map, "
1635 "error = %d\n", sc->vr_dev.dv_xname, error);
1636 goto fail_2;
1637 }
1638
1639 if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
1640 sc->vr_control_data, sizeof(struct vr_control_data), NULL,
1641 0)) != 0) {
1642 printf("%s: unable to load control data DMA map, error = %d\n",
1643 sc->vr_dev.dv_xname, error);
1644 goto fail_3;
1645 }
1646
1647 /*
1648 * Create the transmit buffer DMA maps.
1649 */
1650 for (i = 0; i < VR_NTXDESC; i++) {
1651 if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
1652 1, MCLBYTES, 0, 0,
1653 &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
1654 printf("%s: unable to create tx DMA map %d, "
1655 "error = %d\n", sc->vr_dev.dv_xname, i, error);
1656 goto fail_4;
1657 }
1658 }
1659
1660 /*
1661 * Create the receive buffer DMA maps.
1662 */
1663 for (i = 0; i < VR_NRXDESC; i++) {
1664 if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
1665 MCLBYTES, 0, 0,
1666 &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
1667 printf("%s: unable to create rx DMA map %d, "
1668 "error = %d\n", sc->vr_dev.dv_xname, i, error);
1669 goto fail_5;
1670 }
1671 VR_DSRX(sc, i)->ds_mbuf = NULL;
1672 }
1673
1674 ifp = &sc->vr_ec.ec_if;
1675 ifp->if_softc = sc;
1676 ifp->if_mtu = ETHERMTU;
1677 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1678 ifp->if_ioctl = vr_ioctl;
1679 ifp->if_start = vr_start;
1680 ifp->if_watchdog = vr_watchdog;
1681 ifp->if_init = vr_init;
1682 ifp->if_stop = vr_stop;
1683 IFQ_SET_READY(&ifp->if_snd);
1684
1685 strcpy(ifp->if_xname, sc->vr_dev.dv_xname);
1686
1687 /*
1688 * Initialize MII/media info.
1689 */
1690 sc->vr_mii.mii_ifp = ifp;
1691 sc->vr_mii.mii_readreg = vr_mii_readreg;
1692 sc->vr_mii.mii_writereg = vr_mii_writereg;
1693 sc->vr_mii.mii_statchg = vr_mii_statchg;
1694 ifmedia_init(&sc->vr_mii.mii_media, IFM_IMASK, vr_ifmedia_upd,
1695 vr_ifmedia_sts);
1696 mii_attach(&sc->vr_dev, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
1697 MII_OFFSET_ANY, MIIF_FORCEANEG);
1698 if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
1699 ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1700 ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
1701 } else
1702 ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
1703
1704 /*
1705 * Call MI attach routines.
1706 */
1707 if_attach(ifp);
1708 ether_ifattach(ifp, sc->vr_enaddr);
1709 #if NRND > 0
1710 rnd_attach_source(&sc->rnd_source, sc->vr_dev.dv_xname,
1711 RND_TYPE_NET, 0);
1712 #endif
1713
1714 sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
1715 if (sc->vr_ats == NULL)
1716 printf("%s: warning: couldn't establish shutdown hook\n",
1717 sc->vr_dev.dv_xname);
1718 return;
1719
1720 fail_5:
1721 for (i = 0; i < VR_NRXDESC; i++) {
1722 if (sc->vr_rxsoft[i].ds_dmamap != NULL)
1723 bus_dmamap_destroy(sc->vr_dmat,
1724 sc->vr_rxsoft[i].ds_dmamap);
1725 }
1726 fail_4:
1727 for (i = 0; i < VR_NTXDESC; i++) {
1728 if (sc->vr_txsoft[i].ds_dmamap != NULL)
1729 bus_dmamap_destroy(sc->vr_dmat,
1730 sc->vr_txsoft[i].ds_dmamap);
1731 }
1732 bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
1733 fail_3:
1734 bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
1735 fail_2:
1736 bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
1737 sizeof(struct vr_control_data));
1738 fail_1:
1739 bus_dmamem_free(sc->vr_dmat, &seg, rseg);
1740 fail_0:
1741 return;
1742 }
1743